Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0117-4.1.18-all-fixes.patch



Revision 2764
Wed Feb 17 09:11:44 2016 UTC by niro
File size: 377690 bytes
-linux-4.1.18
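
The patch below uses a/ and b/ path prefixes, so it applies with strip level 1. A minimal sketch of applying it, assuming a vanilla linux-4.1.17 source tree (the directory name is illustrative):

    $ cd linux-4.1.17
    $ patch -p1 --dry-run < 0117-4.1.18-all-fixes.patch   # check that all hunks apply cleanly first
    $ patch -p1 < 0117-4.1.18-all-fixes.patch
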
1 diff --git a/Makefile b/Makefile
2 index d398dd440bc9..001375cfd815 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 1
8 -SUBLEVEL = 17
9 +SUBLEVEL = 18
10 EXTRAVERSION =
11 NAME = Series 4800
12
13 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
14 index 0c12ffb155a2..f775d7161ffb 100644
15 --- a/arch/arm/Kconfig.debug
16 +++ b/arch/arm/Kconfig.debug
17 @@ -161,10 +161,9 @@ choice
18 mobile SoCs in the Kona family of chips (e.g. bcm28155,
19 bcm11351, etc...)
20
21 - config DEBUG_BCM63XX
22 + config DEBUG_BCM63XX_UART
23 bool "Kernel low-level debugging on BCM63XX UART"
24 depends on ARCH_BCM_63XX
25 - select DEBUG_UART_BCM63XX
26
27 config DEBUG_BERLIN_UART
28 bool "Marvell Berlin SoC Debug UART"
29 @@ -1304,7 +1303,7 @@ config DEBUG_LL_INCLUDE
30 default "debug/vf.S" if DEBUG_VF_UART
31 default "debug/vt8500.S" if DEBUG_VT8500_UART0
32 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
33 - default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
34 + default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
35 default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
36 default "mach/debug-macro.S"
37
38 @@ -1320,10 +1319,6 @@ config DEBUG_UART_8250
39 ARCH_IOP33X || ARCH_IXP4XX || \
40 ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
41
42 -# Compatibility options for BCM63xx
43 -config DEBUG_UART_BCM63XX
44 - def_bool ARCH_BCM_63XX
45 -
46 config DEBUG_UART_PHYS
47 hex "Physical base address of debug UART"
48 default 0x00100a00 if DEBUG_NETX_UART
49 @@ -1415,7 +1410,7 @@ config DEBUG_UART_PHYS
50 default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
51 default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
52 default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
53 - default 0xfffe8600 if DEBUG_UART_BCM63XX
54 + default 0xfffe8600 if DEBUG_BCM63XX_UART
55 default 0xfffff700 if ARCH_IOP33X
56 depends on ARCH_EP93XX || \
57 DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
58 @@ -1427,7 +1422,7 @@ config DEBUG_UART_PHYS
59 DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
60 DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
61 DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
62 - DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
63 + DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
64 DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
65
66 config DEBUG_UART_VIRT
67 @@ -1466,7 +1461,7 @@ config DEBUG_UART_VIRT
68 default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
69 default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
70 default 0xfc40ab00 if DEBUG_BRCMSTB_UART
71 - default 0xfcfe8600 if DEBUG_UART_BCM63XX
72 + default 0xfcfe8600 if DEBUG_BCM63XX_UART
73 default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
74 default 0xfd000000 if ARCH_SPEAR13XX
75 default 0xfd012000 if ARCH_MV78XX0
76 @@ -1516,7 +1511,7 @@ config DEBUG_UART_VIRT
77 DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
78 DEBUG_NETX_UART || \
79 DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
80 - DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
81 + DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
82 DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
83
84 config DEBUG_UART_8250_SHIFT
85 diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
86 index 78514ab0b47a..757ac079e7f2 100644
87 --- a/arch/arm/boot/dts/armada-388-gp.dts
88 +++ b/arch/arm/boot/dts/armada-388-gp.dts
89 @@ -288,16 +288,6 @@
90 gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
91 };
92
93 - reg_usb2_1_vbus: v5-vbus1 {
94 - compatible = "regulator-fixed";
95 - regulator-name = "v5.0-vbus1";
96 - regulator-min-microvolt = <5000000>;
97 - regulator-max-microvolt = <5000000>;
98 - enable-active-high;
99 - regulator-always-on;
100 - gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
101 - };
102 -
103 reg_sata0: pwr-sata0 {
104 compatible = "regulator-fixed";
105 regulator-name = "pwr_en_sata0";
106 diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
107 index c740e1a2a3a5..4f29968076ce 100644
108 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
109 +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
110 @@ -98,7 +98,7 @@
111
112 phy0: ethernet-phy@1 {
113 interrupt-parent = <&pioE>;
114 - interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
115 + interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
116 reg = <1>;
117 };
118 };
119 diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
120 index 45e7761b7a29..d4d24a081404 100644
121 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts
122 +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
123 @@ -141,8 +141,15 @@
124 };
125
126 macb0: ethernet@f8020000 {
127 + pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
128 phy-mode = "rmii";
129 status = "okay";
130 +
131 + ethernet-phy@1 {
132 + reg = <0x1>;
133 + interrupt-parent = <&pioE>;
134 + interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
135 + };
136 };
137
138 mmc1: mmc@fc000000 {
139 @@ -174,6 +181,10 @@
140
141 pinctrl@fc06a000 {
142 board {
143 + pinctrl_macb0_phy_irq: macb0_phy_irq {
144 + atmel,pins =
145 + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
146 + };
147 pinctrl_mmc0_cd: mmc0_cd {
148 atmel,pins =
149 <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
150 diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
151 index 9cf0ab62db7d..cf11660f35a1 100644
152 --- a/arch/arm/boot/dts/sama5d4.dtsi
153 +++ b/arch/arm/boot/dts/sama5d4.dtsi
154 @@ -1219,7 +1219,7 @@
155 dbgu: serial@fc069000 {
156 compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
157 reg = <0xfc069000 0x200>;
158 - interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
159 + interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
160 pinctrl-names = "default";
161 pinctrl-0 = <&pinctrl_dbgu>;
162 clocks = <&dbgu_clk>;
163 diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
164 index f182f6538e90..89ed9b45d533 100644
165 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
166 +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
167 @@ -122,22 +122,14 @@
168 };
169 mmcsd_default_mode: mmcsd_default {
170 mmcsd_default_cfg1 {
171 - /* MCCLK */
172 - pins = "GPIO8_B10";
173 - ste,output = <0>;
174 - };
175 - mmcsd_default_cfg2 {
176 - /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
177 - pins = "GPIO10_C11", "GPIO15_A12",
178 - "GPIO16_C13", "GPIO23_D15";
179 - ste,output = <1>;
180 - };
181 - mmcsd_default_cfg3 {
182 - /* MCCMD, MCDAT3-0, MCMSFBCLK */
183 - pins = "GPIO9_A10", "GPIO11_B11",
184 - "GPIO12_A11", "GPIO13_C12",
185 - "GPIO14_B12", "GPIO24_C15";
186 - ste,input = <1>;
187 + /*
188 + * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
189 + * MCCMD, MCDAT3-0, MCMSFBCLK
190 + */
191 + pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
192 + "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
193 + "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
194 + ste,output = <2>;
195 };
196 };
197 };
198 @@ -802,10 +794,21 @@
199 clock-names = "mclk", "apb_pclk";
200 interrupt-parent = <&vica>;
201 interrupts = <22>;
202 - max-frequency = <48000000>;
203 + max-frequency = <400000>;
204 bus-width = <4>;
205 cap-mmc-highspeed;
206 cap-sd-highspeed;
207 + full-pwr-cycle;
208 + /*
209 + * The STw4811 circuit used with the Nomadik strictly
210 + * requires that all of these signal direction pins be
211 + * routed and used for its 4-bit levelshifter.
212 + */
213 + st,sig-dir-dat0;
214 + st,sig-dir-dat2;
215 + st,sig-dir-dat31;
216 + st,sig-dir-cmd;
217 + st,sig-pin-fbclk;
218 pinctrl-names = "default";
219 pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
220 vmmc-supply = <&vmmc_regulator>;
221 diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
222 index eafd120b53f1..8e2a7acb823b 100644
223 --- a/arch/arm/mach-omap2/sleep34xx.S
224 +++ b/arch/arm/mach-omap2/sleep34xx.S
225 @@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
226 stmfd sp!, {lr} @ save registers on stack
227 /* Setup so that we will disable and enable l2 */
228 mov r1, #0x1
229 - adrl r2, l2dis_3630 @ may be too distant for plain adr
230 - str r1, [r2]
231 + adrl r3, l2dis_3630_offset @ may be too distant for plain adr
232 + ldr r2, [r3] @ value for offset
233 + str r1, [r2, r3] @ write to l2dis_3630
234 ldmfd sp!, {pc} @ restore regs and return
235 ENDPROC(enable_omap3630_toggle_l2_on_restore)
236
237 - .text
238 -/* Function to call rom code to save secure ram context */
239 +/*
240 + * Function to call rom code to save secure ram context. This gets
241 + * relocated to SRAM, so it can be all in .data section. Otherwise
242 + * we need to initialize api_params separately.
243 + */
244 + .data
245 .align 3
246 ENTRY(save_secure_ram_context)
247 stmfd sp!, {r4 - r11, lr} @ save registers on stack
248 @@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
249 ENTRY(save_secure_ram_context_sz)
250 .word . - save_secure_ram_context
251
252 + .text
253 +
254 /*
255 * ======================
256 * == Idle entry point ==
257 @@ -289,12 +296,6 @@ wait_sdrc_ready:
258 bic r5, r5, #0x40
259 str r5, [r4]
260
261 -/*
262 - * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
263 - * base instead.
264 - * Be careful not to clobber r7 when maintaing this code.
265 - */
266 -
267 is_dll_in_lock_mode:
268 /* Is dll in lock mode? */
269 ldr r4, sdrc_dlla_ctrl
270 @@ -302,11 +303,7 @@ is_dll_in_lock_mode:
271 tst r5, #0x4
272 bne exit_nonoff_modes @ Return if locked
273 /* wait till dll locks */
274 - adr r7, kick_counter
275 wait_dll_lock_timed:
276 - ldr r4, wait_dll_lock_counter
277 - add r4, r4, #1
278 - str r4, [r7, #wait_dll_lock_counter - kick_counter]
279 ldr r4, sdrc_dlla_status
280 /* Wait 20uS for lock */
281 mov r6, #8
282 @@ -330,9 +327,6 @@ kick_dll:
283 orr r6, r6, #(1<<3) @ enable dll
284 str r6, [r4]
285 dsb
286 - ldr r4, kick_counter
287 - add r4, r4, #1
288 - str r4, [r7] @ kick_counter
289 b wait_dll_lock_timed
290
291 exit_nonoff_modes:
292 @@ -360,15 +354,6 @@ sdrc_dlla_status:
293 .word SDRC_DLLA_STATUS_V
294 sdrc_dlla_ctrl:
295 .word SDRC_DLLA_CTRL_V
296 - /*
297 - * When exporting to userspace while the counters are in SRAM,
298 - * these 2 words need to be at the end to facilitate retrival!
299 - */
300 -kick_counter:
301 - .word 0
302 -wait_dll_lock_counter:
303 - .word 0
304 -
305 ENTRY(omap3_do_wfi_sz)
306 .word . - omap3_do_wfi
307
308 @@ -437,7 +422,9 @@ ENTRY(omap3_restore)
309 cmp r2, #0x0 @ Check if target power state was OFF or RET
310 bne logic_l1_restore
311
312 - ldr r0, l2dis_3630
313 + adr r1, l2dis_3630_offset @ address for offset
314 + ldr r0, [r1] @ value for offset
315 + ldr r0, [r1, r0] @ value at l2dis_3630
316 cmp r0, #0x1 @ should we disable L2 on 3630?
317 bne skipl2dis
318 mrc p15, 0, r0, c1, c0, 1
319 @@ -506,7 +493,9 @@ l2_inv_gp:
320 mov r12, #0x2
321 smc #0 @ Call SMI monitor (smieq)
322 logic_l1_restore:
323 - ldr r1, l2dis_3630
324 + adr r0, l2dis_3630_offset @ adress for offset
325 + ldr r1, [r0] @ value for offset
326 + ldr r1, [r0, r1] @ value at l2dis_3630
327 cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
328 bne skipl2reen
329 mrc p15, 0, r1, c1, c0, 1
330 @@ -535,6 +524,10 @@ control_stat:
331 .word CONTROL_STAT
332 control_mem_rta:
333 .word CONTROL_MEM_RTA_CTRL
334 +l2dis_3630_offset:
335 + .long l2dis_3630 - .
336 +
337 + .data
338 l2dis_3630:
339 .word 0
340
341 diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
342 index ad1bb9431e94..5373a3281779 100644
343 --- a/arch/arm/mach-omap2/sleep44xx.S
344 +++ b/arch/arm/mach-omap2/sleep44xx.S
345 @@ -29,12 +29,6 @@
346 dsb
347 .endm
348
349 -ppa_zero_params:
350 - .word 0x0
351 -
352 -ppa_por_params:
353 - .word 1, 0
354 -
355 #ifdef CONFIG_ARCH_OMAP4
356
357 /*
358 @@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
359 beq skip_ns_smp_enable
360 ppa_actrl_retry:
361 mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
362 - adr r3, ppa_zero_params @ Pointer to parameters
363 + adr r1, ppa_zero_params_offset
364 + ldr r3, [r1]
365 + add r3, r3, r1 @ Pointer to ppa_zero_params
366 mov r1, #0x0 @ Process ID
367 mov r2, #0x4 @ Flag
368 mov r6, #0xff
369 @@ -303,7 +299,9 @@ skip_ns_smp_enable:
370 ldr r0, =OMAP4_PPA_L2_POR_INDEX
371 ldr r1, =OMAP44XX_SAR_RAM_BASE
372 ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
373 - adr r3, ppa_por_params
374 + adr r1, ppa_por_params_offset
375 + ldr r3, [r1]
376 + add r3, r3, r1 @ Pointer to ppa_por_params
377 str r4, [r3, #0x04]
378 mov r1, #0x0 @ Process ID
379 mov r2, #0x4 @ Flag
380 @@ -328,6 +326,8 @@ skip_l2en:
381 #endif
382
383 b cpu_resume @ Jump to generic resume
384 +ppa_por_params_offset:
385 + .long ppa_por_params - .
386 ENDPROC(omap4_cpu_resume)
387 #endif /* CONFIG_ARCH_OMAP4 */
388
389 @@ -382,4 +382,13 @@ ENTRY(omap_do_wfi)
390 nop
391
392 ldmfd sp!, {pc}
393 +ppa_zero_params_offset:
394 + .long ppa_zero_params - .
395 ENDPROC(omap_do_wfi)
396 +
397 + .data
398 +ppa_zero_params:
399 + .word 0
400 +
401 +ppa_por_params:
402 + .word 1, 0
403 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
404 index 36aa31ff2c06..cc7435c9676e 100644
405 --- a/arch/arm64/kernel/head.S
406 +++ b/arch/arm64/kernel/head.S
407 @@ -566,9 +566,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
408 #endif
409
410 /* EL2 debug */
411 + mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
412 + sbfx x0, x0, #8, #4
413 + cmp x0, #1
414 + b.lt 4f // Skip if no PMU present
415 mrs x0, pmcr_el0 // Disable debug access traps
416 ubfx x0, x0, #11, #5 // to EL2 and allow access to
417 msr mdcr_el2, x0 // all PMU counters from EL1
418 +4:
419
420 /* Stage-2 translation */
421 msr vttbr_el2, xzr
422 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
423 index 7778453762d8..b67b01cb5109 100644
424 --- a/arch/arm64/kernel/perf_event.c
425 +++ b/arch/arm64/kernel/perf_event.c
426 @@ -1242,9 +1242,6 @@ static void armv8pmu_reset(void *info)
427
428 /* Initialize & Reset PMNC: C and P bits. */
429 armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
430 -
431 - /* Disable access from userspace. */
432 - asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
433 }
434
435 static int armv8_pmuv3_map_event(struct perf_event *event)
436 diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
437 index e47ed1c5dce1..545710f854f8 100644
438 --- a/arch/arm64/mm/pageattr.c
439 +++ b/arch/arm64/mm/pageattr.c
440 @@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
441 if (end < MODULES_VADDR || end >= MODULES_END)
442 return -EINVAL;
443
444 + if (!numpages)
445 + return 0;
446 +
447 data.set_mask = set_mask;
448 data.clear_mask = clear_mask;
449
450 diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
451 index 4c4d93c4bf65..d69dffffaa89 100644
452 --- a/arch/arm64/mm/proc-macros.S
453 +++ b/arch/arm64/mm/proc-macros.S
454 @@ -62,3 +62,15 @@
455 bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
456 #endif
457 .endm
458 +
459 +/*
460 + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
461 + */
462 + .macro reset_pmuserenr_el0, tmpreg
463 + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
464 + sbfx \tmpreg, \tmpreg, #8, #4
465 + cmp \tmpreg, #1 // Skip if no PMU present
466 + b.lt 9000f
467 + msr pmuserenr_el0, xzr // Disable PMU access from EL0
468 +9000:
469 + .endm
470 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
471 index cdd754e19b9b..d253908a988d 100644
472 --- a/arch/arm64/mm/proc.S
473 +++ b/arch/arm64/mm/proc.S
474 @@ -165,6 +165,7 @@ ENTRY(cpu_do_resume)
475 */
476 ubfx x11, x11, #1, #1
477 msr oslar_el1, x11
478 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0
479 mov x0, x12
480 dsb nsh // Make sure local tlb invalidation completed
481 isb
482 @@ -202,7 +203,9 @@ ENTRY(__cpu_setup)
483
484 mov x0, #3 << 20
485 msr cpacr_el1, x0 // Enable FP/ASIMD
486 - msr mdscr_el1, xzr // Reset mdscr_el1
487 + mov x0, #1 << 12 // Reset mdscr_el1 and disable
488 + msr mdscr_el1, x0 // access to the DCC from EL0
489 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0
490 /*
491 * Memory region attributes for LPAE:
492 *
493 diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
494 index 0392112a5d70..a5ecef7188ba 100644
495 --- a/arch/m32r/kernel/setup.c
496 +++ b/arch/m32r/kernel/setup.c
497 @@ -81,7 +81,10 @@ static struct resource code_resource = {
498 };
499
500 unsigned long memory_start;
501 +EXPORT_SYMBOL(memory_start);
502 +
503 unsigned long memory_end;
504 +EXPORT_SYMBOL(memory_end);
505
506 void __init setup_arch(char **);
507 int get_cpuinfo(char *);
508 diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
509 index 70f6e7f073b0..7fe24aef7fdc 100644
510 --- a/arch/mips/include/asm/pgtable.h
511 +++ b/arch/mips/include/asm/pgtable.h
512 @@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
513 static inline pte_t pte_mkyoung(pte_t pte)
514 {
515 pte_val(pte) |= _PAGE_ACCESSED;
516 -#ifdef CONFIG_CPU_MIPSR2
517 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
518 if (!(pte_val(pte) & _PAGE_NO_READ))
519 pte_val(pte) |= _PAGE_SILENT_READ;
520 else
521 @@ -558,7 +558,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
522 {
523 pmd_val(pmd) |= _PAGE_ACCESSED;
524
525 -#ifdef CONFIG_CPU_MIPSR2
526 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
527 if (!(pmd_val(pmd) & _PAGE_NO_READ))
528 pmd_val(pmd) |= _PAGE_SILENT_READ;
529 else
530 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
531 index 97c87027c17f..90b0e8316790 100644
532 --- a/arch/mips/mm/tlbex.c
533 +++ b/arch/mips/mm/tlbex.c
534 @@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
535 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
536 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
537 #endif
538 -#ifdef CONFIG_CPU_MIPSR2
539 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
540 if (cpu_has_rixi) {
541 #ifdef _PAGE_NO_EXEC_SHIFT
542 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
543 diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
544 index d7034728f377..1c75565d984b 100644
545 --- a/arch/parisc/include/uapi/asm/siginfo.h
546 +++ b/arch/parisc/include/uapi/asm/siginfo.h
547 @@ -1,6 +1,10 @@
548 #ifndef _PARISC_SIGINFO_H
549 #define _PARISC_SIGINFO_H
550
551 +#if defined(__LP64__)
552 +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
553 +#endif
554 +
555 #include <asm-generic/siginfo.h>
556
557 #undef NSIGTRAP
558 diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
559 index 35f0b62259bb..22f6d954ef89 100644
560 --- a/arch/powerpc/kernel/eeh_pe.c
561 +++ b/arch/powerpc/kernel/eeh_pe.c
562 @@ -861,32 +861,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
563 const char *eeh_pe_loc_get(struct eeh_pe *pe)
564 {
565 struct pci_bus *bus = eeh_pe_bus_get(pe);
566 - struct device_node *dn = pci_bus_to_OF_node(bus);
567 + struct device_node *dn;
568 const char *loc = NULL;
569
570 - if (!dn)
571 - goto out;
572 + while (bus) {
573 + dn = pci_bus_to_OF_node(bus);
574 + if (!dn) {
575 + bus = bus->parent;
576 + continue;
577 + }
578
579 - /* PHB PE or root PE ? */
580 - if (pci_is_root_bus(bus)) {
581 - loc = of_get_property(dn, "ibm,loc-code", NULL);
582 - if (!loc)
583 + if (pci_is_root_bus(bus))
584 loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
585 + else
586 + loc = of_get_property(dn, "ibm,slot-location-code",
587 + NULL);
588 +
589 if (loc)
590 - goto out;
591 + return loc;
592
593 - /* Check the root port */
594 - dn = dn->child;
595 - if (!dn)
596 - goto out;
597 + bus = bus->parent;
598 }
599
600 - loc = of_get_property(dn, "ibm,loc-code", NULL);
601 - if (!loc)
602 - loc = of_get_property(dn, "ibm,slot-location-code", NULL);
603 -
604 -out:
605 - return loc ? loc : "N/A";
606 + return "N/A";
607 }
608
609 /**
610 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
611 index ffd98b2bfa16..f8338e6d3dd7 100644
612 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
613 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
614 @@ -2047,7 +2047,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
615
616 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
617 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
618 - rlwimi r5, r4, 1, DAWRX_WT
619 + rlwimi r5, r4, 2, DAWRX_WT
620 clrrdi r4, r4, 3
621 std r4, VCPU_DAWR(r3)
622 std r5, VCPU_DAWRX(r3)
623 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
624 index ac3ddf115f3d..c8fe9ab10792 100644
625 --- a/arch/powerpc/kvm/powerpc.c
626 +++ b/arch/powerpc/kvm/powerpc.c
627 @@ -915,21 +915,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
628 r = -ENXIO;
629 break;
630 }
631 - vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
632 + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
633 break;
634 case KVM_REG_PPC_VSCR:
635 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
636 r = -ENXIO;
637 break;
638 }
639 - vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
640 + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
641 break;
642 case KVM_REG_PPC_VRSAVE:
643 - if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
644 - r = -ENXIO;
645 - break;
646 - }
647 - vcpu->arch.vrsave = set_reg_val(reg->id, val);
648 + val = get_reg_val(reg->id, vcpu->arch.vrsave);
649 break;
650 #endif /* CONFIG_ALTIVEC */
651 default:
652 @@ -970,17 +966,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
653 r = -ENXIO;
654 break;
655 }
656 - val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
657 + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
658 break;
659 case KVM_REG_PPC_VSCR:
660 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
661 r = -ENXIO;
662 break;
663 }
664 - val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
665 + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
666 break;
667 case KVM_REG_PPC_VRSAVE:
668 - val = get_reg_val(reg->id, vcpu->arch.vrsave);
669 + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
670 + r = -ENXIO;
671 + break;
672 + }
673 + vcpu->arch.vrsave = set_reg_val(reg->id, val);
674 break;
675 #endif /* CONFIG_ALTIVEC */
676 default:
677 diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
678 index 4d1ee88864e8..18c8b819b0aa 100644
679 --- a/arch/s390/mm/extable.c
680 +++ b/arch/s390/mm/extable.c
681 @@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
682 int i;
683
684 /* Normalize entries to being relative to the start of the section */
685 - for (p = start, i = 0; p < finish; p++, i += 8)
686 + for (p = start, i = 0; p < finish; p++, i += 8) {
687 p->insn += i;
688 + p->fixup += i + 4;
689 + }
690 sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
691 /* Denormalize all entries */
692 - for (p = start, i = 0; p < finish; p++, i += 8)
693 + for (p = start, i = 0; p < finish; p++, i += 8) {
694 p->insn -= i;
695 + p->fixup -= i + 4;
696 + }
697 }
698
699 #ifdef CONFIG_MODULES
700 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
701 index 30e7ddb27a3a..c690c8e16a96 100644
702 --- a/arch/sparc/kernel/sys_sparc_64.c
703 +++ b/arch/sparc/kernel/sys_sparc_64.c
704 @@ -413,7 +413,7 @@ out:
705
706 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
707 {
708 - int ret;
709 + long ret;
710
711 if (personality(current->personality) == PER_LINUX32 &&
712 personality(personality) == PER_LINUX)
713 diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
714 index 47f1ff056a54..22a358ef1b0c 100644
715 --- a/arch/um/os-Linux/start_up.c
716 +++ b/arch/um/os-Linux/start_up.c
717 @@ -94,6 +94,8 @@ static int start_ptraced_child(void)
718 {
719 int pid, n, status;
720
721 + fflush(stdout);
722 +
723 pid = fork();
724 if (pid == 0)
725 ptrace_child();
726 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
727 index 78f0c8cbe316..74fcdf3f1534 100644
728 --- a/arch/x86/include/asm/pgtable_types.h
729 +++ b/arch/x86/include/asm/pgtable_types.h
730 @@ -337,20 +337,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
731 }
732 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
733 {
734 + pgprotval_t val = pgprot_val(pgprot);
735 pgprot_t new;
736 - unsigned long val;
737
738 - val = pgprot_val(pgprot);
739 pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
740 ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
741 return new;
742 }
743 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
744 {
745 + pgprotval_t val = pgprot_val(pgprot);
746 pgprot_t new;
747 - unsigned long val;
748
749 - val = pgprot_val(pgprot);
750 pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
751 ((val & _PAGE_PAT_LARGE) >>
752 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
753 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
754 index 89af288ec674..2dd9b3ad3bb5 100644
755 --- a/arch/x86/mm/pageattr.c
756 +++ b/arch/x86/mm/pageattr.c
757 @@ -33,7 +33,7 @@ struct cpa_data {
758 pgd_t *pgd;
759 pgprot_t mask_set;
760 pgprot_t mask_clr;
761 - int numpages;
762 + unsigned long numpages;
763 int flags;
764 unsigned long pfn;
765 unsigned force_split : 1;
766 @@ -1324,7 +1324,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
767 * CPA operation. Either a large page has been
768 * preserved or a single page update happened.
769 */
770 - BUG_ON(cpa->numpages > numpages);
771 + BUG_ON(cpa->numpages > numpages || !cpa->numpages);
772 numpages -= cpa->numpages;
773 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
774 cpa->curpage++;
775 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
776 index f22cc56fd1b3..9641b74b53ef 100644
777 --- a/crypto/af_alg.c
778 +++ b/crypto/af_alg.c
779 @@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
780 goto unlock;
781
782 type->ops->owner = THIS_MODULE;
783 + if (type->ops_nokey)
784 + type->ops_nokey->owner = THIS_MODULE;
785 node->type = type;
786 list_add(&node->list, &alg_types);
787 err = 0;
788 @@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
789 }
790 EXPORT_SYMBOL_GPL(af_alg_release);
791
792 +void af_alg_release_parent(struct sock *sk)
793 +{
794 + struct alg_sock *ask = alg_sk(sk);
795 + unsigned int nokey = ask->nokey_refcnt;
796 + bool last = nokey && !ask->refcnt;
797 +
798 + sk = ask->parent;
799 + ask = alg_sk(sk);
800 +
801 + lock_sock(sk);
802 + ask->nokey_refcnt -= nokey;
803 + if (!last)
804 + last = !--ask->refcnt;
805 + release_sock(sk);
806 +
807 + if (last)
808 + sock_put(sk);
809 +}
810 +EXPORT_SYMBOL_GPL(af_alg_release_parent);
811 +
812 static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
813 {
814 struct sock *sk = sock->sk;
815 @@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
816 struct sockaddr_alg *sa = (void *)uaddr;
817 const struct af_alg_type *type;
818 void *private;
819 + int err;
820
821 if (sock->state == SS_CONNECTED)
822 return -EINVAL;
823 @@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
824 return PTR_ERR(private);
825 }
826
827 + err = -EBUSY;
828 lock_sock(sk);
829 + if (ask->refcnt | ask->nokey_refcnt)
830 + goto unlock;
831
832 swap(ask->type, type);
833 swap(ask->private, private);
834
835 + err = 0;
836 +
837 +unlock:
838 release_sock(sk);
839
840 alg_do_release(type, private);
841
842 - return 0;
843 + return err;
844 }
845
846 static int alg_setkey(struct sock *sk, char __user *ukey,
847 @@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
848 struct sock *sk = sock->sk;
849 struct alg_sock *ask = alg_sk(sk);
850 const struct af_alg_type *type;
851 - int err = -ENOPROTOOPT;
852 + int err = -EBUSY;
853
854 lock_sock(sk);
855 + if (ask->refcnt)
856 + goto unlock;
857 +
858 type = ask->type;
859
860 + err = -ENOPROTOOPT;
861 if (level != SOL_ALG || !type)
862 goto unlock;
863
864 @@ -235,6 +268,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
865 struct alg_sock *ask = alg_sk(sk);
866 const struct af_alg_type *type;
867 struct sock *sk2;
868 + unsigned int nokey;
869 int err;
870
871 lock_sock(sk);
872 @@ -254,20 +288,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
873 security_sk_clone(sk, sk2);
874
875 err = type->accept(ask->private, sk2);
876 - if (err) {
877 - sk_free(sk2);
878 +
879 + nokey = err == -ENOKEY;
880 + if (nokey && type->accept_nokey)
881 + err = type->accept_nokey(ask->private, sk2);
882 +
883 + if (err)
884 goto unlock;
885 - }
886
887 sk2->sk_family = PF_ALG;
888
889 - sock_hold(sk);
890 + if (nokey || !ask->refcnt++)
891 + sock_hold(sk);
892 + ask->nokey_refcnt += nokey;
893 alg_sk(sk2)->parent = sk;
894 alg_sk(sk2)->type = type;
895 + alg_sk(sk2)->nokey_refcnt = nokey;
896
897 newsock->ops = type->ops;
898 newsock->state = SS_CONNECTED;
899
900 + if (nokey)
901 + newsock->ops = type->ops_nokey;
902 +
903 err = 0;
904
905 unlock:
906 diff --git a/crypto/ahash.c b/crypto/ahash.c
907 index 9c1dc8d6106a..d19b52324cf5 100644
908 --- a/crypto/ahash.c
909 +++ b/crypto/ahash.c
910 @@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
911 struct ahash_alg *alg = crypto_ahash_alg(hash);
912
913 hash->setkey = ahash_nosetkey;
914 + hash->has_setkey = false;
915 hash->export = ahash_no_export;
916 hash->import = ahash_no_import;
917
918 @@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
919 hash->finup = alg->finup ?: ahash_def_finup;
920 hash->digest = alg->digest;
921
922 - if (alg->setkey)
923 + if (alg->setkey) {
924 hash->setkey = alg->setkey;
925 + hash->has_setkey = true;
926 + }
927 if (alg->export)
928 hash->export = alg->export;
929 if (alg->import)
930 diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
931 index 1396ad0787fc..d7a3435280d8 100644
932 --- a/crypto/algif_hash.c
933 +++ b/crypto/algif_hash.c
934 @@ -34,6 +34,11 @@ struct hash_ctx {
935 struct ahash_request req;
936 };
937
938 +struct algif_hash_tfm {
939 + struct crypto_ahash *hash;
940 + bool has_key;
941 +};
942 +
943 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
944 size_t ignored)
945 {
946 @@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
947
948 lock_sock(sk);
949 if (!ctx->more) {
950 - err = crypto_ahash_init(&ctx->req);
951 + err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
952 + &ctx->completion);
953 if (err)
954 goto unlock;
955 }
956 @@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
957 } else {
958 if (!ctx->more) {
959 err = crypto_ahash_init(&ctx->req);
960 + err = af_alg_wait_for_completion(err, &ctx->completion);
961 if (err)
962 goto unlock;
963 }
964 @@ -227,19 +234,151 @@ static struct proto_ops algif_hash_ops = {
965 .accept = hash_accept,
966 };
967
968 +static int hash_check_key(struct socket *sock)
969 +{
970 + int err = 0;
971 + struct sock *psk;
972 + struct alg_sock *pask;
973 + struct algif_hash_tfm *tfm;
974 + struct sock *sk = sock->sk;
975 + struct alg_sock *ask = alg_sk(sk);
976 +
977 + lock_sock(sk);
978 + if (ask->refcnt)
979 + goto unlock_child;
980 +
981 + psk = ask->parent;
982 + pask = alg_sk(ask->parent);
983 + tfm = pask->private;
984 +
985 + err = -ENOKEY;
986 + lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
987 + if (!tfm->has_key)
988 + goto unlock;
989 +
990 + if (!pask->refcnt++)
991 + sock_hold(psk);
992 +
993 + ask->refcnt = 1;
994 + sock_put(psk);
995 +
996 + err = 0;
997 +
998 +unlock:
999 + release_sock(psk);
1000 +unlock_child:
1001 + release_sock(sk);
1002 +
1003 + return err;
1004 +}
1005 +
1006 +static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
1007 + size_t size)
1008 +{
1009 + int err;
1010 +
1011 + err = hash_check_key(sock);
1012 + if (err)
1013 + return err;
1014 +
1015 + return hash_sendmsg(sock, msg, size);
1016 +}
1017 +
1018 +static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
1019 + int offset, size_t size, int flags)
1020 +{
1021 + int err;
1022 +
1023 + err = hash_check_key(sock);
1024 + if (err)
1025 + return err;
1026 +
1027 + return hash_sendpage(sock, page, offset, size, flags);
1028 +}
1029 +
1030 +static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
1031 + size_t ignored, int flags)
1032 +{
1033 + int err;
1034 +
1035 + err = hash_check_key(sock);
1036 + if (err)
1037 + return err;
1038 +
1039 + return hash_recvmsg(sock, msg, ignored, flags);
1040 +}
1041 +
1042 +static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
1043 + int flags)
1044 +{
1045 + int err;
1046 +
1047 + err = hash_check_key(sock);
1048 + if (err)
1049 + return err;
1050 +
1051 + return hash_accept(sock, newsock, flags);
1052 +}
1053 +
1054 +static struct proto_ops algif_hash_ops_nokey = {
1055 + .family = PF_ALG,
1056 +
1057 + .connect = sock_no_connect,
1058 + .socketpair = sock_no_socketpair,
1059 + .getname = sock_no_getname,
1060 + .ioctl = sock_no_ioctl,
1061 + .listen = sock_no_listen,
1062 + .shutdown = sock_no_shutdown,
1063 + .getsockopt = sock_no_getsockopt,
1064 + .mmap = sock_no_mmap,
1065 + .bind = sock_no_bind,
1066 + .setsockopt = sock_no_setsockopt,
1067 + .poll = sock_no_poll,
1068 +
1069 + .release = af_alg_release,
1070 + .sendmsg = hash_sendmsg_nokey,
1071 + .sendpage = hash_sendpage_nokey,
1072 + .recvmsg = hash_recvmsg_nokey,
1073 + .accept = hash_accept_nokey,
1074 +};
1075 +
1076 static void *hash_bind(const char *name, u32 type, u32 mask)
1077 {
1078 - return crypto_alloc_ahash(name, type, mask);
1079 + struct algif_hash_tfm *tfm;
1080 + struct crypto_ahash *hash;
1081 +
1082 + tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
1083 + if (!tfm)
1084 + return ERR_PTR(-ENOMEM);
1085 +
1086 + hash = crypto_alloc_ahash(name, type, mask);
1087 + if (IS_ERR(hash)) {
1088 + kfree(tfm);
1089 + return ERR_CAST(hash);
1090 + }
1091 +
1092 + tfm->hash = hash;
1093 +
1094 + return tfm;
1095 }
1096
1097 static void hash_release(void *private)
1098 {
1099 - crypto_free_ahash(private);
1100 + struct algif_hash_tfm *tfm = private;
1101 +
1102 + crypto_free_ahash(tfm->hash);
1103 + kfree(tfm);
1104 }
1105
1106 static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
1107 {
1108 - return crypto_ahash_setkey(private, key, keylen);
1109 + struct algif_hash_tfm *tfm = private;
1110 + int err;
1111 +
1112 + err = crypto_ahash_setkey(tfm->hash, key, keylen);
1113 + tfm->has_key = !err;
1114 +
1115 + return err;
1116 }
1117
1118 static void hash_sock_destruct(struct sock *sk)
1119 @@ -253,12 +392,14 @@ static void hash_sock_destruct(struct sock *sk)
1120 af_alg_release_parent(sk);
1121 }
1122
1123 -static int hash_accept_parent(void *private, struct sock *sk)
1124 +static int hash_accept_parent_nokey(void *private, struct sock *sk)
1125 {
1126 struct hash_ctx *ctx;
1127 struct alg_sock *ask = alg_sk(sk);
1128 - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
1129 - unsigned ds = crypto_ahash_digestsize(private);
1130 + struct algif_hash_tfm *tfm = private;
1131 + struct crypto_ahash *hash = tfm->hash;
1132 + unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
1133 + unsigned ds = crypto_ahash_digestsize(hash);
1134
1135 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
1136 if (!ctx)
1137 @@ -278,7 +419,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
1138
1139 ask->private = ctx;
1140
1141 - ahash_request_set_tfm(&ctx->req, private);
1142 + ahash_request_set_tfm(&ctx->req, hash);
1143 ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1144 af_alg_complete, &ctx->completion);
1145
1146 @@ -287,12 +428,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
1147 return 0;
1148 }
1149
1150 +static int hash_accept_parent(void *private, struct sock *sk)
1151 +{
1152 + struct algif_hash_tfm *tfm = private;
1153 +
1154 + if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
1155 + return -ENOKEY;
1156 +
1157 + return hash_accept_parent_nokey(private, sk);
1158 +}
1159 +
1160 static const struct af_alg_type algif_type_hash = {
1161 .bind = hash_bind,
1162 .release = hash_release,
1163 .setkey = hash_setkey,
1164 .accept = hash_accept_parent,
1165 + .accept_nokey = hash_accept_parent_nokey,
1166 .ops = &algif_hash_ops,
1167 + .ops_nokey = &algif_hash_ops_nokey,
1168 .name = "hash",
1169 .owner = THIS_MODULE
1170 };
1171 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
1172 index 945075292bc9..5bc42f9b23f0 100644
1173 --- a/crypto/algif_skcipher.c
1174 +++ b/crypto/algif_skcipher.c
1175 @@ -387,7 +387,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
1176
1177 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
1178 sg = sgl->sg;
1179 - sg_unmark_end(sg + sgl->cur);
1180 + if (sgl->cur)
1181 + sg_unmark_end(sg + sgl->cur - 1);
1182 do {
1183 i = sgl->cur;
1184 plen = min_t(int, len, PAGE_SIZE);
1185 diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
1186 index 24f17e6c5904..4c850ac474e2 100644
1187 --- a/crypto/asymmetric_keys/x509_public_key.c
1188 +++ b/crypto/asymmetric_keys/x509_public_key.c
1189 @@ -307,10 +307,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
1190 srlen = cert->raw_serial_size;
1191 q = cert->raw_serial;
1192 }
1193 - if (srlen > 1 && *q == 0) {
1194 - srlen--;
1195 - q++;
1196 - }
1197
1198 ret = -ENOMEM;
1199 desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
1200 diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
1201 index 06f1b60f02b2..4c0a0e271876 100644
1202 --- a/crypto/crc32c_generic.c
1203 +++ b/crypto/crc32c_generic.c
1204 @@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
1205 MODULE_LICENSE("GPL");
1206 MODULE_ALIAS_CRYPTO("crc32c");
1207 MODULE_ALIAS_CRYPTO("crc32c-generic");
1208 -MODULE_SOFTDEP("pre: crc32c");
1209 diff --git a/crypto/shash.c b/crypto/shash.c
1210 index 47c713954bf3..03fbcd4a82c4 100644
1211 --- a/crypto/shash.c
1212 +++ b/crypto/shash.c
1213 @@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
1214 crt->final = shash_async_final;
1215 crt->finup = shash_async_finup;
1216 crt->digest = shash_async_digest;
1217 + crt->setkey = shash_async_setkey;
1218 +
1219 + crt->has_setkey = alg->setkey != shash_no_setkey;
1220
1221 - if (alg->setkey)
1222 - crt->setkey = shash_async_setkey;
1223 if (alg->export)
1224 crt->export = shash_async_export;
1225 if (alg->import)
1226 diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
1227 index 54d946a9eee6..6fbb10ca73b1 100644
1228 --- a/drivers/block/zram/zcomp.c
1229 +++ b/drivers/block/zram/zcomp.c
1230 @@ -76,7 +76,7 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
1231 */
1232 static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
1233 {
1234 - struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
1235 + struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
1236 if (!zstrm)
1237 return NULL;
1238
1239 @@ -85,7 +85,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
1240 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
1241 * case when compressed size is larger than the original one
1242 */
1243 - zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
1244 + zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
1245 if (!zstrm->private || !zstrm->buffer) {
1246 zcomp_strm_free(comp, zstrm);
1247 zstrm = NULL;
1248 diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
1249 index f2afb7e988c3..dd6083124276 100644
1250 --- a/drivers/block/zram/zcomp_lz4.c
1251 +++ b/drivers/block/zram/zcomp_lz4.c
1252 @@ -10,17 +10,36 @@
1253 #include <linux/kernel.h>
1254 #include <linux/slab.h>
1255 #include <linux/lz4.h>
1256 +#include <linux/vmalloc.h>
1257 +#include <linux/mm.h>
1258
1259 #include "zcomp_lz4.h"
1260
1261 static void *zcomp_lz4_create(void)
1262 {
1263 - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
1264 + void *ret;
1265 +
1266 + /*
1267 + * This function can be called in swapout/fs write path
1268 + * so we can't use GFP_FS|IO. And it assumes we already
1269 + * have at least one stream in zram initialization so we
1270 + * don't do best effort to allocate more stream in here.
1271 + * A default stream will work well without further multiple
1272 + * streams. That's why we use NORETRY | NOWARN.
1273 + */
1274 + ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
1275 + __GFP_NOWARN);
1276 + if (!ret)
1277 + ret = __vmalloc(LZ4_MEM_COMPRESS,
1278 + GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
1279 + __GFP_ZERO | __GFP_HIGHMEM,
1280 + PAGE_KERNEL);
1281 + return ret;
1282 }
1283
1284 static void zcomp_lz4_destroy(void *private)
1285 {
1286 - kfree(private);
1287 + kvfree(private);
1288 }
1289
1290 static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
1291 diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
1292 index da1bc47d588e..edc549920fa0 100644
1293 --- a/drivers/block/zram/zcomp_lzo.c
1294 +++ b/drivers/block/zram/zcomp_lzo.c
1295 @@ -10,17 +10,36 @@
1296 #include <linux/kernel.h>
1297 #include <linux/slab.h>
1298 #include <linux/lzo.h>
1299 +#include <linux/vmalloc.h>
1300 +#include <linux/mm.h>
1301
1302 #include "zcomp_lzo.h"
1303
1304 static void *lzo_create(void)
1305 {
1306 - return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
1307 + void *ret;
1308 +
1309 + /*
1310 + * This function can be called in swapout/fs write path
1311 + * so we can't use GFP_FS|IO. And it assumes we already
1312 + * have at least one stream in zram initialization so we
1313 + * don't do best effort to allocate more stream in here.
1314 + * A default stream will work well without further multiple
1315 + * streams. That's why we use NORETRY | NOWARN.
1316 + */
1317 + ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
1318 + __GFP_NOWARN);
1319 + if (!ret)
1320 + ret = __vmalloc(LZO1X_MEM_COMPRESS,
1321 + GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
1322 + __GFP_ZERO | __GFP_HIGHMEM,
1323 + PAGE_KERNEL);
1324 + return ret;
1325 }
1326
1327 static void lzo_destroy(void *private)
1328 {
1329 - kfree(private);
1330 + kvfree(private);
1331 }
1332
1333 static int lzo_compress(const unsigned char *src, unsigned char *dst,
1334 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1335 index 7bf87d9bfd7d..fdba79c3877c 100644
1336 --- a/drivers/bluetooth/btusb.c
1337 +++ b/drivers/bluetooth/btusb.c
1338 @@ -144,6 +144,10 @@ static const struct usb_device_id btusb_table[] = {
1339 { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
1340 .driver_info = BTUSB_BCM_PATCHRAM },
1341
1342 + /* Toshiba Corp - Broadcom based */
1343 + { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
1344 + .driver_info = BTUSB_BCM_PATCHRAM },
1345 +
1346 /* Intel Bluetooth USB Bootloader (RAM module) */
1347 { USB_DEVICE(0x8087, 0x0a5a),
1348 .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
1349 diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
1350 index 1098ed3b9b89..dc45ddb36117 100644
1351 --- a/drivers/clocksource/vt8500_timer.c
1352 +++ b/drivers/clocksource/vt8500_timer.c
1353 @@ -50,6 +50,8 @@
1354
1355 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
1356
1357 +#define MIN_OSCR_DELTA 16
1358 +
1359 static void __iomem *regbase;
1360
1361 static cycle_t vt8500_timer_read(struct clocksource *cs)
1362 @@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
1363 cpu_relax();
1364 writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
1365
1366 - if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
1367 + if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
1368 return -ETIME;
1369
1370 writel(1, regbase + TIMER_IER_VAL);
1371 @@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np)
1372 pr_err("%s: setup_irq failed for %s\n", __func__,
1373 clockevent.name);
1374 clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
1375 - 4, 0xf0000000);
1376 + MIN_OSCR_DELTA * 2, 0xf0000000);
1377 }
1378
1379 CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
1380 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1381 index c89a7abb523f..8d8c35623f2a 100644
1382 --- a/drivers/dma/at_xdmac.c
1383 +++ b/drivers/dma/at_xdmac.c
1384 @@ -1230,6 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1385 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1386 at_xdmac_remove_xfer(atchan, desc);
1387
1388 + clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1389 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1390 spin_unlock_irqrestore(&atchan->lock, flags);
1391
1392 @@ -1362,6 +1363,8 @@ static int atmel_xdmac_resume(struct device *dev)
1393 atchan = to_at_xdmac_chan(chan);
1394 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1395 if (at_xdmac_chan_is_cyclic(atchan)) {
1396 + if (at_xdmac_chan_is_paused(atchan))
1397 + at_xdmac_device_resume(chan);
1398 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1399 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1400 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1401 diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
1402 index 9e504d3b0d4f..303d937d63c7 100644
1403 --- a/drivers/dma/dw/core.c
1404 +++ b/drivers/dma/dw/core.c
1405 @@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
1406
1407 /* Enable interrupts */
1408 channel_set_bit(dw, MASK.XFER, dwc->mask);
1409 + channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1410 channel_set_bit(dw, MASK.ERROR, dwc->mask);
1411
1412 dwc->initialized = true;
1413 @@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
1414
1415 /* Called with dwc->lock held and all DMAC interrupts disabled */
1416 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1417 - u32 status_err, u32 status_xfer)
1418 + u32 status_block, u32 status_err, u32 status_xfer)
1419 {
1420 unsigned long flags;
1421
1422 - if (dwc->mask) {
1423 + if (status_block & dwc->mask) {
1424 void (*callback)(void *param);
1425 void *callback_param;
1426
1427 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
1428 channel_readl(dwc, LLP));
1429 + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1430
1431 callback = dwc->cdesc->period_callback;
1432 callback_param = dwc->cdesc->period_callback_param;
1433 @@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1434 channel_writel(dwc, CTL_LO, 0);
1435 channel_writel(dwc, CTL_HI, 0);
1436
1437 + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1438 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1439 dma_writel(dw, CLEAR.XFER, dwc->mask);
1440
1441 @@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
1442 {
1443 struct dw_dma *dw = (struct dw_dma *)data;
1444 struct dw_dma_chan *dwc;
1445 + u32 status_block;
1446 u32 status_xfer;
1447 u32 status_err;
1448 int i;
1449
1450 + status_block = dma_readl(dw, RAW.BLOCK);
1451 status_xfer = dma_readl(dw, RAW.XFER);
1452 status_err = dma_readl(dw, RAW.ERROR);
1453
1454 @@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
1455 for (i = 0; i < dw->dma.chancnt; i++) {
1456 dwc = &dw->chan[i];
1457 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
1458 - dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
1459 + dwc_handle_cyclic(dw, dwc, status_block, status_err,
1460 + status_xfer);
1461 else if (status_err & (1 << i))
1462 dwc_handle_error(dw, dwc);
1463 else if (status_xfer & (1 << i))
1464 @@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
1465 * Re-enable interrupts.
1466 */
1467 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
1468 + channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1469 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
1470 }
1471
1472 @@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
1473 * softirq handler.
1474 */
1475 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1476 + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1477 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1478
1479 status = dma_readl(dw, STATUS_INT);
1480 @@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
1481
1482 /* Try to recover */
1483 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
1484 + channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
1485 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
1486 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
1487 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
1488 @@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
1489 dma_writel(dw, CFG, 0);
1490
1491 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1492 + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1493 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1494 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1495 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1496 @@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1497
1498 /* Disable interrupts */
1499 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1500 + channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1501 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1502
1503 spin_unlock_irqrestore(&dwc->lock, flags);
1504 @@ -1245,7 +1256,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1505 int dw_dma_cyclic_start(struct dma_chan *chan)
1506 {
1507 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1508 - struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1509 unsigned long flags;
1510
1511 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1512 @@ -1254,27 +1264,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1513 }
1514
1515 spin_lock_irqsave(&dwc->lock, flags);
1516 -
1517 - /* Assert channel is idle */
1518 - if (dma_readl(dw, CH_EN) & dwc->mask) {
1519 - dev_err(chan2dev(&dwc->chan),
1520 - "%s: BUG: Attempted to start non-idle channel\n",
1521 - __func__);
1522 - dwc_dump_chan_regs(dwc);
1523 - spin_unlock_irqrestore(&dwc->lock, flags);
1524 - return -EBUSY;
1525 - }
1526 -
1527 - dma_writel(dw, CLEAR.ERROR, dwc->mask);
1528 - dma_writel(dw, CLEAR.XFER, dwc->mask);
1529 -
1530 - /* Setup DMAC channel registers */
1531 - channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1532 - channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1533 - channel_writel(dwc, CTL_HI, 0);
1534 -
1535 - channel_set_bit(dw, CH_EN, dwc->mask);
1536 -
1537 + dwc_dostart(dwc, dwc->cdesc->desc[0]);
1538 spin_unlock_irqrestore(&dwc->lock, flags);
1539
1540 return 0;
1541 @@ -1479,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1542
1543 dwc_chan_disable(dw, dwc);
1544
1545 + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1546 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1547 dma_writel(dw, CLEAR.XFER, dwc->mask);
1548
1549 @@ -1569,9 +1560,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1550 /* Force dma off, just in case */
1551 dw_dma_off(dw);
1552
1553 - /* Disable BLOCK interrupts as well */
1554 - channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1555 -
1556 /* Create a pool of consistent memory blocks for hardware descriptors */
1557 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1558 sizeof(struct dw_desc), 4, 0);
1559 diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
1560 index 592af5f0cf39..53587377e672 100644
1561 --- a/drivers/edac/edac_device.c
1562 +++ b/drivers/edac/edac_device.c
1563 @@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
1564 */
1565 void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
1566 {
1567 - int status;
1568 -
1569 if (!edac_dev->edac_check)
1570 return;
1571
1572 - status = cancel_delayed_work(&edac_dev->work);
1573 - if (status == 0) {
1574 - /* workq instance might be running, wait for it */
1575 - flush_workqueue(edac_workqueue);
1576 - }
1577 + edac_dev->op_state = OP_OFFLINE;
1578 +
1579 + cancel_delayed_work_sync(&edac_dev->work);
1580 + flush_workqueue(edac_workqueue);
1581 }
1582
1583 /*
1584 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
1585 index af3be1914dbb..63ceb2d98565 100644
1586 --- a/drivers/edac/edac_mc.c
1587 +++ b/drivers/edac/edac_mc.c
1588 @@ -581,18 +581,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
1589 */
1590 static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
1591 {
1592 - int status;
1593 -
1594 - if (mci->op_state != OP_RUNNING_POLL)
1595 - return;
1596 -
1597 - status = cancel_delayed_work(&mci->work);
1598 - if (status == 0) {
1599 - edac_dbg(0, "not canceled, flush the queue\n");
1600 + mci->op_state = OP_OFFLINE;
1601
1602 - /* workq instance might be running, wait for it */
1603 - flush_workqueue(edac_workqueue);
1604 - }
1605 + cancel_delayed_work_sync(&mci->work);
1606 + flush_workqueue(edac_workqueue);
1607 }
1608
1609 /*
1610 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
1611 index 112d63ad1154..67dc90365389 100644
1612 --- a/drivers/edac/edac_mc_sysfs.c
1613 +++ b/drivers/edac/edac_mc_sysfs.c
1614 @@ -977,21 +977,26 @@ nomem:
1615 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
1616 const struct attribute_group **groups)
1617 {
1618 + char *name;
1619 int i, err;
1620
1621 /*
1622 * The memory controller needs its own bus, in order to avoid
1623 * namespace conflicts at /sys/bus/edac.
1624 */
1625 - mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
1626 - if (!mci->bus->name)
1627 + name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
1628 + if (!name)
1629 return -ENOMEM;
1630
1631 + mci->bus->name = name;
1632 +
1633 edac_dbg(0, "creating bus %s\n", mci->bus->name);
1634
1635 err = bus_register(mci->bus);
1636 - if (err < 0)
1637 - goto fail_free_name;
1638 + if (err < 0) {
1639 + kfree(name);
1640 + return err;
1641 + }
1642
1643 /* get the /sys/devices/system/edac subsys reference */
1644 mci->dev.type = &mci_attr_type;
1645 @@ -1060,8 +1065,8 @@ fail_unregister_dimm:
1646 device_unregister(&mci->dev);
1647 fail_unregister_bus:
1648 bus_unregister(mci->bus);
1649 -fail_free_name:
1650 - kfree(mci->bus->name);
1651 + kfree(name);
1652 +
1653 return err;
1654 }
1655
1656 @@ -1092,10 +1097,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1657
1658 void edac_unregister_sysfs(struct mem_ctl_info *mci)
1659 {
1660 + const char *name = mci->bus->name;
1661 +
1662 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1663 device_unregister(&mci->dev);
1664 bus_unregister(mci->bus);
1665 - kfree(mci->bus->name);
1666 + kfree(name);
1667 }
1668
1669 static void mc_attr_release(struct device *dev)
1670 diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
1671 index 2cf44b4db80c..b4b38603b804 100644
1672 --- a/drivers/edac/edac_pci.c
1673 +++ b/drivers/edac/edac_pci.c
1674 @@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
1675 */
1676 static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
1677 {
1678 - int status;
1679 -
1680 edac_dbg(0, "\n");
1681
1682 - status = cancel_delayed_work(&pci->work);
1683 - if (status == 0)
1684 - flush_workqueue(edac_workqueue);
1685 + pci->op_state = OP_OFFLINE;
1686 +
1687 + cancel_delayed_work_sync(&pci->work);
1688 + flush_workqueue(edac_workqueue);
1689 }
1690
1691 /*
1692 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1693 index 109e776345d3..0ec9ad50ba7c 100644
1694 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1695 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1696 @@ -861,28 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
1697 {
1698 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
1699 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1700 +
1701 if (!port->input) {
1702 port->vcpi.num_slots = 0;
1703
1704 kfree(port->cached_edid);
1705
1706 - /* we can't destroy the connector here, as
1707 - we might be holding the mode_config.mutex
1708 - from an EDID retrieval */
1709 + /*
1710 + * The only time we don't have a connector
1711 + * on an output port is if the connector init
1712 + * fails.
1713 + */
1714 if (port->connector) {
1715 + /* we can't destroy the connector here, as
1716 + * we might be holding the mode_config.mutex
1717 + * from an EDID retrieval */
1718 +
1719 mutex_lock(&mgr->destroy_connector_lock);
1720 - list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
1721 + list_add(&port->next, &mgr->destroy_connector_list);
1722 mutex_unlock(&mgr->destroy_connector_lock);
1723 schedule_work(&mgr->destroy_connector_work);
1724 + return;
1725 }
1726 + /* no need to clean up vcpi,
1727 + * as if we have no connector we never set up a vcpi */
1728 drm_dp_port_teardown_pdt(port, port->pdt);
1729 -
1730 - if (!port->input && port->vcpi.vcpi > 0)
1731 - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1732 }
1733 kfree(port);
1734 -
1735 - (*mgr->cbs->hotplug)(mgr);
1736 }
1737
1738 static void drm_dp_put_port(struct drm_dp_mst_port *port)
1739 @@ -968,17 +973,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
1740 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1741 u8 *rad)
1742 {
1743 - int lct = port->parent->lct;
1744 + int parent_lct = port->parent->lct;
1745 int shift = 4;
1746 - int idx = lct / 2;
1747 - if (lct > 1) {
1748 - memcpy(rad, port->parent->rad, idx);
1749 - shift = (lct % 2) ? 4 : 0;
1750 + int idx = (parent_lct - 1) / 2;
1751 + if (parent_lct > 1) {
1752 + memcpy(rad, port->parent->rad, idx + 1);
1753 + shift = (parent_lct % 2) ? 4 : 0;
1754 } else
1755 rad[0] = 0;
1756
1757 rad[idx] |= port->port_num << shift;
1758 - return lct + 1;
1759 + return parent_lct + 1;
1760 }
1761
1762 /*
1763 @@ -1034,7 +1039,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
1764 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1765 for (i = 0; i < (mstb->lct - 1); i++) {
1766 int shift = (i % 2) ? 0 : 4;
1767 - int port_num = mstb->rad[i / 2] >> shift;
1768 + int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1769 snprintf(temp, sizeof(temp), "-%d", port_num);
1770 strlcat(proppath, temp, proppath_size);
1771 }
1772 @@ -1112,12 +1117,21 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1773 char proppath[255];
1774 build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
1775 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1776 -
1777 + if (!port->connector) {
1778 + /* remove it from the port list */
1779 + mutex_lock(&mstb->mgr->lock);
1780 + list_del(&port->next);
1781 + mutex_unlock(&mstb->mgr->lock);
1782 + /* drop port list reference */
1783 + drm_dp_put_port(port);
1784 + goto out;
1785 + }
1786 if (port->port_num >= 8) {
1787 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1788 }
1789 }
1790
1791 +out:
1792 /* put reference to this port */
1793 drm_dp_put_port(port);
1794 }
1795 @@ -1175,7 +1189,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1796
1797 for (i = 0; i < lct - 1; i++) {
1798 int shift = (i % 2) ? 0 : 4;
1799 - int port_num = rad[i / 2] >> shift;
1800 + int port_num = (rad[i / 2] >> shift) & 0xf;
1801
1802 list_for_each_entry(port, &mstb->ports, next) {
1803 if (port->port_num == port_num) {
1804 @@ -1195,6 +1209,50 @@ out:
1805 return mstb;
1806 }
1807
1808 +static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1809 + struct drm_dp_mst_branch *mstb,
1810 + uint8_t *guid)
1811 +{
1812 + struct drm_dp_mst_branch *found_mstb;
1813 + struct drm_dp_mst_port *port;
1814 +
1815 + list_for_each_entry(port, &mstb->ports, next) {
1816 + if (!port->mstb)
1817 + continue;
1818 +
1819 + if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
1820 + return port->mstb;
1821 +
1822 + found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1823 +
1824 + if (found_mstb)
1825 + return found_mstb;
1826 + }
1827 +
1828 + return NULL;
1829 +}
1830 +
1831 +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1832 + struct drm_dp_mst_topology_mgr *mgr,
1833 + uint8_t *guid)
1834 +{
1835 + struct drm_dp_mst_branch *mstb;
1836 +
1837 + /* find the port by iterating down */
1838 + mutex_lock(&mgr->lock);
1839 +
1840 + if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
1841 + mstb = mgr->mst_primary;
1842 + else
1843 + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1844 +
1845 + if (mstb)
1846 + kref_get(&mstb->kref);
1847 +
1848 + mutex_unlock(&mgr->lock);
1849 + return mstb;
1850 +}
1851 +
1852 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1853 struct drm_dp_mst_branch *mstb)
1854 {
1855 @@ -1306,6 +1364,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1856 struct drm_dp_sideband_msg_tx *txmsg)
1857 {
1858 struct drm_dp_mst_branch *mstb = txmsg->dst;
1859 + u8 req_type;
1860
1861 /* both msg slots are full */
1862 if (txmsg->seqno == -1) {
1863 @@ -1322,7 +1381,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1864 txmsg->seqno = 1;
1865 mstb->tx_slots[txmsg->seqno] = txmsg;
1866 }
1867 - hdr->broadcast = 0;
1868 +
1869 + req_type = txmsg->msg[0] & 0x7f;
1870 + if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1871 + req_type == DP_RESOURCE_STATUS_NOTIFY)
1872 + hdr->broadcast = 1;
1873 + else
1874 + hdr->broadcast = 0;
1875 hdr->path_msg = txmsg->path_msg;
1876 hdr->lct = mstb->lct;
1877 hdr->lcr = mstb->lct - 1;
1878 @@ -1424,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1879 }
1880
1881 /* called holding qlock */
1882 -static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1883 +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1884 + struct drm_dp_sideband_msg_tx *txmsg)
1885 {
1886 - struct drm_dp_sideband_msg_tx *txmsg;
1887 int ret;
1888
1889 /* construct a chunk from the first msg in the tx_msg queue */
1890 - if (list_empty(&mgr->tx_msg_upq)) {
1891 - mgr->tx_up_in_progress = false;
1892 - return;
1893 - }
1894 -
1895 - txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1896 ret = process_single_tx_qlock(mgr, txmsg, true);
1897 - if (ret == 1) {
1898 - /* up txmsgs aren't put in slots - so free after we send it */
1899 - list_del(&txmsg->next);
1900 - kfree(txmsg);
1901 - } else if (ret)
1902 +
1903 + if (ret != 1)
1904 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1905 - mgr->tx_up_in_progress = true;
1906 +
1907 + txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1908 }
1909
1910 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1911 @@ -1828,11 +1885,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1912 drm_dp_encode_up_ack_reply(txmsg, req_type);
1913
1914 mutex_lock(&mgr->qlock);
1915 - list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
1916 - if (!mgr->tx_up_in_progress) {
1917 - process_single_up_tx_qlock(mgr);
1918 - }
1919 +
1920 + process_single_up_tx_qlock(mgr, txmsg);
1921 +
1922 mutex_unlock(&mgr->qlock);
1923 +
1924 + kfree(txmsg);
1925 return 0;
1926 }
1927
1928 @@ -2129,28 +2187,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
1929
1930 if (mgr->up_req_recv.have_eomt) {
1931 struct drm_dp_sideband_msg_req_body msg;
1932 - struct drm_dp_mst_branch *mstb;
1933 + struct drm_dp_mst_branch *mstb = NULL;
1934 bool seqno;
1935 - mstb = drm_dp_get_mst_branch_device(mgr,
1936 - mgr->up_req_recv.initial_hdr.lct,
1937 - mgr->up_req_recv.initial_hdr.rad);
1938 - if (!mstb) {
1939 - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
1940 - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1941 - return 0;
1942 +
1943 + if (!mgr->up_req_recv.initial_hdr.broadcast) {
1944 + mstb = drm_dp_get_mst_branch_device(mgr,
1945 + mgr->up_req_recv.initial_hdr.lct,
1946 + mgr->up_req_recv.initial_hdr.rad);
1947 + if (!mstb) {
1948 + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
1949 + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1950 + return 0;
1951 + }
1952 }
1953
1954 seqno = mgr->up_req_recv.initial_hdr.seqno;
1955 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
1956
1957 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
1958 - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
1959 + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
1960 +
1961 + if (!mstb)
1962 + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
1963 +
1964 + if (!mstb) {
1965 + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
1966 + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1967 + return 0;
1968 + }
1969 +
1970 drm_dp_update_port(mstb, &msg.u.conn_stat);
1971 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
1972 (*mgr->cbs->hotplug)(mgr);
1973
1974 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
1975 - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
1976 + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
1977 + if (!mstb)
1978 + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
1979 +
1980 + if (!mstb) {
1981 + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
1982 + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1983 + return 0;
1984 + }
1985 +
1986 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
1987 }
1988
1989 @@ -2330,6 +2410,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
1990 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
1991 if (pbn == port->vcpi.pbn) {
1992 *slots = port->vcpi.num_slots;
1993 + drm_dp_put_port(port);
1994 return true;
1995 }
1996 }
1997 @@ -2489,32 +2570,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
1998 */
1999 int drm_dp_calc_pbn_mode(int clock, int bpp)
2000 {
2001 - fixed20_12 pix_bw;
2002 - fixed20_12 fbpp;
2003 - fixed20_12 result;
2004 - fixed20_12 margin, tmp;
2005 - u32 res;
2006 -
2007 - pix_bw.full = dfixed_const(clock);
2008 - fbpp.full = dfixed_const(bpp);
2009 - tmp.full = dfixed_const(8);
2010 - fbpp.full = dfixed_div(fbpp, tmp);
2011 -
2012 - result.full = dfixed_mul(pix_bw, fbpp);
2013 - margin.full = dfixed_const(54);
2014 - tmp.full = dfixed_const(64);
2015 - margin.full = dfixed_div(margin, tmp);
2016 - result.full = dfixed_div(result, margin);
2017 -
2018 - margin.full = dfixed_const(1006);
2019 - tmp.full = dfixed_const(1000);
2020 - margin.full = dfixed_div(margin, tmp);
2021 - result.full = dfixed_mul(result, margin);
2022 -
2023 - result.full = dfixed_div(result, tmp);
2024 - result.full = dfixed_ceil(result);
2025 - res = dfixed_trunc(result);
2026 - return res;
2027 + u64 kbps;
2028 + s64 peak_kbps;
2029 + u32 numerator;
2030 + u32 denominator;
2031 +
2032 + kbps = clock * bpp;
2033 +
2034 + /*
2035 + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2036 + * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
2037 + * common multiplier that yields an integer PBN for all link rate/lane
2038 + * count combinations.
2039 + * So we calculate:
2040 + * peak_kbps *= (1006/1000)
2041 + * peak_kbps *= (64/54)
2042 + * peak_kbps *= 8 convert to bytes
2043 + */
2044 +
2045 + numerator = 64 * 1006;
2046 + denominator = 54 * 8 * 1000 * 1000;
2047 +
2048 + kbps *= numerator;
2049 + peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2050 +
2051 + return drm_fixp2int_ceil(peak_kbps);
2052 }
2053 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
2054
2055 @@ -2522,11 +2602,23 @@ static int test_calc_pbn_mode(void)
2056 {
2057 int ret;
2058 ret = drm_dp_calc_pbn_mode(154000, 30);
2059 - if (ret != 689)
2060 + if (ret != 689) {
2061 + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2062 + 154000, 30, 689, ret);
2063 return -EINVAL;
2064 + }
2065 ret = drm_dp_calc_pbn_mode(234000, 30);
2066 - if (ret != 1047)
2067 + if (ret != 1047) {
2068 + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2069 + 234000, 30, 1047, ret);
2070 return -EINVAL;
2071 + }
2072 + ret = drm_dp_calc_pbn_mode(297000, 24);
2073 + if (ret != 1063) {
2074 + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2075 + 297000, 24, 1063, ret);
2076 + return -EINVAL;
2077 + }
2078 return 0;
2079 }
2080
2081 @@ -2660,8 +2752,8 @@ static void drm_dp_tx_work(struct work_struct *work)
2082 static void drm_dp_destroy_connector_work(struct work_struct *work)
2083 {
2084 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2085 - struct drm_connector *connector;
2086 -
2087 + struct drm_dp_mst_port *port;
2088 + bool send_hotplug = false;
2089 /*
2090 * Not a regular list traverse as we have to drop the destroy
2091 * connector lock before destroying the connector, to avoid AB->BA
2092 @@ -2669,16 +2761,25 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2093 */
2094 for (;;) {
2095 mutex_lock(&mgr->destroy_connector_lock);
2096 - connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
2097 - if (!connector) {
2098 + port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2099 + if (!port) {
2100 mutex_unlock(&mgr->destroy_connector_lock);
2101 break;
2102 }
2103 - list_del(&connector->destroy_list);
2104 + list_del(&port->next);
2105 mutex_unlock(&mgr->destroy_connector_lock);
2106
2107 - mgr->cbs->destroy_connector(mgr, connector);
2108 + mgr->cbs->destroy_connector(mgr, port->connector);
2109 +
2110 + drm_dp_port_teardown_pdt(port, port->pdt);
2111 +
2112 + if (!port->input && port->vcpi.vcpi > 0)
2113 + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2114 + kfree(port);
2115 + send_hotplug = true;
2116 }
2117 + if (send_hotplug)
2118 + (*mgr->cbs->hotplug)(mgr);
2119 }
2120
2121 /**
2122 @@ -2701,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2123 mutex_init(&mgr->qlock);
2124 mutex_init(&mgr->payload_lock);
2125 mutex_init(&mgr->destroy_connector_lock);
2126 - INIT_LIST_HEAD(&mgr->tx_msg_upq);
2127 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2128 INIT_LIST_HEAD(&mgr->destroy_connector_list);
2129 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
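
The rewritten drm_dp_calc_pbn_mode() above boils down to one integer fraction, PBN = ceil(clock_kHz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)), which the extended self-test pins to 689, 1047 and 1063. A standalone check of that arithmetic (plain C with a manual round-up, not the kernel's drm_fixp helpers):

#include <stdint.h>
#include <stdio.h>

/* PBN = ceil(clock_kHz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)) */
static unsigned int calc_pbn(unsigned int clock_khz, unsigned int bpp)
{
        uint64_t num = (uint64_t)clock_khz * bpp * 64 * 1006;
        uint64_t den = 54ULL * 8 * 1000 * 1000;

        return (unsigned int)((num + den - 1) / den);   /* round up */
}

int main(void)
{
        printf("%u %u %u\n",
               calc_pbn(154000, 30),    /* 689  */
               calc_pbn(234000, 30),    /* 1047 */
               calc_pbn(297000, 24));   /* 1063 */
        return 0;
}
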
2130 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
2131 index 63503879a676..0d75e75b1da3 100644
2132 --- a/drivers/gpu/drm/drm_probe_helper.c
2133 +++ b/drivers/gpu/drm/drm_probe_helper.c
2134 @@ -195,7 +195,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
2135 mode_flags |= DRM_MODE_FLAG_3D_MASK;
2136
2137 list_for_each_entry(mode, &connector->modes, head) {
2138 - mode->status = drm_mode_validate_basic(mode);
2139 + if (mode->status == MODE_OK)
2140 + mode->status = drm_mode_validate_basic(mode);
2141
2142 if (mode->status == MODE_OK)
2143 mode->status = drm_mode_validate_size(mode, maxX, maxY);
2144 diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
2145 index f3e84c44d009..4decf518d106 100644
2146 --- a/drivers/gpu/drm/i915/i915_gem_context.c
2147 +++ b/drivers/gpu/drm/i915/i915_gem_context.c
2148 @@ -317,6 +317,10 @@ void i915_gem_context_reset(struct drm_device *dev)
2149 i915_gem_context_unreference(lctx);
2150 ring->last_context = NULL;
2151 }
2152 +
2153 + /* Force the GPU state to be reinitialised on enabling */
2154 + if (ring->default_context)
2155 + ring->default_context->legacy_hw_ctx.initialized = false;
2156 }
2157 }
2158
2159 @@ -704,7 +708,7 @@ static int do_switch(struct intel_engine_cs *ring,
2160 goto unpin_out;
2161 }
2162
2163 - if (!to->legacy_hw_ctx.initialized) {
2164 + if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
2165 hw_flags |= MI_RESTORE_INHIBIT;
2166 /* NB: If we inhibit the restore, the context is not allowed to
2167 * die because future work may end up depending on valid address
2168 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2169 index 7b27a114b030..b103773df2a3 100644
2170 --- a/drivers/gpu/drm/i915/intel_display.c
2171 +++ b/drivers/gpu/drm/i915/intel_display.c
2172 @@ -10391,11 +10391,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
2173 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
2174 }
2175
2176 - /* Clamp bpp to 8 on screens without EDID 1.4 */
2177 - if (connector->base.display_info.bpc == 0 && bpp > 24) {
2178 - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
2179 - bpp);
2180 - pipe_config->pipe_bpp = 24;
2181 + /* Clamp bpp to default limit on screens without EDID 1.4 */
2182 + if (connector->base.display_info.bpc == 0) {
2183 + int type = connector->base.connector_type;
2184 + int clamp_bpp = 24;
2185 +
2186 + /* Fall back to 18 bpp when DP sink capability is unknown. */
2187 + if (type == DRM_MODE_CONNECTOR_DisplayPort ||
2188 + type == DRM_MODE_CONNECTOR_eDP)
2189 + clamp_bpp = 18;
2190 +
2191 + if (bpp > clamp_bpp) {
2192 + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
2193 + bpp, clamp_bpp);
2194 + pipe_config->pipe_bpp = clamp_bpp;
2195 + }
2196 }
2197 }
2198
2199 diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
2200 index 5cb47482d29f..88c557551b89 100644
2201 --- a/drivers/gpu/drm/i915/intel_dp_mst.c
2202 +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
2203 @@ -439,9 +439,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
2204
2205 drm_mode_connector_set_path_property(connector, pathprop);
2206 drm_reinit_primary_mode_group(dev);
2207 - mutex_lock(&dev->mode_config.mutex);
2208 + drm_modeset_lock_all(dev);
2209 intel_connector_add_to_fbdev(intel_connector);
2210 - mutex_unlock(&dev->mode_config.mutex);
2211 + drm_modeset_unlock_all(dev);
2212 drm_connector_register(&intel_connector->base);
2213 return connector;
2214 }
2215 @@ -452,16 +452,16 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2216 struct intel_connector *intel_connector = to_intel_connector(connector);
2217 struct drm_device *dev = connector->dev;
2218 /* need to nuke the connector */
2219 - mutex_lock(&dev->mode_config.mutex);
2220 + drm_modeset_lock_all(dev);
2221 intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2222 - mutex_unlock(&dev->mode_config.mutex);
2223 + drm_modeset_unlock_all(dev);
2224
2225 intel_connector->unregister(intel_connector);
2226
2227 - mutex_lock(&dev->mode_config.mutex);
2228 + drm_modeset_lock_all(dev);
2229 intel_connector_remove_from_fbdev(intel_connector);
2230 drm_connector_cleanup(connector);
2231 - mutex_unlock(&dev->mode_config.mutex);
2232 + drm_modeset_unlock_all(dev);
2233
2234 drm_reinit_primary_mode_group(dev);
2235
2236 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
2237 index 3162040bc314..05490ef5a2aa 100644
2238 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
2239 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
2240 @@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
2241
2242 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
2243
2244 + mutex_lock(&drm->dev->mode_config.mutex);
2245 if (plugged)
2246 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2247 else
2248 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2249 + mutex_unlock(&drm->dev->mode_config.mutex);
2250 +
2251 drm_helper_hpd_irq_event(connector->dev);
2252 }
2253
2254 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
2255 index 5be50ef2b30e..bb292143997e 100644
2256 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
2257 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
2258 @@ -2310,8 +2310,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2259 encoder_mode = atombios_get_encoder_mode(encoder);
2260 if (connector && (radeon_audio != 0) &&
2261 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2262 - (ENCODER_MODE_IS_DP(encoder_mode) &&
2263 - drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2264 + ENCODER_MODE_IS_DP(encoder_mode)))
2265 radeon_audio_mode_set(encoder, adjusted_mode);
2266 }
2267
2268 diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
2269 index 44480c1b9738..848b1ffd5cc4 100644
2270 --- a/drivers/gpu/drm/radeon/dce6_afmt.c
2271 +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
2272 @@ -282,6 +282,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
2273 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
2274 */
2275 if (ASIC_IS_DCE8(rdev)) {
2276 + unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
2277 + DENTIST_DPREFCLK_WDIVIDER_MASK) >>
2278 + DENTIST_DPREFCLK_WDIVIDER_SHIFT;
2279 + div = radeon_audio_decode_dfs_div(div);
2280 +
2281 + if (div)
2282 + clock = clock * 100 / div;
2283 +
2284 WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
2285 WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
2286 } else {
2287 diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
2288 index 9953356fe263..3cf04a2f44bb 100644
2289 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
2290 +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
2291 @@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
2292 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
2293 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
2294 */
2295 + if (ASIC_IS_DCE41(rdev)) {
2296 + unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
2297 + DENTIST_DPREFCLK_WDIVIDER_MASK) >>
2298 + DENTIST_DPREFCLK_WDIVIDER_SHIFT;
2299 + div = radeon_audio_decode_dfs_div(div);
2300 +
2301 + if (div)
2302 + clock = 100 * clock / div;
2303 + }
2304 +
2305 WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
2306 WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
2307 }
2308 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
2309 index 4aa5f755572b..13b6029d65cc 100644
2310 --- a/drivers/gpu/drm/radeon/evergreend.h
2311 +++ b/drivers/gpu/drm/radeon/evergreend.h
2312 @@ -511,6 +511,11 @@
2313 #define DCCG_AUDIO_DTO1_CNTL 0x05cc
2314 # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
2315
2316 +#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
2317 +# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
2318 +# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
2319 +# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
2320 +
2321 /* DCE 4.0 AFMT */
2322 #define HDMI_CONTROL 0x7030
2323 # define HDMI_KEEPOUT_MODE (1 << 0)
2324 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2325 index 91c3f60f8bac..4bca29c5abfa 100644
2326 --- a/drivers/gpu/drm/radeon/radeon.h
2327 +++ b/drivers/gpu/drm/radeon/radeon.h
2328 @@ -268,6 +268,7 @@ struct radeon_clock {
2329 uint32_t current_dispclk;
2330 uint32_t dp_extclk;
2331 uint32_t max_pixel_clock;
2332 + uint32_t vco_freq;
2333 };
2334
2335 /*
2336 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2337 index 8f285244c839..de9a2ffcf5f7 100644
2338 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2339 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2340 @@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2341 }
2342
2343 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
2344 - if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
2345 + if (((dev->pdev->device == 0x9802) ||
2346 + (dev->pdev->device == 0x9805) ||
2347 + (dev->pdev->device == 0x9806)) &&
2348 (dev->pdev->subsystem_vendor == 0x1734) &&
2349 (dev->pdev->subsystem_device == 0x11bd)) {
2350 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
2351 @@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2352 }
2353 }
2354
2355 - /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
2356 - if ((dev->pdev->device == 0x9805) &&
2357 - (dev->pdev->subsystem_vendor == 0x1734) &&
2358 - (dev->pdev->subsystem_device == 0x11bd)) {
2359 - if (*connector_type == DRM_MODE_CONNECTOR_VGA)
2360 - return false;
2361 - }
2362 -
2363 return true;
2364 }
2365
2366 @@ -1112,6 +1106,31 @@ union firmware_info {
2367 ATOM_FIRMWARE_INFO_V2_2 info_22;
2368 };
2369
2370 +union igp_info {
2371 + struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2372 + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2373 + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2374 + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2375 + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2376 +};
2377 +
2378 +static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
2379 +{
2380 + struct radeon_mode_info *mode_info = &rdev->mode_info;
2381 + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2382 + union igp_info *igp_info;
2383 + u8 frev, crev;
2384 + u16 data_offset;
2385 +
2386 + if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2387 + &frev, &crev, &data_offset)) {
2388 + igp_info = (union igp_info *)(mode_info->atom_context->bios +
2389 + data_offset);
2390 + rdev->clock.vco_freq =
2391 + le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
2392 + }
2393 +}
2394 +
2395 bool radeon_atom_get_clock_info(struct drm_device *dev)
2396 {
2397 struct radeon_device *rdev = dev->dev_private;
2398 @@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
2399 rdev->mode_info.firmware_flags =
2400 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
2401
2402 + if (ASIC_IS_DCE8(rdev))
2403 + rdev->clock.vco_freq =
2404 + le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
2405 + else if (ASIC_IS_DCE5(rdev))
2406 + rdev->clock.vco_freq = rdev->clock.current_dispclk;
2407 + else if (ASIC_IS_DCE41(rdev))
2408 + radeon_atombios_get_dentist_vco_freq(rdev);
2409 + else
2410 + rdev->clock.vco_freq = rdev->clock.current_dispclk;
2411 +
2412 + if (rdev->clock.vco_freq == 0)
2413 + rdev->clock.vco_freq = 360000; /* 3.6 GHz */
2414 +
2415 return true;
2416 }
2417
2418 return false;
2419 }
2420
2421 -union igp_info {
2422 - struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2423 - struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2424 - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2425 - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2426 - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2427 -};
2428 -
2429 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
2430 {
2431 struct radeon_mode_info *mode_info = &rdev->mode_info;
2432 diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
2433 index d77dd1430d58..b214663b370d 100644
2434 --- a/drivers/gpu/drm/radeon/radeon_audio.c
2435 +++ b/drivers/gpu/drm/radeon/radeon_audio.c
2436 @@ -698,26 +698,37 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
2437 {
2438 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2439 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2440 + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2441
2442 if (!dig || !dig->afmt)
2443 return;
2444
2445 - radeon_audio_set_mute(encoder, true);
2446 + if (!connector)
2447 + return;
2448 +
2449 + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
2450 + radeon_audio_set_mute(encoder, true);
2451
2452 - radeon_audio_write_speaker_allocation(encoder);
2453 - radeon_audio_write_sad_regs(encoder);
2454 - radeon_audio_write_latency_fields(encoder, mode);
2455 - radeon_audio_set_dto(encoder, mode->clock);
2456 - radeon_audio_set_vbi_packet(encoder);
2457 - radeon_hdmi_set_color_depth(encoder);
2458 - radeon_audio_update_acr(encoder, mode->clock);
2459 - radeon_audio_set_audio_packet(encoder);
2460 - radeon_audio_select_pin(encoder);
2461 + radeon_audio_write_speaker_allocation(encoder);
2462 + radeon_audio_write_sad_regs(encoder);
2463 + radeon_audio_write_latency_fields(encoder, mode);
2464 + radeon_audio_set_dto(encoder, mode->clock);
2465 + radeon_audio_set_vbi_packet(encoder);
2466 + radeon_hdmi_set_color_depth(encoder);
2467 + radeon_audio_update_acr(encoder, mode->clock);
2468 + radeon_audio_set_audio_packet(encoder);
2469 + radeon_audio_select_pin(encoder);
2470
2471 - if (radeon_audio_set_avi_packet(encoder, mode) < 0)
2472 - return;
2473 + if (radeon_audio_set_avi_packet(encoder, mode) < 0)
2474 + return;
2475
2476 - radeon_audio_set_mute(encoder, false);
2477 + radeon_audio_set_mute(encoder, false);
2478 + } else {
2479 + radeon_hdmi_set_color_depth(encoder);
2480 +
2481 + if (radeon_audio_set_avi_packet(encoder, mode) < 0)
2482 + return;
2483 + }
2484 }
2485
2486 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
2487 @@ -728,28 +739,24 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
2488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2489 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2490 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
2491 - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2492 - struct radeon_connector_atom_dig *dig_connector =
2493 - radeon_connector->con_priv;
2494
2495 - if (!connector)
2496 + if (!dig || !dig->afmt)
2497 return;
2498
2499 - if (!dig || !dig->afmt)
2500 + if (!connector)
2501 return;
2502
2503 - radeon_audio_write_speaker_allocation(encoder);
2504 - radeon_audio_write_sad_regs(encoder);
2505 - radeon_audio_write_latency_fields(encoder, mode);
2506 - if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
2507 - radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
2508 - else
2509 - radeon_audio_set_dto(encoder, dig_connector->dp_clock);
2510 - radeon_audio_set_audio_packet(encoder);
2511 - radeon_audio_select_pin(encoder);
2512 + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
2513 + radeon_audio_write_speaker_allocation(encoder);
2514 + radeon_audio_write_sad_regs(encoder);
2515 + radeon_audio_write_latency_fields(encoder, mode);
2516 + radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
2517 + radeon_audio_set_audio_packet(encoder);
2518 + radeon_audio_select_pin(encoder);
2519
2520 - if (radeon_audio_set_avi_packet(encoder, mode) < 0)
2521 - return;
2522 + if (radeon_audio_set_avi_packet(encoder, mode) < 0)
2523 + return;
2524 + }
2525 }
2526
2527 void radeon_audio_mode_set(struct drm_encoder *encoder,
2528 @@ -768,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
2529 if (radeon_encoder->audio && radeon_encoder->audio->dpms)
2530 radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
2531 }
2532 +
2533 +unsigned int radeon_audio_decode_dfs_div(unsigned int div)
2534 +{
2535 + if (div >= 8 && div < 64)
2536 + return (div - 8) * 25 + 200;
2537 + else if (div >= 64 && div < 96)
2538 + return (div - 64) * 50 + 1600;
2539 + else if (div >= 96 && div < 128)
2540 + return (div - 96) * 100 + 3200;
2541 + else
2542 + return 0;
2543 +}
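
radeon_audio_decode_dfs_div() above maps the 7-bit DENTIST divider encoding to the divider scaled by 100, in three linear ranges with 0.25, 0.5 and 1.0 steps; the DTO hunks then rescale the audio clock as clock * 100 / div. A standalone rendering of that mapping (the values in main() are made up for illustration):

#include <stdio.h>

/* three linear ranges, each returning the divider scaled by 100
 * (so 250 means "divide by 2.50"); 0 flags a reserved encoding */
static unsigned int decode_dfs_div(unsigned int div)
{
        if (div >= 8 && div < 64)
                return (div - 8) * 25 + 200;    /*  2.00 .. 15.75 */
        else if (div >= 64 && div < 96)
                return (div - 64) * 50 + 1600;  /* 16.00 .. 31.50 */
        else if (div >= 96 && div < 128)
                return (div - 96) * 100 + 3200; /* 32.00 .. 63.00 */
        return 0;
}

int main(void)
{
        unsigned int clock = 540000;            /* illustrative value */
        unsigned int div = decode_dfs_div(10);  /* encoded 10 -> 250  */

        if (div)
                clock = clock * 100 / div;      /* same rescaling as above */
        printf("div=%u clock=%u\n", div, clock);
        return 0;
}
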
2544 diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
2545 index 059cc3012062..5c70cceaa4a6 100644
2546 --- a/drivers/gpu/drm/radeon/radeon_audio.h
2547 +++ b/drivers/gpu/drm/radeon/radeon_audio.h
2548 @@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
2549 void radeon_audio_mode_set(struct drm_encoder *encoder,
2550 struct drm_display_mode *mode);
2551 void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
2552 +unsigned int radeon_audio_decode_dfs_div(unsigned int div);
2553
2554 #endif
2555 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2556 index 604c44d88e7a..ccab94ed9d94 100644
2557 --- a/drivers/gpu/drm/radeon/radeon_device.c
2558 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2559 @@ -1734,6 +1734,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2560 }
2561
2562 drm_kms_helper_poll_enable(dev);
2563 + drm_helper_hpd_irq_event(dev);
2564
2565 /* set the power state here in case we are a PX system or headless */
2566 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
2567 diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
2568 index 42986130cc63..c9ff4cf4c4e7 100644
2569 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
2570 +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
2571 @@ -287,9 +287,9 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
2572 drm_mode_connector_set_path_property(connector, pathprop);
2573 drm_reinit_primary_mode_group(dev);
2574
2575 - mutex_lock(&dev->mode_config.mutex);
2576 + drm_modeset_lock_all(dev);
2577 radeon_fb_add_connector(rdev, connector);
2578 - mutex_unlock(&dev->mode_config.mutex);
2579 + drm_modeset_unlock_all(dev);
2580
2581 drm_connector_register(connector);
2582 return connector;
2583 @@ -304,12 +304,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2584
2585 drm_connector_unregister(connector);
2586 /* need to nuke the connector */
2587 - mutex_lock(&dev->mode_config.mutex);
2588 + drm_modeset_lock_all(dev);
2589 /* dpms off */
2590 radeon_fb_remove_connector(rdev, connector);
2591
2592 drm_connector_cleanup(connector);
2593 - mutex_unlock(&dev->mode_config.mutex);
2594 + drm_modeset_unlock_all(dev);
2595 drm_reinit_primary_mode_group(dev);
2596
2597
2598 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
2599 index 676362769b8d..741065bd14b3 100644
2600 --- a/drivers/gpu/drm/radeon/radeon_object.c
2601 +++ b/drivers/gpu/drm/radeon/radeon_object.c
2602 @@ -33,6 +33,7 @@
2603 #include <linux/slab.h>
2604 #include <drm/drmP.h>
2605 #include <drm/radeon_drm.h>
2606 +#include <drm/drm_cache.h>
2607 #include "radeon.h"
2608 #include "radeon_trace.h"
2609
2610 @@ -225,7 +226,7 @@ int radeon_bo_create(struct radeon_device *rdev,
2611 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
2612 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
2613 */
2614 - bo->flags &= ~RADEON_GEM_GTT_WC;
2615 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
2616 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
2617 /* Don't try to enable write-combining when it can't work, or things
2618 * may be slow
2619 @@ -237,7 +238,13 @@ int radeon_bo_create(struct radeon_device *rdev,
2620
2621 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
2622 "better performance thanks to write-combining\n");
2623 - bo->flags &= ~RADEON_GEM_GTT_WC;
2624 + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
2625 +#else
2626 + /* For architectures that don't support WC memory,
2627 + * mask out the WC flag from the BO
2628 + */
2629 + if (!drm_arch_can_wc_memory())
2630 + bo->flags &= ~RADEON_GEM_GTT_WC;
2631 #endif
2632
2633 radeon_ttm_placement_from_domain(bo, domain);
2634 diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
2635 index 9c3377ca17b7..8ec4e4591756 100644
2636 --- a/drivers/gpu/drm/radeon/radeon_vm.c
2637 +++ b/drivers/gpu/drm/radeon/radeon_vm.c
2638 @@ -456,15 +456,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2639
2640 if (soffset) {
2641 /* make sure object fit at this offset */
2642 - eoffset = soffset + size;
2643 + eoffset = soffset + size - 1;
2644 if (soffset >= eoffset) {
2645 r = -EINVAL;
2646 goto error_unreserve;
2647 }
2648
2649 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
2650 - if (last_pfn > rdev->vm_manager.max_pfn) {
2651 - dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
2652 + if (last_pfn >= rdev->vm_manager.max_pfn) {
2653 + dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
2654 last_pfn, rdev->vm_manager.max_pfn);
2655 r = -EINVAL;
2656 goto error_unreserve;
2657 @@ -479,7 +479,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2658 eoffset /= RADEON_GPU_PAGE_SIZE;
2659 if (soffset || eoffset) {
2660 struct interval_tree_node *it;
2661 - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
2662 + it = interval_tree_iter_first(&vm->va, soffset, eoffset);
2663 if (it && it != &bo_va->it) {
2664 struct radeon_bo_va *tmp;
2665 tmp = container_of(it, struct radeon_bo_va, it);
2666 @@ -522,7 +522,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2667
2668 if (soffset || eoffset) {
2669 bo_va->it.start = soffset;
2670 - bo_va->it.last = eoffset - 1;
2671 + bo_va->it.last = eoffset;
2672 interval_tree_insert(&bo_va->it, &vm->va);
2673 }
2674
2675 @@ -891,7 +891,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
2676 unsigned i;
2677
2678 start >>= radeon_vm_block_size;
2679 - end >>= radeon_vm_block_size;
2680 + end = (end - 1) >> radeon_vm_block_size;
2681
2682 for (i = start; i <= end; ++i)
2683 radeon_bo_fence(vm->page_tables[i].bo, fence, true);
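
The radeon_vm hunks move the bo_va range onto the interval tree's inclusive [start, last] convention - a size-byte mapping at soffset now ends at soffset + size - 1, and lookups query up to eoffset rather than eoffset - 1. With inclusive bounds the overlap test is symmetric and back-to-back mappings no longer need a -1 adjustment at every call site. A small standalone check of the convention (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* overlap test for two inclusive [start, last] ranges */
static int overlaps(uint64_t a_start, uint64_t a_last,
                    uint64_t b_start, uint64_t b_last)
{
        return a_start <= b_last && b_start <= a_last;
}

int main(void)
{
        /* back-to-back 4 KiB mappings: [0, 4095] and [4096, 8191] */
        printf("%d\n", overlaps(0, 4095, 4096, 8191)); /* 0: adjacent */
        printf("%d\n", overlaps(0, 4096, 4096, 8191)); /* 1: one byte shared */
        return 0;
}
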
2684 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
2685 index 3afac3013983..c126f6bfbed1 100644
2686 --- a/drivers/gpu/drm/radeon/sid.h
2687 +++ b/drivers/gpu/drm/radeon/sid.h
2688 @@ -915,6 +915,11 @@
2689 #define DCCG_AUDIO_DTO1_PHASE 0x05c0
2690 #define DCCG_AUDIO_DTO1_MODULE 0x05c4
2691
2692 +#define DENTIST_DISPCLK_CNTL 0x0490
2693 +# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
2694 +# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
2695 +# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
2696 +
2697 #define AFMT_AUDIO_SRC_CONTROL 0x713c
2698 #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
2699 /* AFMT_AUDIO_SRC_SELECT
2700 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2701 index 15a8d7746fd2..2aa0e927d490 100644
2702 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2703 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2704 @@ -25,6 +25,7 @@
2705 *
2706 **************************************************************************/
2707 #include <linux/module.h>
2708 +#include <linux/console.h>
2709
2710 #include <drm/drmP.h>
2711 #include "vmwgfx_drv.h"
2712 @@ -1447,6 +1448,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2713 static int __init vmwgfx_init(void)
2714 {
2715 int ret;
2716 +
2717 +#ifdef CONFIG_VGA_CONSOLE
2718 + if (vgacon_text_force())
2719 + return -EINVAL;
2720 +#endif
2721 +
2722 ret = drm_pci_init(&driver, &vmw_pci_driver);
2723 if (ret)
2724 DRM_ERROR("Failed initializing DRM.\n");
2725 diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
2726 index 894531d315b8..046144fc5aff 100644
2727 --- a/drivers/hwtracing/coresight/coresight.c
2728 +++ b/drivers/hwtracing/coresight/coresight.c
2729 @@ -543,7 +543,7 @@ static int coresight_name_match(struct device *dev, void *data)
2730 to_match = data;
2731 i_csdev = to_coresight_device(dev);
2732
2733 - if (!strcmp(to_match, dev_name(&i_csdev->dev)))
2734 + if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
2735 return 1;
2736
2737 return 0;
2738 diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
2739 index 4fa88ba2963e..131994382b22 100644
2740 --- a/drivers/infiniband/hw/qib/qib_qp.c
2741 +++ b/drivers/infiniband/hw/qib/qib_qp.c
2742 @@ -100,9 +100,10 @@ static u32 credit_table[31] = {
2743 32768 /* 1E */
2744 };
2745
2746 -static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
2747 +static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
2748 + gfp_t gfp)
2749 {
2750 - unsigned long page = get_zeroed_page(GFP_KERNEL);
2751 + unsigned long page = get_zeroed_page(gfp);
2752
2753 /*
2754 * Free the page if someone raced with us installing it.
2755 @@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
2756 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
2757 */
2758 static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
2759 - enum ib_qp_type type, u8 port)
2760 + enum ib_qp_type type, u8 port, gfp_t gfp)
2761 {
2762 u32 i, offset, max_scan, qpn;
2763 struct qpn_map *map;
2764 @@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
2765 max_scan = qpt->nmaps - !offset;
2766 for (i = 0;;) {
2767 if (unlikely(!map->page)) {
2768 - get_map_page(qpt, map);
2769 + get_map_page(qpt, map, gfp);
2770 if (unlikely(!map->page))
2771 break;
2772 }
2773 @@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
2774 size_t sz;
2775 size_t sg_list_sz;
2776 struct ib_qp *ret;
2777 + gfp_t gfp;
2778 +
2779
2780 if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
2781 init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
2782 - init_attr->create_flags) {
2783 - ret = ERR_PTR(-EINVAL);
2784 - goto bail;
2785 - }
2786 + init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
2787 + return ERR_PTR(-EINVAL);
2788 +
2789 + /* GFP_NOIO is applicable in RC QPs only */
2790 + if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
2791 + init_attr->qp_type != IB_QPT_RC)
2792 + return ERR_PTR(-EINVAL);
2793 +
2794 + gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
2795 + GFP_NOIO : GFP_KERNEL;
2796
2797 /* Check receive queue parameters if no SRQ is specified. */
2798 if (!init_attr->srq) {
2799 @@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
2800 sz = sizeof(struct qib_sge) *
2801 init_attr->cap.max_send_sge +
2802 sizeof(struct qib_swqe);
2803 - swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
2804 + swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
2805 + gfp, PAGE_KERNEL);
2806 if (swq == NULL) {
2807 ret = ERR_PTR(-ENOMEM);
2808 goto bail;
2809 @@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
2810 } else if (init_attr->cap.max_recv_sge > 1)
2811 sg_list_sz = sizeof(*qp->r_sg_list) *
2812 (init_attr->cap.max_recv_sge - 1);
2813 - qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
2814 + qp = kzalloc(sz + sg_list_sz, gfp);
2815 if (!qp) {
2816 ret = ERR_PTR(-ENOMEM);
2817 goto bail_swq;
2818 }
2819 RCU_INIT_POINTER(qp->next, NULL);
2820 - qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
2821 + qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
2822 if (!qp->s_hdr) {
2823 ret = ERR_PTR(-ENOMEM);
2824 goto bail_qp;
2825 @@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
2826 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
2827 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
2828 sizeof(struct qib_rwqe);
2829 - qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
2830 - qp->r_rq.size * sz);
2831 + if (gfp != GFP_NOIO)
2832 + qp->r_rq.wq = vmalloc_user(
2833 + sizeof(struct qib_rwq) +
2834 + qp->r_rq.size * sz);
2835 + else
2836 + qp->r_rq.wq = __vmalloc(
2837 + sizeof(struct qib_rwq) +
2838 + qp->r_rq.size * sz,
2839 + gfp, PAGE_KERNEL);
2840 +
2841 if (!qp->r_rq.wq) {
2842 ret = ERR_PTR(-ENOMEM);
2843 goto bail_qp;
2844 @@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
2845 dev = to_idev(ibpd->device);
2846 dd = dd_from_dev(dev);
2847 err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
2848 - init_attr->port_num);
2849 + init_attr->port_num, gfp);
2850 if (err < 0) {
2851 ret = ERR_PTR(err);
2852 vfree(qp->r_rq.wq);
2853 diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
2854 index f8ea069a3eaf..b2fb5286dbd9 100644
2855 --- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
2856 +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
2857 @@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2858 struct qib_ibdev *dev = to_idev(ibqp->device);
2859 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
2860 struct qib_mcast *mcast = NULL;
2861 - struct qib_mcast_qp *p, *tmp;
2862 + struct qib_mcast_qp *p, *tmp, *delp = NULL;
2863 struct rb_node *n;
2864 int last = 0;
2865 int ret;
2866
2867 - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
2868 - ret = -EINVAL;
2869 - goto bail;
2870 - }
2871 + if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
2872 + return -EINVAL;
2873
2874 spin_lock_irq(&ibp->lock);
2875
2876 @@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2877 while (1) {
2878 if (n == NULL) {
2879 spin_unlock_irq(&ibp->lock);
2880 - ret = -EINVAL;
2881 - goto bail;
2882 + return -EINVAL;
2883 }
2884
2885 mcast = rb_entry(n, struct qib_mcast, rb_node);
2886 @@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2887 */
2888 list_del_rcu(&p->list);
2889 mcast->n_attached--;
2890 + delp = p;
2891
2892 /* If this was the last attached QP, remove the GID too. */
2893 if (list_empty(&mcast->qp_list)) {
2894 @@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2895 }
2896
2897 spin_unlock_irq(&ibp->lock);
2898 + /* QP not attached */
2899 + if (!delp)
2900 + return -EINVAL;
2901 + /*
2902 + * Wait for any list walkers to finish before freeing the
2903 + * list element.
2904 + */
2905 + wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
2906 + qib_mcast_qp_free(delp);
2907
2908 - if (p) {
2909 - /*
2910 - * Wait for any list walkers to finish before freeing the
2911 - * list element.
2912 - */
2913 - wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
2914 - qib_mcast_qp_free(p);
2915 - }
2916 if (last) {
2917 atomic_dec(&mcast->refcount);
2918 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
2919 @@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2920 dev->n_mcast_grps_allocated--;
2921 spin_unlock_irq(&dev->n_mcast_grps_lock);
2922 }
2923 -
2924 - ret = 0;
2925 -
2926 -bail:
2927 - return ret;
2928 + return 0;
2929 }
2930
2931 int qib_mcast_tree_empty(struct qib_ibport *ibp)
2932 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2933 index ce3d40004458..0f5b400706d7 100644
2934 --- a/drivers/input/mouse/elantech.c
2935 +++ b/drivers/input/mouse/elantech.c
2936 @@ -1214,7 +1214,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
2937 input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
2938 ETP_WMAX_V2, 0, 0);
2939 }
2940 - input_mt_init_slots(dev, 2, 0);
2941 + input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
2942 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
2943 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
2944 break;
2945 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2946 index c11556563ef0..68f5f4a0f1e7 100644
2947 --- a/drivers/input/serio/i8042-x86ia64io.h
2948 +++ b/drivers/input/serio/i8042-x86ia64io.h
2949 @@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
2950 },
2951 },
2952 {
2953 + /* Fujitsu Lifebook U745 */
2954 + .matches = {
2955 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2956 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
2957 + },
2958 + },
2959 + {
2960 /* Fujitsu T70H */
2961 .matches = {
2962 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
2963 diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
2964 index e29d5d7fe220..937832cfa48e 100644
2965 --- a/drivers/iommu/io-pgtable-arm.c
2966 +++ b/drivers/iommu/io-pgtable-arm.c
2967 @@ -341,17 +341,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
2968 arm_lpae_iopte *start, *end;
2969 unsigned long table_size;
2970
2971 - /* Only leaf entries at the last level */
2972 - if (lvl == ARM_LPAE_MAX_LEVELS - 1)
2973 - return;
2974 -
2975 if (lvl == ARM_LPAE_START_LVL(data))
2976 table_size = data->pgd_size;
2977 else
2978 table_size = 1UL << data->pg_shift;
2979
2980 start = ptep;
2981 - end = (void *)ptep + table_size;
2982 +
2983 + /* Only leaf entries at the last level */
2984 + if (lvl == ARM_LPAE_MAX_LEVELS - 1)
2985 + end = ptep;
2986 + else
2987 + end = (void *)ptep + table_size;
2988
2989 while (ptep != end) {
2990 arm_lpae_iopte pte = *ptep++;
2991 diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
2992 index 63cd031b2c28..869d01dd4063 100644
2993 --- a/drivers/irqchip/irq-atmel-aic-common.c
2994 +++ b/drivers/irqchip/irq-atmel-aic-common.c
2995 @@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
2996 priority > AT91_AIC_IRQ_MAX_PRIORITY)
2997 return -EINVAL;
2998
2999 - *val &= AT91_AIC_PRIOR;
3000 + *val &= ~AT91_AIC_PRIOR;
3001 *val |= priority;
3002
3003 return 0;
3004 diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
3005 index 00cde40db572..43829d9493f7 100644
3006 --- a/drivers/md/bcache/btree.c
3007 +++ b/drivers/md/bcache/btree.c
3008 @@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
3009 do {
3010 ret = btree_root(gc_root, c, &op, &writes, &stats);
3011 closure_sync(&writes);
3012 + cond_resched();
3013
3014 if (ret && ret != -EAGAIN)
3015 pr_warn("gc failed!");
3016 @@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
3017 rw_lock(true, b, b->level);
3018
3019 if (b->key.ptr[0] != btree_ptr ||
3020 - b->seq != seq + 1)
3021 + b->seq != seq + 1) {
3022 + op->lock = b->level;
3023 goto out;
3024 + }
3025 }
3026
3027 SET_KEY_PTRS(check_key, 1);
3028 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3029 index 4dd2bb7167f0..42522c8f13c6 100644
3030 --- a/drivers/md/bcache/super.c
3031 +++ b/drivers/md/bcache/super.c
3032 @@ -708,6 +708,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
3033 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
3034 sysfs_create_link(&c->kobj, &d->kobj, d->name),
3035 "Couldn't create device <-> cache set symlinks");
3036 +
3037 + clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
3038 }
3039
3040 static void bcache_device_detach(struct bcache_device *d)
3041 @@ -878,8 +880,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
3042 buf[SB_LABEL_SIZE] = '\0';
3043 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
3044
3045 - if (atomic_xchg(&dc->running, 1))
3046 + if (atomic_xchg(&dc->running, 1)) {
3047 + kfree(env[1]);
3048 + kfree(env[2]);
3049 return;
3050 + }
3051
3052 if (!d->c &&
3053 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
3054 @@ -1967,6 +1972,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
3055 else
3056 err = "device busy";
3057 mutex_unlock(&bch_register_lock);
3058 + if (attr == &ksysfs_register_quiet)
3059 + goto out;
3060 }
3061 goto err;
3062 }
3063 @@ -2005,8 +2012,7 @@ out:
3064 err_close:
3065 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
3066 err:
3067 - if (attr != &ksysfs_register_quiet)
3068 - pr_info("error opening %s: %s", path, err);
3069 + pr_info("error opening %s: %s", path, err);
3070 ret = -EINVAL;
3071 goto out;
3072 }
3073 @@ -2100,8 +2106,10 @@ static int __init bcache_init(void)
3074 closure_debug_init();
3075
3076 bcache_major = register_blkdev(0, "bcache");
3077 - if (bcache_major < 0)
3078 + if (bcache_major < 0) {
3079 + unregister_reboot_notifier(&reboot);
3080 return bcache_major;
3081 + }
3082
3083 if (!(bcache_wq = create_workqueue("bcache")) ||
3084 !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
3085 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
3086 index f1986bcd1bf0..540256a0df4f 100644
3087 --- a/drivers/md/bcache/writeback.c
3088 +++ b/drivers/md/bcache/writeback.c
3089 @@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
3090
3091 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
3092 {
3093 + struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
3094 +
3095 + BUG_ON(KEY_INODE(k) != dc->disk.id);
3096 +
3097 return KEY_DIRTY(k);
3098 }
3099
3100 @@ -372,11 +376,24 @@ next:
3101 }
3102 }
3103
3104 +/*
3105 + * Returns true if we scanned the entire disk
3106 + */
3107 static bool refill_dirty(struct cached_dev *dc)
3108 {
3109 struct keybuf *buf = &dc->writeback_keys;
3110 + struct bkey start = KEY(dc->disk.id, 0, 0);
3111 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
3112 - bool searched_from_start = false;
3113 + struct bkey start_pos;
3114 +
3115 + /*
3116 + * make sure the keybuf position is inside the range for this disk - at
3117 + * bringup we might not be attached yet, so this disk's inode number
3118 + * isn't initialized yet
3119 + */
3120 + if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
3121 + bkey_cmp(&buf->last_scanned, &end) > 0)
3122 + buf->last_scanned = start;
3123
3124 if (dc->partial_stripes_expensive) {
3125 refill_full_stripes(dc);
3126 @@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
3127 return false;
3128 }
3129
3130 - if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
3131 - buf->last_scanned = KEY(dc->disk.id, 0, 0);
3132 - searched_from_start = true;
3133 - }
3134 -
3135 + start_pos = buf->last_scanned;
3136 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
3137
3138 - return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
3139 + if (bkey_cmp(&buf->last_scanned, &end) < 0)
3140 + return false;
3141 +
3142 + /*
3143 + * If we get to the end, start scanning again from the beginning, and
3144 + * only scan up to where we initially started scanning from:
3145 + */
3146 + buf->last_scanned = start;
3147 + bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
3148 +
3149 + return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
3150 }
3151
3152 static int bch_writeback_thread(void *arg)
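
refill_dirty() above now clamps last_scanned into this disk's key range, scans from the cursor to the end of the disk, and only when it reaches the end wraps around and rescans up to where it started, so a full pass is judged relative to the cursor rather than to key zero. The shape of that traversal as a toy over an array (the early exit when the keybuf fills is noted but not modeled in full):

#include <stdio.h>
#include <stdbool.h>

#define N 8
static int cursor;      /* persists across passes, like last_scanned */

static void visit(int i) { printf("visit %d\n", i); }

/* scan [cursor, limit); bcache may stop early when its keybuf fills,
 * in which case a pass reports "not finished" the same way */
static bool scan_to(int limit)
{
        while (cursor < limit)
                visit(cursor++);
        return cursor >= limit;
}

static bool refill(void)
{
        int start_pos = cursor;

        if (!scan_to(N))           /* cursor .. end of disk */
                return false;

        cursor = 0;                /* wrap to the start ... */
        return scan_to(start_pos); /* ... catch up to where we began */
}

int main(void)
{
        cursor = 5;
        printf("full pass: %d\n", refill());
        return 0;
}
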
3153 diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
3154 index 0a9dab187b79..073a042aed24 100644
3155 --- a/drivers/md/bcache/writeback.h
3156 +++ b/drivers/md/bcache/writeback.h
3157 @@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
3158
3159 static inline void bch_writeback_queue(struct cached_dev *dc)
3160 {
3161 - wake_up_process(dc->writeback_thread);
3162 + if (!IS_ERR_OR_NULL(dc->writeback_thread))
3163 + wake_up_process(dc->writeback_thread);
3164 }
3165
3166 static inline void bch_writeback_add(struct cached_dev *dc)
3167 diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
3168 index 0b2536247cf5..84e27708ad97 100644
3169 --- a/drivers/md/dm-exception-store.h
3170 +++ b/drivers/md/dm-exception-store.h
3171 @@ -70,7 +70,7 @@ struct dm_exception_store_type {
3172 * Update the metadata with this exception.
3173 */
3174 void (*commit_exception) (struct dm_exception_store *store,
3175 - struct dm_exception *e,
3176 + struct dm_exception *e, int valid,
3177 void (*callback) (void *, int success),
3178 void *callback_context);
3179
3180 diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
3181 index 808b8419bc48..9feb894e5565 100644
3182 --- a/drivers/md/dm-snap-persistent.c
3183 +++ b/drivers/md/dm-snap-persistent.c
3184 @@ -694,7 +694,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
3185 }
3186
3187 static void persistent_commit_exception(struct dm_exception_store *store,
3188 - struct dm_exception *e,
3189 + struct dm_exception *e, int valid,
3190 void (*callback) (void *, int success),
3191 void *callback_context)
3192 {
3193 @@ -703,6 +703,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
3194 struct core_exception ce;
3195 struct commit_callback *cb;
3196
3197 + if (!valid)
3198 + ps->valid = 0;
3199 +
3200 ce.old_chunk = e->old_chunk;
3201 ce.new_chunk = e->new_chunk;
3202 write_exception(ps, ps->current_committed++, &ce);
3203 diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
3204 index 1ce9a2586e41..31439d53cf7e 100644
3205 --- a/drivers/md/dm-snap-transient.c
3206 +++ b/drivers/md/dm-snap-transient.c
3207 @@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
3208 }
3209
3210 static void transient_commit_exception(struct dm_exception_store *store,
3211 - struct dm_exception *e,
3212 + struct dm_exception *e, int valid,
3213 void (*callback) (void *, int success),
3214 void *callback_context)
3215 {
3216 /* Just succeed */
3217 - callback(callback_context, 1);
3218 + callback(callback_context, valid);
3219 }
3220
3221 static void transient_usage(struct dm_exception_store *store,
3222 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
3223 index f83a0f3fc365..11ec9d2a27df 100644
3224 --- a/drivers/md/dm-snap.c
3225 +++ b/drivers/md/dm-snap.c
3226 @@ -1428,8 +1428,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
3227 dm_table_event(s->ti->table);
3228 }
3229
3230 -static void pending_complete(struct dm_snap_pending_exception *pe, int success)
3231 +static void pending_complete(void *context, int success)
3232 {
3233 + struct dm_snap_pending_exception *pe = context;
3234 struct dm_exception *e;
3235 struct dm_snapshot *s = pe->snap;
3236 struct bio *origin_bios = NULL;
3237 @@ -1500,24 +1501,13 @@ out:
3238 free_pending_exception(pe);
3239 }
3240
3241 -static void commit_callback(void *context, int success)
3242 -{
3243 - struct dm_snap_pending_exception *pe = context;
3244 -
3245 - pending_complete(pe, success);
3246 -}
3247 -
3248 static void complete_exception(struct dm_snap_pending_exception *pe)
3249 {
3250 struct dm_snapshot *s = pe->snap;
3251
3252 - if (unlikely(pe->copy_error))
3253 - pending_complete(pe, 0);
3254 -
3255 - else
3256 - /* Update the metadata if we are persistent */
3257 - s->store->type->commit_exception(s->store, &pe->e,
3258 - commit_callback, pe);
3259 + /* Update the metadata if we are persistent */
3260 + s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
3261 + pending_complete, pe);
3262 }
3263
3264 /*
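Taken together, the dm hunks collapse two completion paths into one: the copy-error case no longer calls pending_complete() directly, and the store is told via the new valid flag whether to poison itself before firing the single callback. A sketch of that consolidation in plain C (not the device-mapper API):

    #include <stdio.h>

    typedef void (*commit_cb)(void *ctx, int success);

    struct store { int valid; };

    /* Persistent-store flavour: a failed copy poisons the store, then
     * the one and only completion callback runs exactly once. */
    static void commit_exception(struct store *s, int valid,
                                 commit_cb cb, void *ctx)
    {
        if (!valid)
            s->valid = 0;
        cb(ctx, valid);
    }

    static void pending_complete(void *ctx, int success)
    {
        printf("exception %s (%s)\n",
               success ? "committed" : "failed", (char *)ctx);
    }

    int main(void)
    {
        struct store s = { .valid = 1 };
        commit_exception(&s, 1, pending_complete, "copy ok");
        commit_exception(&s, 0, pending_complete, "copy error");
        return s.valid; /* 0 after the failed commit */
    }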
3265 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
3266 index 7073b22d4cb4..cb58bb318782 100644
3267 --- a/drivers/md/dm-thin.c
3268 +++ b/drivers/md/dm-thin.c
3269 @@ -3210,8 +3210,8 @@ static void pool_postsuspend(struct dm_target *ti)
3270 struct pool_c *pt = ti->private;
3271 struct pool *pool = pt->pool;
3272
3273 - cancel_delayed_work(&pool->waker);
3274 - cancel_delayed_work(&pool->no_space_timeout);
3275 + cancel_delayed_work_sync(&pool->waker);
3276 + cancel_delayed_work_sync(&pool->no_space_timeout);
3277 flush_workqueue(pool->wq);
3278 (void) commit(pool);
3279 }
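The switch to cancel_delayed_work_sync() closes a shutdown race: the plain cancel only dequeues a pending timer, so a waker callback already executing on another CPU could re-arm itself after flush_workqueue() and touch the pool after suspend. The _sync variant also waits for the running callback. A pthread sketch of that cancel-and-wait idea (hypothetical names, not the kernel workqueue API):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
    static bool running, cancelled;

    /* cancel_sync: mark cancelled, then block until no callback is
     * running - the cancel_delayed_work_sync() behaviour, versus the
     * plain cancel which would return while one is mid-flight. */
    static void cancel_sync(void)
    {
        pthread_mutex_lock(&lock);
        cancelled = true;
        while (running)
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
    }

    /* Called by the worker around each callback invocation. */
    static bool callback_begin(void)
    {
        pthread_mutex_lock(&lock);
        bool ok = !cancelled;
        running = ok;
        pthread_mutex_unlock(&lock);
        return ok;
    }

    static void callback_end(void)
    {
        pthread_mutex_lock(&lock);
        running = false;
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
    }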
3280 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
3281 index 882ca417f328..3ab874703d11 100644
3282 --- a/drivers/media/dvb-core/dvb_frontend.c
3283 +++ b/drivers/media/dvb-core/dvb_frontend.c
3284 @@ -2333,9 +2333,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
3285 dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
3286 __func__, c->delivery_system, fe->ops.info.type);
3287
3288 - /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
3289 - * do it, it is done for it. */
3290 - info->caps |= FE_CAN_INVERSION_AUTO;
3291 + /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
3292 + if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
3293 + info->caps |= FE_CAN_INVERSION_AUTO;
3294 err = 0;
3295 break;
3296 }
3297 diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
3298 index a2631be7ffac..08e0f0dd8728 100644
3299 --- a/drivers/media/dvb-frontends/tda1004x.c
3300 +++ b/drivers/media/dvb-frontends/tda1004x.c
3301 @@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
3302 {
3303 struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
3304 struct tda1004x_state* state = fe->demodulator_priv;
3305 + int status;
3306
3307 dprintk("%s\n", __func__);
3308
3309 + status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
3310 + if (status == -1)
3311 + return -EIO;
3312 +
3313 + /* Only update the properties cache if device is locked */
3314 + if (!(status & 8))
3315 + return 0;
3316 +
3317 // inversion status
3318 fe_params->inversion = INVERSION_OFF;
3319 if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
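tda1004x_get_fe() now reads TDA1004X_STATUS_CD up front: a failed register read becomes -EIO, and an unlocked demodulator (lock bit 8 clear) leaves the property cache untouched instead of overwriting it with unlocked-state noise. The gate in isolation (illustrative names, not the dvb API):

    #include <errno.h>

    #define LOCK_BIT 0x8

    static int update_cache_if_locked(int status, void (*update)(void))
    {
        if (status == -1)
            return -EIO;   /* register read failed */
        if (!(status & LOCK_BIT))
            return 0;      /* not locked: keep the old cache */
        update();
        return 0;
    }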
3320 diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
3321 index ac3cd74e824e..067db727e685 100644
3322 --- a/drivers/media/pci/saa7134/saa7134-alsa.c
3323 +++ b/drivers/media/pci/saa7134/saa7134-alsa.c
3324 @@ -1218,6 +1218,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
3325
3326 static int alsa_device_exit(struct saa7134_dev *dev)
3327 {
3328 + if (!snd_saa7134_cards[dev->nr])
3329 + return 1;
3330
3331 snd_card_free(snd_saa7134_cards[dev->nr]);
3332 snd_saa7134_cards[dev->nr] = NULL;
3333 @@ -1267,7 +1269,8 @@ static void saa7134_alsa_exit(void)
3334 int idx;
3335
3336 for (idx = 0; idx < SNDRV_CARDS; idx++) {
3337 - snd_card_free(snd_saa7134_cards[idx]);
3338 + if (snd_saa7134_cards[idx])
3339 + snd_card_free(snd_saa7134_cards[idx]);
3340 }
3341
3342 saa7134_dmasound_init = NULL;
3343 diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
3344 index 7830aef3db45..40f77685cc4a 100644
3345 --- a/drivers/media/rc/sunxi-cir.c
3346 +++ b/drivers/media/rc/sunxi-cir.c
3347 @@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
3348 if (!ir)
3349 return -ENOMEM;
3350
3351 + spin_lock_init(&ir->ir_lock);
3352 +
3353 if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
3354 ir->fifo_size = 64;
3355 else
3356 diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
3357 index 146071b8e116..bfff1d1c70ab 100644
3358 --- a/drivers/media/usb/gspca/ov534.c
3359 +++ b/drivers/media/usb/gspca/ov534.c
3360 @@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
3361 struct v4l2_fract *tpf = &cp->timeperframe;
3362 struct sd *sd = (struct sd *) gspca_dev;
3363
3364 - /* Set requested framerate */
3365 - sd->frame_rate = tpf->denominator / tpf->numerator;
3366 + if (tpf->numerator == 0 || tpf->denominator == 0)
3367 + /* Set default framerate */
3368 + sd->frame_rate = 30;
3369 + else
3370 + /* Set requested framerate */
3371 + sd->frame_rate = tpf->denominator / tpf->numerator;
3372 +
3373 if (gspca_dev->streaming)
3374 set_frame_rate(gspca_dev);
3375
3376 diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
3377 index c70ff406b07a..c028a5c2438e 100644
3378 --- a/drivers/media/usb/gspca/topro.c
3379 +++ b/drivers/media/usb/gspca/topro.c
3380 @@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
3381 struct v4l2_fract *tpf = &cp->timeperframe;
3382 int fr, i;
3383
3384 - sd->framerate = tpf->denominator / tpf->numerator;
3385 + if (tpf->numerator == 0 || tpf->denominator == 0)
3386 + sd->framerate = 30;
3387 + else
3388 + sd->framerate = tpf->denominator / tpf->numerator;
3389 +
3390 if (gspca_dev->streaming)
3391 setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
3392
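The ov534 hunk above and this topro hunk plug the same hole: VIDIOC_S_PARM lets userspace pass a timeperframe with a zero numerator or denominator, and the unchecked division was a divide-by-zero inside the kernel; both now fall back to 30 fps. The guard as a standalone function (default value taken from the patch):

    #include <stdio.h>

    struct fract { unsigned numerator, denominator; };

    static unsigned frame_rate_from(const struct fract *tpf)
    {
        if (tpf->numerator == 0 || tpf->denominator == 0)
            return 30; /* default framerate, as in the patch */
        return tpf->denominator / tpf->numerator;
    }

    int main(void)
    {
        struct fract bad = { 0, 0 }, good = { 1, 25 };
        printf("%u %u\n", frame_rate_from(&bad), frame_rate_from(&good));
    }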
3393 diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
3394 index cf9d644a8aff..472eaad6fb78 100644
3395 --- a/drivers/media/v4l2-core/videobuf2-core.c
3396 +++ b/drivers/media/v4l2-core/videobuf2-core.c
3397 @@ -2662,10 +2662,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
3398 return res | POLLERR;
3399
3400 /*
3401 - * For output streams you can write as long as there are fewer buffers
3402 - * queued than there are buffers available.
3403 + * For output streams you can call write() as long as there are fewer
3404 + * buffers queued than there are buffers available.
3405 */
3406 - if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
3407 + if (V4L2_TYPE_IS_OUTPUT(q->type) && q->fileio && q->queued_count < q->num_buffers)
3408 return res | POLLOUT | POLLWRNORM;
3409
3410 if (list_empty(&q->done_list))
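The added q->fileio test scopes POLLOUT to the read()/write() emulation path: on an mmap or userptr output queue with no buffers queued, poll() used to report writability that write() could never satisfy. The condition in isolation (field names are stand-ins for the vb2 ones):

    #include <stdbool.h>

    struct queue {
        bool is_output;   /* V4L2_TYPE_IS_OUTPUT(q->type) */
        bool fileio;      /* read()/write() emulation active */
        unsigned queued, buffers;
    };

    /* POLLOUT is only meaningful when write() emulation is in use and
     * a buffer slot is actually free. */
    static bool pollout_ready(const struct queue *q)
    {
        return q->is_output && q->fileio && q->queued < q->buffers;
    }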
3411 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
3412 index 31a9ef256d06..ce3044883a42 100644
3413 --- a/drivers/mmc/core/sd.c
3414 +++ b/drivers/mmc/core/sd.c
3415 @@ -661,9 +661,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
3416 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
3417 */
3418 if (!mmc_host_is_spi(card->host) &&
3419 - (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
3420 - card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
3421 + (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
3422 + card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
3423 + card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
3424 err = mmc_execute_tuning(card);
3425 +
3426 + /*
3427 + * As SD Specifications Part1 Physical Layer Specification
3428 + * Version 3.01 says, CMD19 tuning is available for unlocked
3429 + * cards in transfer state of 1.8V signaling mode. The small
3430 + * difference between v3.00 and 3.01 spec means that CMD19
3431 + * tuning is also available for DDR50 mode.
3432 + */
3433 + if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
3434 + pr_warn("%s: ddr50 tuning failed\n",
3435 + mmc_hostname(card->host));
3436 + err = 0;
3437 + }
3438 + }
3439 +
3440 out:
3441 kfree(status);
3442
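This sd.c hunk and the matching sdio.c change below key the tuning decision off the timing actually programmed in ios.timing rather than the card's advertised capability, so a card the host capped to a slower mode is no longer tuned for a mode it isn't using; DDR50 is newly tuned too, with a failure there demoted to a warning. A sketch of the decision (enum values stand in for the MMC_TIMING_* constants, execute_tuning is a stub):

    #include <stdbool.h>
    #include <stdio.h>

    enum timing { LEGACY, UHS_SDR50, UHS_DDR50, UHS_SDR104 };

    static int execute_tuning(void) { return -5; /* pretend -EIO */ }

    static int maybe_tune(enum timing t, bool is_spi)
    {
        int err = 0;

        if (!is_spi &&
            (t == UHS_SDR50 || t == UHS_DDR50 || t == UHS_SDR104)) {
            err = execute_tuning();
            /* Per SD spec 3.01, CMD19 tuning also covers DDR50, but a
             * failure there is tolerated rather than fatal. */
            if (err && t == UHS_DDR50) {
                fprintf(stderr, "ddr50 tuning failed\n");
                err = 0;
            }
        }
        return err;
    }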
3443 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
3444 index 5bc6c7dbbd60..941beb3b5fa2 100644
3445 --- a/drivers/mmc/core/sdio.c
3446 +++ b/drivers/mmc/core/sdio.c
3447 @@ -566,8 +566,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
3448 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
3449 */
3450 if (!mmc_host_is_spi(card->host) &&
3451 - ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
3452 - (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
3453 + ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
3454 + (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
3455 err = mmc_execute_tuning(card);
3456 out:
3457 return err;
3458 @@ -661,7 +661,7 @@ try_again:
3459 */
3460 if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
3461 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
3462 - ocr);
3463 + ocr_card);
3464 if (err == -EAGAIN) {
3465 sdio_reset(host);
3466 mmc_go_idle(host);
3467 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
3468 index fb266745f824..acece3299756 100644
3469 --- a/drivers/mmc/host/mmci.c
3470 +++ b/drivers/mmc/host/mmci.c
3471 @@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
3472 {
3473 .id = 0x00280180,
3474 .mask = 0x00ffffff,
3475 - .data = &variant_u300,
3476 + .data = &variant_nomadik,
3477 },
3478 {
3479 .id = 0x00480180,
3480 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3481 index cbaf3df3ebd9..f47c4a8370be 100644
3482 --- a/drivers/mmc/host/sdhci.c
3483 +++ b/drivers/mmc/host/sdhci.c
3484 @@ -555,9 +555,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
3485
3486 BUG_ON(len > 65536);
3487
3488 - /* tran, valid */
3489 - sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
3490 - desc += host->desc_sz;
3491 + if (len) {
3492 + /* tran, valid */
3493 + sdhci_adma_write_desc(host, desc, addr, len,
3494 + ADMA2_TRAN_VALID);
3495 + desc += host->desc_sz;
3496 + }
3497
3498 /*
3499 * If this triggers then we have a calculation bug
3500 @@ -2790,7 +2793,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
3501
3502 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
3503 {
3504 - if (host->runtime_suspended || host->bus_on)
3505 + if (host->bus_on)
3506 return;
3507 host->bus_on = true;
3508 pm_runtime_get_noresume(host->mmc->parent);
3509 @@ -2798,7 +2801,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
3510
3511 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
3512 {
3513 - if (host->runtime_suspended || !host->bus_on)
3514 + if (!host->bus_on)
3515 return;
3516 host->bus_on = false;
3517 pm_runtime_put_noidle(host->mmc->parent);
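Two sdhci fixes sit here: zero-length scatterlist entries no longer emit ADMA descriptors, since a 0-byte TRAN_VALID descriptor can stall some controllers, and the bus_on/bus_off helpers drop their runtime-suspend short-circuits so the pm_runtime get/put pairs always balance. The descriptor-skip logic on its own (field values are illustrative stand-ins):

    #include <stdint.h>

    struct desc { uint32_t cmd, len, addr; };

    /* Append a transfer descriptor only for non-empty segments; the
     * caller still accounts the (possibly zero) length to the total. */
    static struct desc *emit_tran(struct desc *d, uint32_t addr,
                                  uint32_t len)
    {
        if (len) {
            d->cmd = 0x21; /* stand-in for ADMA2_TRAN_VALID */
            d->len = len;
            d->addr = addr;
            d++;
        }
        return d;
    }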
3518 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
3519 new file mode 100644
3520 index 000000000000..d60a467a983c
3521 --- /dev/null
3522 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
3523 @@ -0,0 +1,2717 @@
3524 +/******************************************************************************
3525 + *
3526 + * This file is provided under a dual BSD/GPLv2 license. When using or
3527 + * redistributing this file, you may do so under either license.
3528 + *
3529 + * GPL LICENSE SUMMARY
3530 + *
3531 + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
3532 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
3533 + * Copyright(c) 2016 Intel Deutschland GmbH
3534 + *
3535 + * This program is free software; you can redistribute it and/or modify
3536 + * it under the terms of version 2 of the GNU General Public License as
3537 + * published by the Free Software Foundation.
3538 + *
3539 + * This program is distributed in the hope that it will be useful, but
3540 + * WITHOUT ANY WARRANTY; without even the implied warranty of
3541 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
3542 + * General Public License for more details.
3543 + *
3544 + * You should have received a copy of the GNU General Public License
3545 + * along with this program; if not, write to the Free Software
3546 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
3547 + * USA
3548 + *
3549 + * The full GNU General Public License is included in this distribution
3550 + * in the file called COPYING.
3551 + *
3552 + * Contact Information:
3553 + * Intel Linux Wireless <linuxwifi@intel.com>
3554 + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
3555 + *
3556 + * BSD LICENSE
3557 + *
3558 + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
3559 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
3560 + * Copyright(c) 2016 Intel Deutschland GmbH
3561 + * All rights reserved.
3562 + *
3563 + * Redistribution and use in source and binary forms, with or without
3564 + * modification, are permitted provided that the following conditions
3565 + * are met:
3566 + *
3567 + * * Redistributions of source code must retain the above copyright
3568 + * notice, this list of conditions and the following disclaimer.
3569 + * * Redistributions in binary form must reproduce the above copyright
3570 + * notice, this list of conditions and the following disclaimer in
3571 + * the documentation and/or other materials provided with the
3572 + * distribution.
3573 + * * Neither the name Intel Corporation nor the names of its
3574 + * contributors may be used to endorse or promote products derived
3575 + * from this software without specific prior written permission.
3576 + *
3577 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
3578 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
3579 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
3580 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
3581 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
3582 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
3583 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3584 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3585 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3586 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
3587 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3588 + *
3589 + *****************************************************************************/
3590 +#include <linux/pci.h>
3591 +#include <linux/pci-aspm.h>
3592 +#include <linux/interrupt.h>
3593 +#include <linux/debugfs.h>
3594 +#include <linux/sched.h>
3595 +#include <linux/bitops.h>
3596 +#include <linux/gfp.h>
3597 +#include <linux/vmalloc.h>
3598 +
3599 +#include "iwl-drv.h"
3600 +#include "iwl-trans.h"
3601 +#include "iwl-csr.h"
3602 +#include "iwl-prph.h"
3603 +#include "iwl-scd.h"
3604 +#include "iwl-agn-hw.h"
3605 +#include "iwl-fw-error-dump.h"
3606 +#include "internal.h"
3607 +#include "iwl-fh.h"
3608 +
3609 +/* extended range in FW SRAM */
3610 +#define IWL_FW_MEM_EXTENDED_START 0x40000
3611 +#define IWL_FW_MEM_EXTENDED_END 0x57FFF
3612 +
3613 +static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
3614 +{
3615 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3616 +
3617 + if (!trans_pcie->fw_mon_page)
3618 + return;
3619 +
3620 + dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
3621 + trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
3622 + __free_pages(trans_pcie->fw_mon_page,
3623 + get_order(trans_pcie->fw_mon_size));
3624 + trans_pcie->fw_mon_page = NULL;
3625 + trans_pcie->fw_mon_phys = 0;
3626 + trans_pcie->fw_mon_size = 0;
3627 +}
3628 +
3629 +static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
3630 +{
3631 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3632 + struct page *page = NULL;
3633 + dma_addr_t phys;
3634 + u32 size = 0;
3635 + u8 power;
3636 +
3637 + if (!max_power) {
3638 + /* default max_power is maximum */
3639 + max_power = 26;
3640 + } else {
3641 + max_power += 11;
3642 + }
3643 +
3644 + if (WARN(max_power > 26,
3645 + "External buffer size for monitor is too big %d, check the FW TLV\n",
3646 + max_power))
3647 + return;
3648 +
3649 + if (trans_pcie->fw_mon_page) {
3650 + dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
3651 + trans_pcie->fw_mon_size,
3652 + DMA_FROM_DEVICE);
3653 + return;
3654 + }
3655 +
3656 + phys = 0;
3657 + for (power = max_power; power >= 11; power--) {
3658 + int order;
3659 +
3660 + size = BIT(power);
3661 + order = get_order(size);
3662 + page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
3663 + order);
3664 + if (!page)
3665 + continue;
3666 +
3667 + phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
3668 + DMA_FROM_DEVICE);
3669 + if (dma_mapping_error(trans->dev, phys)) {
3670 + __free_pages(page, order);
3671 + page = NULL;
3672 + continue;
3673 + }
3674 + IWL_INFO(trans,
3675 + "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
3676 + size, order);
3677 + break;
3678 + }
3679 +
3680 + if (WARN_ON_ONCE(!page))
3681 + return;
3682 +
3683 + if (power != max_power)
3684 + IWL_ERR(trans,
3685 + "Sorry - debug buffer is only %luK while you requested %luK\n",
3686 + (unsigned long)BIT(power - 10),
3687 + (unsigned long)BIT(max_power - 10));
3688 +
3689 + trans_pcie->fw_mon_page = page;
3690 + trans_pcie->fw_mon_phys = phys;
3691 + trans_pcie->fw_mon_size = size;
3692 +}
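/*
 * A note on the allocation loop above: it is a best-effort high-order
 * allocation that starts at the requested power-of-two size and halves
 * until both the page allocator and the DMA mapping succeed. The same
 * back-off as a minimal userspace sketch (malloc stands in for
 * alloc_pages + dma_map_page, not the iwl API; min_power must be >= 1):
 */
#include <stdlib.h>

static void *alloc_backoff(unsigned max_power, unsigned min_power,
                           size_t *got)
{
    for (unsigned p = max_power; p >= min_power; p--) {
        void *buf = malloc((size_t)1 << p);
        if (buf) {
            *got = (size_t)1 << p; /* may be less than first asked for */
            return buf;
        }
    }
    *got = 0;
    return NULL;
}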
3693 +
3694 +static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
3695 +{
3696 + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
3697 + ((reg & 0x0000ffff) | (2 << 28)));
3698 + return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
3699 +}
3700 +
3701 +static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
3702 +{
3703 + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
3704 + iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
3705 + ((reg & 0x0000ffff) | (3 << 28)));
3706 +}
3707 +
3708 +static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
3709 +{
3710 + if (trans->cfg->apmg_not_supported)
3711 + return;
3712 +
3713 + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
3714 + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
3715 + APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
3716 + ~APMG_PS_CTRL_MSK_PWR_SRC);
3717 + else
3718 + iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
3719 + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
3720 + ~APMG_PS_CTRL_MSK_PWR_SRC);
3721 +}
3722 +
3723 +/* PCI registers */
3724 +#define PCI_CFG_RETRY_TIMEOUT 0x041
3725 +
3726 +static void iwl_pcie_apm_config(struct iwl_trans *trans)
3727 +{
3728 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3729 + u16 lctl;
3730 + u16 cap;
3731 +
3732 + /*
3733 + * HW bug W/A for instability in PCIe bus L0S->L1 transition.
3734 + * Check if BIOS (or OS) enabled L1-ASPM on this device.
3735 + * If so (likely), disable L0S, so device moves directly L0->L1;
3736 + * costs negligible amount of power savings.
3737 + * If not (unlikely), enable L0S, so there is at least some
3738 + * power savings, even without L1.
3739 + */
3740 + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
3741 + if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
3742 + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
3743 + else
3744 + iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
3745 + trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
3746 +
3747 + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
3748 + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
3749 + dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
3750 + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
3751 + trans->ltr_enabled ? "En" : "Dis");
3752 +}
3753 +
3754 +/*
3755 + * Start up NIC's basic functionality after it has been reset
3756 + * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
3757 + * NOTE: This does not load uCode nor start the embedded processor
3758 + */
3759 +static int iwl_pcie_apm_init(struct iwl_trans *trans)
3760 +{
3761 + int ret = 0;
3762 + IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
3763 +
3764 + /*
3765 + * Use "set_bit" below rather than "write", to preserve any hardware
3766 + * bits already set by default after reset.
3767 + */
3768 +
3769 + /* Disable L0S exit timer (platform NMI Work/Around) */
3770 + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
3771 + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
3772 + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3773 +
3774 + /*
3775 + * Disable L0s without affecting L1;
3776 + * don't wait for ICH L0s (ICH bug W/A)
3777 + */
3778 + iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
3779 + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
3780 +
3781 + /* Set FH wait threshold to maximum (HW error during stress W/A) */
3782 + iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
3783 +
3784 + /*
3785 + * Enable HAP INTA (interrupt from management bus) to
3786 + * wake device's PCI Express link L1a -> L0s
3787 + */
3788 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
3789 + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
3790 +
3791 + iwl_pcie_apm_config(trans);
3792 +
3793 + /* Configure analog phase-lock-loop before activating to D0A */
3794 + if (trans->cfg->base_params->pll_cfg_val)
3795 + iwl_set_bit(trans, CSR_ANA_PLL_CFG,
3796 + trans->cfg->base_params->pll_cfg_val);
3797 +
3798 + /*
3799 + * Set "initialization complete" bit to move adapter from
3800 + * D0U* --> D0A* (powered-up active) state.
3801 + */
3802 + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3803 +
3804 + /*
3805 + * Wait for clock stabilization; once stabilized, access to
3806 + * device-internal resources is supported, e.g. iwl_write_prph()
3807 + * and accesses to uCode SRAM.
3808 + */
3809 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
3810 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
3811 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
3812 + if (ret < 0) {
3813 + IWL_DEBUG_INFO(trans, "Failed to init the card\n");
3814 + goto out;
3815 + }
3816 +
3817 + if (trans->cfg->host_interrupt_operation_mode) {
3818 + /*
3819 + * This is a bit of an abuse - this is needed for 7260 / 3160
3820 + * only, hence the check of host_interrupt_operation_mode even
3821 + * though what follows is not related to host_interrupt_operation_mode.
3822 + *
3823 + * Enable the oscillator to count wake up time for L1 exit. This
3824 + * consumes slightly more power (100uA) - but allows to be sure
3825 + * that we wake up from L1 on time.
3826 + *
3827 + * This looks weird: read twice the same register, discard the
3828 + * value, set a bit, and yet again, read that same register
3829 + * just to discard the value. But that's the way the hardware
3830 + * seems to like it.
3831 + */
3832 + iwl_read_prph(trans, OSC_CLK);
3833 + iwl_read_prph(trans, OSC_CLK);
3834 + iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
3835 + iwl_read_prph(trans, OSC_CLK);
3836 + iwl_read_prph(trans, OSC_CLK);
3837 + }
3838 +
3839 + /*
3840 + * Enable DMA clock and wait for it to stabilize.
3841 + *
3842 + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
3843 + * bits do not disable clocks. This preserves any hardware
3844 + * bits already set by default in "CLK_CTRL_REG" after reset.
3845 + */
3846 + if (!trans->cfg->apmg_not_supported) {
3847 + iwl_write_prph(trans, APMG_CLK_EN_REG,
3848 + APMG_CLK_VAL_DMA_CLK_RQT);
3849 + udelay(20);
3850 +
3851 + /* Disable L1-Active */
3852 + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
3853 + APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
3854 +
3855 + /* Clear the interrupt in APMG if the NIC is in RFKILL */
3856 + iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
3857 + APMG_RTC_INT_STT_RFKILL);
3858 + }
3859 +
3860 + set_bit(STATUS_DEVICE_ENABLED, &trans->status);
3861 +
3862 +out:
3863 + return ret;
3864 +}
3865 +
3866 +/*
3867 + * Enable LP XTAL to avoid HW bug where device may consume much power if
3868 + * FW is not loaded after device reset. LP XTAL is disabled by default
3869 + * after device HW reset. Do it only if XTAL is fed by internal source.
3870 + * Configure device's "persistence" mode to avoid resetting XTAL again when
3871 + * SHRD_HW_RST occurs in S3.
3872 + */
3873 +static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
3874 +{
3875 + int ret;
3876 + u32 apmg_gp1_reg;
3877 + u32 apmg_xtal_cfg_reg;
3878 + u32 dl_cfg_reg;
3879 +
3880 + /* Force XTAL ON */
3881 + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
3882 + CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
3883 +
3884 + /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
3885 + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
3886 +
3887 + udelay(10);
3888 +
3889 + /*
3890 + * Set "initialization complete" bit to move adapter from
3891 + * D0U* --> D0A* (powered-up active) state.
3892 + */
3893 + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3894 +
3895 + /*
3896 + * Wait for clock stabilization; once stabilized, access to
3897 + * device-internal resources is possible.
3898 + */
3899 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
3900 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
3901 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
3902 + 25000);
3903 + if (WARN_ON(ret < 0)) {
3904 + IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
3905 + /* Release XTAL ON request */
3906 + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
3907 + CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
3908 + return;
3909 + }
3910 +
3911 + /*
3912 + * Clear "disable persistence" to avoid LP XTAL resetting when
3913 + * SHRD_HW_RST is applied in S3.
3914 + */
3915 + iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
3916 + APMG_PCIDEV_STT_VAL_PERSIST_DIS);
3917 +
3918 + /*
3919 + * Force APMG XTAL to be active to prevent its disabling by HW
3920 + * caused by APMG idle state.
3921 + */
3922 + apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
3923 + SHR_APMG_XTAL_CFG_REG);
3924 + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
3925 + apmg_xtal_cfg_reg |
3926 + SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
3927 +
3928 + /*
3929 + * Reset entire device again - do controller reset (results in
3930 + * SHRD_HW_RST). Turn MAC off before proceeding.
3931 + */
3932 + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
3933 +
3934 + udelay(10);
3935 +
3936 + /* Enable LP XTAL by indirect access through CSR */
3937 + apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
3938 + iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
3939 + SHR_APMG_GP1_WF_XTAL_LP_EN |
3940 + SHR_APMG_GP1_CHICKEN_BIT_SELECT);
3941 +
3942 + /* Clear delay line clock power up */
3943 + dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
3944 + iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
3945 + ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
3946 +
3947 + /*
3948 + * Enable persistence mode to avoid LP XTAL resetting when
3949 + * SHRD_HW_RST is applied in S3.
3950 + */
3951 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
3952 + CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
3953 +
3954 + /*
3955 + * Clear "initialization complete" bit to move adapter from
3956 + * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
3957 + */
3958 + iwl_clear_bit(trans, CSR_GP_CNTRL,
3959 + CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3960 +
3961 + /* Activates XTAL resources monitor */
3962 + __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
3963 + CSR_MONITOR_XTAL_RESOURCES);
3964 +
3965 + /* Release XTAL ON request */
3966 + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
3967 + CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
3968 + udelay(10);
3969 +
3970 + /* Release APMG XTAL */
3971 + iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
3972 + apmg_xtal_cfg_reg &
3973 + ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
3974 +}
3975 +
3976 +static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
3977 +{
3978 + int ret = 0;
3979 +
3980 + /* stop device's busmaster DMA activity */
3981 + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
3982 +
3983 + ret = iwl_poll_bit(trans, CSR_RESET,
3984 + CSR_RESET_REG_FLAG_MASTER_DISABLED,
3985 + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
3986 + if (ret < 0)
3987 + IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
3988 +
3989 + IWL_DEBUG_INFO(trans, "stop master\n");
3990 +
3991 + return ret;
3992 +}
3993 +
3994 +static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
3995 +{
3996 + IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
3997 +
3998 + if (op_mode_leave) {
3999 + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
4000 + iwl_pcie_apm_init(trans);
4001 +
4002 + /* inform ME that we are leaving */
4003 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
4004 + iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
4005 + APMG_PCIDEV_STT_VAL_WAKE_ME);
4006 + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
4007 + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
4008 + CSR_RESET_LINK_PWR_MGMT_DISABLED);
4009 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
4010 + CSR_HW_IF_CONFIG_REG_PREPARE |
4011 + CSR_HW_IF_CONFIG_REG_ENABLE_PME);
4012 + mdelay(1);
4013 + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
4014 + CSR_RESET_LINK_PWR_MGMT_DISABLED);
4015 + }
4016 + mdelay(5);
4017 + }
4018 +
4019 + clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
4020 +
4021 + /* Stop device's DMA activity */
4022 + iwl_pcie_apm_stop_master(trans);
4023 +
4024 + if (trans->cfg->lp_xtal_workaround) {
4025 + iwl_pcie_apm_lp_xtal_enable(trans);
4026 + return;
4027 + }
4028 +
4029 + /* Reset the entire device */
4030 + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4031 +
4032 + udelay(10);
4033 +
4034 + /*
4035 + * Clear "initialization complete" bit to move adapter from
4036 + * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
4037 + */
4038 + iwl_clear_bit(trans, CSR_GP_CNTRL,
4039 + CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4040 +}
4041 +
4042 +static int iwl_pcie_nic_init(struct iwl_trans *trans)
4043 +{
4044 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4045 +
4046 + /* nic_init */
4047 + spin_lock(&trans_pcie->irq_lock);
4048 + iwl_pcie_apm_init(trans);
4049 +
4050 + spin_unlock(&trans_pcie->irq_lock);
4051 +
4052 + iwl_pcie_set_pwr(trans, false);
4053 +
4054 + iwl_op_mode_nic_config(trans->op_mode);
4055 +
4056 + /* Allocate the RX queue, or reset if it is already allocated */
4057 + iwl_pcie_rx_init(trans);
4058 +
4059 + /* Allocate or reset and init all Tx and Command queues */
4060 + if (iwl_pcie_tx_init(trans))
4061 + return -ENOMEM;
4062 +
4063 + if (trans->cfg->base_params->shadow_reg_enable) {
4064 + /* enable shadow regs in HW */
4065 + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
4066 + IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
4067 + }
4068 +
4069 + return 0;
4070 +}
4071 +
4072 +#define HW_READY_TIMEOUT (50)
4073 +
4074 +/* Note: returns poll_bit return value, which is >= 0 if success */
4075 +static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
4076 +{
4077 + int ret;
4078 +
4079 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
4080 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
4081 +
4082 + /* See if we got it */
4083 + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
4084 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
4085 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
4086 + HW_READY_TIMEOUT);
4087 +
4088 + if (ret >= 0)
4089 + iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
4090 +
4091 + IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
4092 + return ret;
4093 +}
4094 +
4095 +/* Note: returns standard 0/-ERROR code */
4096 +static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
4097 +{
4098 + int ret;
4099 + int t = 0;
4100 + int iter;
4101 +
4102 + IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
4103 +
4104 + ret = iwl_pcie_set_hw_ready(trans);
4105 + /* If the card is ready, exit 0 */
4106 + if (ret >= 0)
4107 + return 0;
4108 +
4109 + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
4110 + CSR_RESET_LINK_PWR_MGMT_DISABLED);
4111 + msleep(1);
4112 +
4113 + for (iter = 0; iter < 10; iter++) {
4114 + /* If HW is not ready, prepare the conditions to check again */
4115 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
4116 + CSR_HW_IF_CONFIG_REG_PREPARE);
4117 +
4118 + do {
4119 + ret = iwl_pcie_set_hw_ready(trans);
4120 + if (ret >= 0)
4121 + return 0;
4122 +
4123 + usleep_range(200, 1000);
4124 + t += 200;
4125 + } while (t < 150000);
4126 + msleep(25);
4127 + }
4128 +
4129 + IWL_ERR(trans, "Couldn't prepare the card\n");
4130 +
4131 + return ret;
4132 +}
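/*
 * The function above is a bounded two-level poll: up to ten PREPARE
 * attempts, each checking for NIC_READY in 200 us steps against a
 * shared ~150 ms budget. Its control flow reduced to a skeleton
 * (ready/wait_us are illustrative stubs, not the iwl API):
 */
static int poll_ready(int (*ready)(void), void (*wait_us)(int))
{
    int t = 0;

    for (int iter = 0; iter < 10; iter++) {
        /* t deliberately carries across iterations, as in the original,
         * so the 150 ms budget is shared by all attempts */
        do {
            if (ready() >= 0)
                return 0;
            wait_us(200);
            t += 200;
        } while (t < 150000);
    }
    return -1; /* couldn't prepare the card */
}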
4133 +
4134 +/*
4135 + * ucode
4136 + */
4137 +static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
4138 + dma_addr_t phy_addr, u32 byte_cnt)
4139 +{
4140 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4141 + int ret;
4142 +
4143 + trans_pcie->ucode_write_complete = false;
4144 +
4145 + iwl_write_direct32(trans,
4146 + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
4147 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4148 +
4149 + iwl_write_direct32(trans,
4150 + FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
4151 + dst_addr);
4152 +
4153 + iwl_write_direct32(trans,
4154 + FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
4155 + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
4156 +
4157 + iwl_write_direct32(trans,
4158 + FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
4159 + (iwl_get_dma_hi_addr(phy_addr)
4160 + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
4161 +
4162 + iwl_write_direct32(trans,
4163 + FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
4164 + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
4165 + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
4166 + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4167 +
4168 + iwl_write_direct32(trans,
4169 + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
4170 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4171 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
4172 + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4173 +
4174 + ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
4175 + trans_pcie->ucode_write_complete, 5 * HZ);
4176 + if (!ret) {
4177 + IWL_ERR(trans, "Failed to load firmware chunk!\n");
4178 + return -ETIMEDOUT;
4179 + }
4180 +
4181 + return 0;
4182 +}
4183 +
4184 +static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
4185 + const struct fw_desc *section)
4186 +{
4187 + u8 *v_addr;
4188 + dma_addr_t p_addr;
4189 + u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
4190 + int ret = 0;
4191 +
4192 + IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
4193 + section_num);
4194 +
4195 + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
4196 + GFP_KERNEL | __GFP_NOWARN);
4197 + if (!v_addr) {
4198 + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
4199 + chunk_sz = PAGE_SIZE;
4200 + v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
4201 + &p_addr, GFP_KERNEL);
4202 + if (!v_addr)
4203 + return -ENOMEM;
4204 + }
4205 +
4206 + for (offset = 0; offset < section->len; offset += chunk_sz) {
4207 + u32 copy_size, dst_addr;
4208 + bool extended_addr = false;
4209 +
4210 + copy_size = min_t(u32, chunk_sz, section->len - offset);
4211 + dst_addr = section->offset + offset;
4212 +
4213 + if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
4214 + dst_addr <= IWL_FW_MEM_EXTENDED_END)
4215 + extended_addr = true;
4216 +
4217 + if (extended_addr)
4218 + iwl_set_bits_prph(trans, LMPM_CHICK,
4219 + LMPM_CHICK_EXTENDED_ADDR_SPACE);
4220 +
4221 + memcpy(v_addr, (u8 *)section->data + offset, copy_size);
4222 + ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
4223 + copy_size);
4224 +
4225 + if (extended_addr)
4226 + iwl_clear_bits_prph(trans, LMPM_CHICK,
4227 + LMPM_CHICK_EXTENDED_ADDR_SPACE);
4228 +
4229 + if (ret) {
4230 + IWL_ERR(trans,
4231 + "Could not load the [%d] uCode section\n",
4232 + section_num);
4233 + break;
4234 + }
4235 + }
4236 +
4237 + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
4238 + return ret;
4239 +}
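/*
 * iwl_pcie_load_section above streams a firmware section through one
 * bounce buffer: allocate the largest coherent chunk available (fall
 * back to PAGE_SIZE), then memcpy and DMA each slice. The chunking
 * arithmetic by itself (push is a stub for the FH DMA write, not the
 * iwl API):
 */
#include <string.h>

static int load_chunked(const unsigned char *data, unsigned len,
                        unsigned char *bounce, unsigned chunk_sz,
                        int (*push)(const unsigned char *, unsigned))
{
    for (unsigned off = 0; off < len; off += chunk_sz) {
        unsigned copy = len - off < chunk_sz ? len - off : chunk_sz;

        memcpy(bounce, data + off, copy);
        int ret = push(bounce, copy);
        if (ret)
            return ret; /* abort the section on the first failed chunk */
    }
    return 0;
}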
4240 +
4241 +/*
4242 + * The driver takes ownership of the secure machine before FW load
4243 + * to prevent a race with the BT load.
4244 + * W/A for a ROM bug. (should be removed in the next Si step)
4245 + */
4246 +static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
4247 +{
4248 + u32 val, loop = 1000;
4249 +
4250 + /*
4251 + * Check the RSA semaphore is accessible.
4252 + * If the HW isn't locked and the rsa semaphore isn't accessible,
4253 + * we are in trouble.
4254 + */
4255 + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
4256 + if (val & (BIT(1) | BIT(17))) {
4257 + IWL_INFO(trans,
4258 + "can't access the RSA semaphore it is write protected\n");
4259 + return 0;
4260 + }
4261 +
4262 + /* take ownership on the AUX IF */
4263 + iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
4264 + iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
4265 +
4266 + do {
4267 + iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
4268 + val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
4269 + if (val == 0x1) {
4270 + iwl_write_prph(trans, RSA_ENABLE, 0);
4271 + return 0;
4272 + }
4273 +
4274 + udelay(10);
4275 + loop--;
4276 + } while (loop > 0);
4277 +
4278 + IWL_ERR(trans, "Failed to take ownership on secure machine\n");
4279 + return -EIO;
4280 +}
4281 +
4282 +static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
4283 + const struct fw_img *image,
4284 + int cpu,
4285 + int *first_ucode_section)
4286 +{
4287 + int shift_param;
4288 + int i, ret = 0, sec_num = 0x1;
4289 + u32 val, last_read_idx = 0;
4290 +
4291 + if (cpu == 1) {
4292 + shift_param = 0;
4293 + *first_ucode_section = 0;
4294 + } else {
4295 + shift_param = 16;
4296 + (*first_ucode_section)++;
4297 + }
4298 +
4299 + for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
4300 + last_read_idx = i;
4301 +
4302 + /*
4303 + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4304 + * CPU1 to CPU2.
4305 + * PAGING_SEPARATOR_SECTION delimiter - separate between
4306 + * CPU2 non paged to CPU2 paging sec.
4307 + */
4308 + if (!image->sec[i].data ||
4309 + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
4310 + image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
4311 + IWL_DEBUG_FW(trans,
4312 + "Break since Data not valid or Empty section, sec = %d\n",
4313 + i);
4314 + break;
4315 + }
4316 +
4317 + ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
4318 + if (ret)
4319 + return ret;
4320 +
4321 + /* Notify the ucode of the loaded section number and status */
4322 + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
4323 + val = val | (sec_num << shift_param);
4324 + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
4325 + sec_num = (sec_num << 1) | 0x1;
4326 + }
4327 +
4328 + *first_ucode_section = last_read_idx;
4329 +
4330 + if (cpu == 1)
4331 + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
4332 + else
4333 + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4334 +
4335 + return 0;
4336 +}
4337 +
4338 +static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
4339 + const struct fw_img *image,
4340 + int cpu,
4341 + int *first_ucode_section)
4342 +{
4343 + int shift_param;
4344 + int i, ret = 0;
4345 + u32 last_read_idx = 0;
4346 +
4347 + if (cpu == 1) {
4348 + shift_param = 0;
4349 + *first_ucode_section = 0;
4350 + } else {
4351 + shift_param = 16;
4352 + (*first_ucode_section)++;
4353 + }
4354 +
4355 + for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
4356 + last_read_idx = i;
4357 +
4358 + /*
4359 + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
4360 + * CPU1 to CPU2.
4361 + * PAGING_SEPARATOR_SECTION delimiter - separate between
4362 + * CPU2 non paged to CPU2 paging sec.
4363 + */
4364 + if (!image->sec[i].data ||
4365 + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
4366 + image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
4367 + IWL_DEBUG_FW(trans,
4368 + "Break since Data not valid or Empty section, sec = %d\n",
4369 + i);
4370 + break;
4371 + }
4372 +
4373 + ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
4374 + if (ret)
4375 + return ret;
4376 + }
4377 +
4378 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
4379 + iwl_set_bits_prph(trans,
4380 + CSR_UCODE_LOAD_STATUS_ADDR,
4381 + (LMPM_CPU_UCODE_LOADING_COMPLETED |
4382 + LMPM_CPU_HDRS_LOADING_COMPLETED |
4383 + LMPM_CPU_UCODE_LOADING_STARTED) <<
4384 + shift_param);
4385 +
4386 + *first_ucode_section = last_read_idx;
4387 +
4388 + return 0;
4389 +}
4390 +
4391 +static void iwl_pcie_apply_destination(struct iwl_trans *trans)
4392 +{
4393 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4394 + const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
4395 + int i;
4396 +
4397 + if (dest->version)
4398 + IWL_ERR(trans,
4399 + "DBG DEST version is %d - expect issues\n",
4400 + dest->version);
4401 +
4402 + IWL_INFO(trans, "Applying debug destination %s\n",
4403 + get_fw_dbg_mode_string(dest->monitor_mode));
4404 +
4405 + if (dest->monitor_mode == EXTERNAL_MODE)
4406 + iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
4407 + else
4408 + IWL_WARN(trans, "PCI should have external buffer debug\n");
4409 +
4410 + for (i = 0; i < trans->dbg_dest_reg_num; i++) {
4411 + u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
4412 + u32 val = le32_to_cpu(dest->reg_ops[i].val);
4413 +
4414 + switch (dest->reg_ops[i].op) {
4415 + case CSR_ASSIGN:
4416 + iwl_write32(trans, addr, val);
4417 + break;
4418 + case CSR_SETBIT:
4419 + iwl_set_bit(trans, addr, BIT(val));
4420 + break;
4421 + case CSR_CLEARBIT:
4422 + iwl_clear_bit(trans, addr, BIT(val));
4423 + break;
4424 + case PRPH_ASSIGN:
4425 + iwl_write_prph(trans, addr, val);
4426 + break;
4427 + case PRPH_SETBIT:
4428 + iwl_set_bits_prph(trans, addr, BIT(val));
4429 + break;
4430 + case PRPH_CLEARBIT:
4431 + iwl_clear_bits_prph(trans, addr, BIT(val));
4432 + break;
4433 + case PRPH_BLOCKBIT:
4434 + if (iwl_read_prph(trans, addr) & BIT(val)) {
4435 + IWL_ERR(trans,
4436 + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
4437 + val, addr);
4438 + goto monitor;
4439 + }
4440 + break;
4441 + default:
4442 + IWL_ERR(trans, "FW debug - unknown OP %d\n",
4443 + dest->reg_ops[i].op);
4444 + break;
4445 + }
4446 + }
4447 +
4448 +monitor:
4449 + if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
4450 + iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
4451 + trans_pcie->fw_mon_phys >> dest->base_shift);
4452 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
4453 + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
4454 + (trans_pcie->fw_mon_phys +
4455 + trans_pcie->fw_mon_size - 256) >>
4456 + dest->end_shift);
4457 + else
4458 + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
4459 + (trans_pcie->fw_mon_phys +
4460 + trans_pcie->fw_mon_size) >>
4461 + dest->end_shift);
4462 + }
4463 +}
4464 +
4465 +static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
4466 + const struct fw_img *image)
4467 +{
4468 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4469 + int ret = 0;
4470 + int first_ucode_section;
4471 +
4472 + IWL_DEBUG_FW(trans, "working with %s CPU\n",
4473 + image->is_dual_cpus ? "Dual" : "Single");
4474 +
4475 + /* load to FW the binary non secured sections of CPU1 */
4476 + ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
4477 + if (ret)
4478 + return ret;
4479 +
4480 + if (image->is_dual_cpus) {
4481 + /* set CPU2 header address */
4482 + iwl_write_prph(trans,
4483 + LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
4484 + LMPM_SECURE_CPU2_HDR_MEM_SPACE);
4485 +
4486 + /* load to FW the binary sections of CPU2 */
4487 + ret = iwl_pcie_load_cpu_sections(trans, image, 2,
4488 + &first_ucode_section);
4489 + if (ret)
4490 + return ret;
4491 + }
4492 +
4493 + /* supported for 7000 only for the moment */
4494 + if (iwlwifi_mod_params.fw_monitor &&
4495 + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
4496 + iwl_pcie_alloc_fw_monitor(trans, 0);
4497 +
4498 + if (trans_pcie->fw_mon_size) {
4499 + iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
4500 + trans_pcie->fw_mon_phys >> 4);
4501 + iwl_write_prph(trans, MON_BUFF_END_ADDR,
4502 + (trans_pcie->fw_mon_phys +
4503 + trans_pcie->fw_mon_size) >> 4);
4504 + }
4505 + } else if (trans->dbg_dest_tlv) {
4506 + iwl_pcie_apply_destination(trans);
4507 + }
4508 +
4509 + /* release CPU reset */
4510 + iwl_write32(trans, CSR_RESET, 0);
4511 +
4512 + return 0;
4513 +}
4514 +
4515 +static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
4516 + const struct fw_img *image)
4517 +{
4518 + int ret = 0;
4519 + int first_ucode_section;
4520 +
4521 + IWL_DEBUG_FW(trans, "working with %s CPU\n",
4522 + image->is_dual_cpus ? "Dual" : "Single");
4523 +
4524 + if (trans->dbg_dest_tlv)
4525 + iwl_pcie_apply_destination(trans);
4526 +
4527 + /* TODO: remove in the next Si step */
4528 + ret = iwl_pcie_rsa_race_bug_wa(trans);
4529 + if (ret)
4530 + return ret;
4531 +
4532 + /* configure the ucode to be ready to get the secured image */
4533 + /* release CPU reset */
4534 + iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
4535 +
4536 + /* load to FW the binary Secured sections of CPU1 */
4537 + ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
4538 + &first_ucode_section);
4539 + if (ret)
4540 + return ret;
4541 +
4542 + /* load to FW the binary sections of CPU2 */
4543 + return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
4544 + &first_ucode_section);
4545 +}
4546 +
4547 +static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
4548 + const struct fw_img *fw, bool run_in_rfkill)
4549 +{
4550 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4551 + bool hw_rfkill;
4552 + int ret;
4553 +
4554 + mutex_lock(&trans_pcie->mutex);
4555 +
4556 + /* Someone called stop_device, don't try to start_fw */
4557 + if (trans_pcie->is_down) {
4558 + IWL_WARN(trans,
4559 + "Can't start_fw since the HW hasn't been started\n");
4560 + ret = -EIO;
4561 + goto out;
4562 + }
4563 +
4564 + /* This may fail if AMT took ownership of the device */
4565 + if (iwl_pcie_prepare_card_hw(trans)) {
4566 + IWL_WARN(trans, "Exit HW not ready\n");
4567 + ret = -EIO;
4568 + goto out;
4569 + }
4570 +
4571 + iwl_enable_rfkill_int(trans);
4572 +
4573 + /* If platform's RF_KILL switch is NOT set to KILL */
4574 + hw_rfkill = iwl_is_rfkill_set(trans);
4575 + if (hw_rfkill)
4576 + set_bit(STATUS_RFKILL, &trans->status);
4577 + else
4578 + clear_bit(STATUS_RFKILL, &trans->status);
4579 + iwl_trans_pcie_rf_kill(trans, hw_rfkill);
4580 + if (hw_rfkill && !run_in_rfkill) {
4581 + ret = -ERFKILL;
4582 + goto out;
4583 + }
4584 +
4585 + iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
4586 +
4587 + ret = iwl_pcie_nic_init(trans);
4588 + if (ret) {
4589 + IWL_ERR(trans, "Unable to init nic\n");
4590 + goto out;
4591 + }
4592 +
4593 + /* make sure rfkill handshake bits are cleared */
4594 + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4595 + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
4596 + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4597 +
4598 + /* clear (again), then enable host interrupts */
4599 + iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
4600 + iwl_enable_interrupts(trans);
4601 +
4602 + /* really make sure rfkill handshake bits are cleared */
4603 + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4604 + iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4605 +
4606 + /* Load the given image to the HW */
4607 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
4608 + ret = iwl_pcie_load_given_ucode_8000(trans, fw);
4609 + else
4610 + ret = iwl_pcie_load_given_ucode(trans, fw);
4611 +
4612 +out:
4613 + mutex_unlock(&trans_pcie->mutex);
4614 + return ret;
4615 +}
4616 +
4617 +static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
4618 +{
4619 + iwl_pcie_reset_ict(trans);
4620 + iwl_pcie_tx_start(trans, scd_addr);
4621 +}
4622 +
4623 +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
4624 +{
4625 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4626 + bool hw_rfkill, was_hw_rfkill;
4627 +
4628 + lockdep_assert_held(&trans_pcie->mutex);
4629 +
4630 + if (trans_pcie->is_down)
4631 + return;
4632 +
4633 + trans_pcie->is_down = true;
4634 +
4635 + was_hw_rfkill = iwl_is_rfkill_set(trans);
4636 +
4637 + /* tell the device to stop sending interrupts */
4638 + spin_lock(&trans_pcie->irq_lock);
4639 + iwl_disable_interrupts(trans);
4640 + spin_unlock(&trans_pcie->irq_lock);
4641 +
4642 + /* device going down, Stop using ICT table */
4643 + iwl_pcie_disable_ict(trans);
4644 +
4645 + /*
4646 + * If a HW restart happens during firmware loading,
4647 + * then the firmware loading might call this function
4648 + * and later it might be called again due to the
4649 + * restart. So don't process again if the device is
4650 + * already dead.
4651 + */
4652 + if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
4653 + IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
4654 + iwl_pcie_tx_stop(trans);
4655 + iwl_pcie_rx_stop(trans);
4656 +
4657 + /* Power-down device's busmaster DMA clocks */
4658 + if (!trans->cfg->apmg_not_supported) {
4659 + iwl_write_prph(trans, APMG_CLK_DIS_REG,
4660 + APMG_CLK_VAL_DMA_CLK_RQT);
4661 + udelay(5);
4662 + }
4663 + }
4664 +
4665 + /* Make sure (redundant) we've released our request to stay awake */
4666 + iwl_clear_bit(trans, CSR_GP_CNTRL,
4667 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4668 +
4669 + /* Stop the device, and put it in low power state */
4670 + iwl_pcie_apm_stop(trans, false);
4671 +
4672 + /* stop and reset the on-board processor */
4673 + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4674 + udelay(20);
4675 +
4676 + /*
4677 + * Upon stop, the APM issues an interrupt if HW RF kill is set.
4678 + * This is a bug in certain versions of the hardware.
4679 + * Certain devices also keep sending HW RF kill interrupt all
4680 + * the time, unless the interrupt is ACKed even if the interrupt
4681 + * should be masked. Re-ACK all the interrupts here.
4682 + */
4683 + spin_lock(&trans_pcie->irq_lock);
4684 + iwl_disable_interrupts(trans);
4685 + spin_unlock(&trans_pcie->irq_lock);
4686 +
4687 +
4688 + /* clear all status bits */
4689 + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
4690 + clear_bit(STATUS_INT_ENABLED, &trans->status);
4691 + clear_bit(STATUS_TPOWER_PMI, &trans->status);
4692 + clear_bit(STATUS_RFKILL, &trans->status);
4693 +
4694 + /*
4695 + * Even if we stop the HW, we still want the RF kill
4696 + * interrupt
4697 + */
4698 + iwl_enable_rfkill_int(trans);
4699 +
4700 + /*
4701 + * Check again since the RF kill state may have changed while
4702 + * all the interrupts were disabled, in this case we couldn't
4703 + * receive the RF kill interrupt and update the state in the
4704 + * op_mode.
4705 + * Don't call the op_mode if the rfkill state hasn't changed.
4706 + * This allows the op_mode to call stop_device from the rfkill
4707 + * notification without endless recursion. Under very rare
4708 + * circumstances, we might have a small recursion if the rfkill
4709 + * state changed exactly now while we were called from stop_device.
4710 + * This is very unlikely but can happen and is supported.
4711 + */
4712 + hw_rfkill = iwl_is_rfkill_set(trans);
4713 + if (hw_rfkill)
4714 + set_bit(STATUS_RFKILL, &trans->status);
4715 + else
4716 + clear_bit(STATUS_RFKILL, &trans->status);
4717 + if (hw_rfkill != was_hw_rfkill)
4718 + iwl_trans_pcie_rf_kill(trans, hw_rfkill);
4719 +
4720 + /* re-take ownership to prevent other users from stealing the device */
4721 + iwl_pcie_prepare_card_hw(trans);
4722 +}
4723 +
4724 +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
4725 +{
4726 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4727 +
4728 + mutex_lock(&trans_pcie->mutex);
4729 + _iwl_trans_pcie_stop_device(trans, low_power);
4730 + mutex_unlock(&trans_pcie->mutex);
4731 +}
4732 +
4733 +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
4734 +{
4735 + struct iwl_trans_pcie __maybe_unused *trans_pcie =
4736 + IWL_TRANS_GET_PCIE_TRANS(trans);
4737 +
4738 + lockdep_assert_held(&trans_pcie->mutex);
4739 +
4740 + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
4741 + _iwl_trans_pcie_stop_device(trans, true);
4742 +}
4743 +
4744 +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
4745 +{
4746 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4747 +
4748 + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
4749 + /* Enable persistence mode to avoid reset */
4750 + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
4751 + CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
4752 + }
4753 +
4754 + iwl_disable_interrupts(trans);
4755 +
4756 + /*
4757 + * in testing mode, the host stays awake and the
4758 + * hardware won't be reset (not even partially)
4759 + */
4760 + if (test)
4761 + return;
4762 +
4763 + iwl_pcie_disable_ict(trans);
4764 +
4765 + synchronize_irq(trans_pcie->pci_dev->irq);
4766 +
4767 + iwl_clear_bit(trans, CSR_GP_CNTRL,
4768 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4769 + iwl_clear_bit(trans, CSR_GP_CNTRL,
4770 + CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4771 +
4772 + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
4773 + /*
4774 + * reset TX queues -- some of their registers reset during S3
4775 + * so if we don't reset everything here the D3 image would try
4776 + * to execute some invalid memory upon resume
4777 + */
4778 + iwl_trans_pcie_tx_reset(trans);
4779 + }
4780 +
4781 + iwl_pcie_set_pwr(trans, true);
4782 +}
4783 +
4784 +static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
4785 + enum iwl_d3_status *status,
4786 + bool test)
4787 +{
4788 + u32 val;
4789 + int ret;
4790 +
4791 + if (test) {
4792 + iwl_enable_interrupts(trans);
4793 + *status = IWL_D3_STATUS_ALIVE;
4794 + return 0;
4795 + }
4796 +
4797 + /*
4798 + * Also enables interrupts - none will happen as the device doesn't
4799 + * know we're waking it up, only when the opmode actually tells it
4800 + * after this call.
4801 + */
4802 + iwl_pcie_reset_ict(trans);
4803 +
4804 + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4805 + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4806 +
4807 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
4808 + udelay(2);
4809 +
4810 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
4811 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4812 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4813 + 25000);
4814 + if (ret < 0) {
4815 + IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
4816 + return ret;
4817 + }
4818 +
4819 + iwl_pcie_set_pwr(trans, false);
4820 +
4821 + if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
4822 + iwl_clear_bit(trans, CSR_GP_CNTRL,
4823 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4824 + } else {
4825 + iwl_trans_pcie_tx_reset(trans);
4826 +
4827 + ret = iwl_pcie_rx_init(trans);
4828 + if (ret) {
4829 + IWL_ERR(trans,
4830 + "Failed to resume the device (RX reset)\n");
4831 + return ret;
4832 + }
4833 + }
4834 +
4835 + val = iwl_read32(trans, CSR_RESET);
4836 + if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
4837 + *status = IWL_D3_STATUS_RESET;
4838 + else
4839 + *status = IWL_D3_STATUS_ALIVE;
4840 +
4841 + return 0;
4842 +}
4843 +
4844 +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
4845 +{
4846 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4847 + bool hw_rfkill;
4848 + int err;
4849 +
4850 + lockdep_assert_held(&trans_pcie->mutex);
4851 +
4852 + err = iwl_pcie_prepare_card_hw(trans);
4853 + if (err) {
4854 + IWL_ERR(trans, "Error while preparing HW: %d\n", err);
4855 + return err;
4856 + }
4857 +
4858 + /* Reset the entire device */
4859 + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4860 +
4861 + usleep_range(10, 15);
4862 +
4863 + iwl_pcie_apm_init(trans);
4864 +
4865 + /* From now on, the op_mode will be kept updated about RF kill state */
4866 + iwl_enable_rfkill_int(trans);
4867 +
4868 + /* Set is_down to false here so that...*/
4869 + trans_pcie->is_down = false;
4870 +
4871 + hw_rfkill = iwl_is_rfkill_set(trans);
4872 + if (hw_rfkill)
4873 + set_bit(STATUS_RFKILL, &trans->status);
4874 + else
4875 + clear_bit(STATUS_RFKILL, &trans->status);
4876 + /* ... rfkill can call stop_device and set it false if needed */
4877 + iwl_trans_pcie_rf_kill(trans, hw_rfkill);
4878 +
4879 + return 0;
4880 +}
4881 +
4882 +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
4883 +{
4884 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4885 + int ret;
4886 +
4887 + mutex_lock(&trans_pcie->mutex);
4888 + ret = _iwl_trans_pcie_start_hw(trans, low_power);
4889 + mutex_unlock(&trans_pcie->mutex);
4890 +
4891 + return ret;
4892 +}
4893 +
4894 +static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
4895 +{
4896 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4897 +
4898 + mutex_lock(&trans_pcie->mutex);
4899 +
4900 + /* disable interrupts - don't enable HW RF kill interrupt */
4901 + spin_lock(&trans_pcie->irq_lock);
4902 + iwl_disable_interrupts(trans);
4903 + spin_unlock(&trans_pcie->irq_lock);
4904 +
4905 + iwl_pcie_apm_stop(trans, true);
4906 +
4907 + spin_lock(&trans_pcie->irq_lock);
4908 + iwl_disable_interrupts(trans);
4909 + spin_unlock(&trans_pcie->irq_lock);
4910 +
4911 + iwl_pcie_disable_ict(trans);
4912 +
4913 + mutex_unlock(&trans_pcie->mutex);
4914 +
4915 + synchronize_irq(trans_pcie->pci_dev->irq);
4916 +}
4917 +
4918 +static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
4919 +{
4920 + writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
4921 +}
4922 +
4923 +static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
4924 +{
4925 + writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
4926 +}
4927 +
4928 +static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
4929 +{
4930 + return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
4931 +}
4932 +
4933 +static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
4934 +{
4935 + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
4936 + ((reg & 0x000FFFFF) | (3 << 24)));
4937 + return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
4938 +}
4939 +
4940 +static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
4941 + u32 val)
4942 +{
4943 + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
4944 + ((addr & 0x000FFFFF) | (3 << 24)));
4945 + iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
4946 +}
4947 +
4948 +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
4949 +{
4950 + WARN_ON(1);
4951 + return 0;
4952 +}
4953 +
4954 +static void iwl_trans_pcie_configure(struct iwl_trans *trans,
4955 + const struct iwl_trans_config *trans_cfg)
4956 +{
4957 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4958 +
4959 + trans_pcie->cmd_queue = trans_cfg->cmd_queue;
4960 + trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
4961 + trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
4962 + if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
4963 + trans_pcie->n_no_reclaim_cmds = 0;
4964 + else
4965 + trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
4966 + if (trans_pcie->n_no_reclaim_cmds)
4967 + memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
4968 + trans_pcie->n_no_reclaim_cmds * sizeof(u8));
4969 +
4970 + trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
4971 + trans_pcie->rx_page_order =
4972 + iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
4973 +
4974 + trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
4975 + trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
4976 + trans_pcie->scd_set_active = trans_cfg->scd_set_active;
4977 + trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
4978 +
4979 + trans->command_groups = trans_cfg->command_groups;
4980 + trans->command_groups_size = trans_cfg->command_groups_size;
4981 +
4982 + /* init ref_count to 1 (should be cleared when ucode is loaded) */
4983 + trans_pcie->ref_count = 1;
4984 +
4985 + /* Initialize NAPI here - it should be before registering to mac80211
4986 + * in the opmode but after the HW struct is allocated.
4987 + * As this function may be called again in some corner cases, don't
4988 + * do anything if NAPI was already initialized.
4989 + */
4990 + if (!trans_pcie->napi.poll) {
4991 + init_dummy_netdev(&trans_pcie->napi_dev);
4992 + netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
4993 + iwl_pcie_dummy_napi_poll, 64);
4994 + }
4995 +}
4996 +
4997 +void iwl_trans_pcie_free(struct iwl_trans *trans)
4998 +{
4999 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5000 + int i;
5001 +
5002 + synchronize_irq(trans_pcie->pci_dev->irq);
5003 +
5004 + iwl_pcie_tx_free(trans);
5005 + iwl_pcie_rx_free(trans);
5006 +
5007 + free_irq(trans_pcie->pci_dev->irq, trans);
5008 + iwl_pcie_free_ict(trans);
5009 +
5010 + pci_disable_msi(trans_pcie->pci_dev);
5011 + iounmap(trans_pcie->hw_base);
5012 + pci_release_regions(trans_pcie->pci_dev);
5013 + pci_disable_device(trans_pcie->pci_dev);
5014 +
5015 + if (trans_pcie->napi.poll)
5016 + netif_napi_del(&trans_pcie->napi);
5017 +
5018 + iwl_pcie_free_fw_monitor(trans);
5019 +
5020 + for_each_possible_cpu(i) {
5021 + struct iwl_tso_hdr_page *p =
5022 + per_cpu_ptr(trans_pcie->tso_hdr_page, i);
5023 +
5024 + if (p->page)
5025 + __free_page(p->page);
5026 + }
5027 +
5028 + free_percpu(trans_pcie->tso_hdr_page);
5029 + iwl_trans_free(trans);
5030 +}
5031 +
5032 +static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
5033 +{
5034 + if (state)
5035 + set_bit(STATUS_TPOWER_PMI, &trans->status);
5036 + else
5037 + clear_bit(STATUS_TPOWER_PMI, &trans->status);
5038 +}
5039 +
5040 +static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
5041 + unsigned long *flags)
5042 +{
5043 + int ret;
5044 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5045 +
5046 + spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
5047 +
5048 + if (trans_pcie->cmd_hold_nic_awake)
5049 + goto out;
5050 +
5051 + /* this bit wakes up the NIC */
5052 + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
5053 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5054 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
5055 + udelay(2);
5056 +
5057 + /*
5058 + * These bits say the device is running, and should keep running for
5059 + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
5060 + * but they do not indicate that embedded SRAM is restored yet;
5061 + * 3945 and 4965 have volatile SRAM, and must save/restore contents
5062 + * to/from host DRAM when sleeping/waking for power-saving.
5063 + * Each direction takes approximately 1/4 millisecond; with this
5064 + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
5065 + * series of register accesses is expected (e.g. reading the Event Log),
5066 + * to keep the device from sleeping.
5067 + *
5068 + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
5069 + * SRAM is okay/restored. We don't check that here because this call
5070 + * is just for hardware register access; but GP1 MAC_SLEEP check is a
5071 + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
5072 + *
5073 + * 5000 series and later (including 1000 series) have non-volatile SRAM,
5074 + * and do not save/restore SRAM when power cycling.
5075 + */
5076 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
5077 + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
5078 + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
5079 + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
5080 + if (unlikely(ret < 0)) {
5081 + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
5082 + WARN_ONCE(1,
5083 + "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
5084 + iwl_read32(trans, CSR_GP_CNTRL));
5085 + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
5086 + return false;
5087 + }
5088 +
5089 +out:
5090 + /*
5091 + * Fool sparse by faking we release the lock - sparse will
5092 + * track nic_access anyway.
5093 + */
5094 + __release(&trans_pcie->reg_lock);
5095 + return true;
5096 +}
5097 +
5098 +static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
5099 + unsigned long *flags)
5100 +{
5101 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5102 +
5103 + lockdep_assert_held(&trans_pcie->reg_lock);
5104 +
5105 + /*
5106 + * Fool sparse by faking we acquire the lock - sparse will
5107 + * track nic_access anyway.
5108 + */
5109 + __acquire(&trans_pcie->reg_lock);
5110 +
5111 + if (trans_pcie->cmd_hold_nic_awake)
5112 + goto out;
5113 +
5114 + __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
5115 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5116 + /*
5117 + * Above we read the CSR_GP_CNTRL register, which will flush
5118 + * any previous writes, but we need the write that clears the
5119 + * MAC_ACCESS_REQ bit to be performed before any other writes
5120 + * scheduled on different CPUs (after we drop reg_lock).
5121 + */
5122 + mmiowb();
5123 +out:
5124 + spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
5125 +}
5126 +
5127 +static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
5128 + void *buf, int dwords)
5129 +{
5130 + unsigned long flags;
5131 + int offs, ret = 0;
5132 + u32 *vals = buf;
5133 +
5134 + if (iwl_trans_grab_nic_access(trans, &flags)) {
5135 + iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
5136 + for (offs = 0; offs < dwords; offs++)
5137 + vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
5138 + iwl_trans_release_nic_access(trans, &flags);
5139 + } else {
5140 + ret = -EBUSY;
5141 + }
5142 + return ret;
5143 +}
5144 +
5145 +static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
5146 + const void *buf, int dwords)
5147 +{
5148 + unsigned long flags;
5149 + int offs, ret = 0;
5150 + const u32 *vals = buf;
5151 +
5152 + if (iwl_trans_grab_nic_access(trans, &flags)) {
5153 + iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
5154 + for (offs = 0; offs < dwords; offs++)
5155 + iwl_write32(trans, HBUS_TARG_MEM_WDAT,
5156 + vals ? vals[offs] : 0);
5157 + iwl_trans_release_nic_access(trans, &flags);
5158 + } else {
5159 + ret = -EBUSY;
5160 + }
5161 + return ret;
5162 +}
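The two accessors above are the canonical users of the grab/release discipline described in the long comment in iwl_trans_pcie_grab_nic_access(). A minimal caller sketch, going through the generic iwl_trans_read_mem() wrapper; the function name and the SRAM offset are hypothetical, for illustration only:

static int example_read_sram(struct iwl_trans *trans)
{
	u32 words[4];

	/*
	 * iwl_trans_read_mem() lands in iwl_trans_pcie_read_mem() above:
	 * grab NIC access, read the dwords via HBUS_TARG_MEM_R*, then
	 * release access. Returns -EBUSY if the NIC could not be woken.
	 */
	return iwl_trans_read_mem(trans, 0x800000 /* hypothetical */,
				  words, ARRAY_SIZE(words));
}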
5163 +
5164 +static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
5165 + unsigned long txqs,
5166 + bool freeze)
5167 +{
5168 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5169 + int queue;
5170 +
5171 + for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
5172 + struct iwl_txq *txq = &trans_pcie->txq[queue];
5173 + unsigned long now;
5174 +
5175 + spin_lock_bh(&txq->lock);
5176 +
5177 + now = jiffies;
5178 +
5179 + if (txq->frozen == freeze)
5180 + goto next_queue;
5181 +
5182 + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
5183 + freeze ? "Freezing" : "Waking", queue);
5184 +
5185 + txq->frozen = freeze;
5186 +
5187 + if (txq->q.read_ptr == txq->q.write_ptr)
5188 + goto next_queue;
5189 +
5190 + if (freeze) {
5191 + if (unlikely(time_after(now,
5192 + txq->stuck_timer.expires))) {
5193 + /*
5194 + * The timer should have fired, maybe it is
5195 + * spinning right now on the lock.
5196 + */
5197 + goto next_queue;
5198 + }
5199 + /* remember how long until the timer fires */
5200 + txq->frozen_expiry_remainder =
5201 + txq->stuck_timer.expires - now;
5202 + del_timer(&txq->stuck_timer);
5203 + goto next_queue;
5204 + }
5205 +
5206 + /*
5207 + * Wake a non-empty queue -> arm timer with the
5208 + * remainder before it froze
5209 + */
5210 + mod_timer(&txq->stuck_timer,
5211 + now + txq->frozen_expiry_remainder);
5212 +
5213 +next_queue:
5214 + spin_unlock_bh(&txq->lock);
5215 + }
5216 +}
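The remainder bookkeeping above is easiest to follow with concrete jiffies values; a worked illustration with made-up numbers:

/*
 * Suppose the stuck timer is armed for jiffies == 1100 and the queue
 * is frozen at jiffies == 1000:
 *
 *   freeze:  txq->frozen_expiry_remainder = 1100 - 1000 = 100;
 *            del_timer(&txq->stuck_timer);
 *
 * If the queue is woken much later, at jiffies == 5000:
 *
 *   wake:    mod_timer(&txq->stuck_timer, 5000 + 100);
 *
 * so the queue keeps its unexpired 100-jiffy budget instead of firing
 * immediately off the stale 1100 expiry.
 */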
5217 +
5218 +static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
5219 +{
5220 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5221 + int i;
5222 +
5223 + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
5224 + struct iwl_txq *txq = &trans_pcie->txq[i];
5225 +
5226 + if (i == trans_pcie->cmd_queue)
5227 + continue;
5228 +
5229 + spin_lock_bh(&txq->lock);
5230 +
5231 + if (!block && !(WARN_ON_ONCE(!txq->block))) {
5232 + txq->block--;
5233 + if (!txq->block) {
5234 + iwl_write32(trans, HBUS_TARG_WRPTR,
5235 + txq->q.write_ptr | (i << 8));
5236 + }
5237 + } else if (block) {
5238 + txq->block++;
5239 + }
5240 +
5241 + spin_unlock_bh(&txq->lock);
5242 + }
5243 +}
5244 +
5245 +#define IWL_FLUSH_WAIT_MS 2000
5246 +
5247 +static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
5248 +{
5249 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5250 + struct iwl_txq *txq;
5251 + struct iwl_queue *q;
5252 + int cnt;
5253 + unsigned long now = jiffies;
5254 + u32 scd_sram_addr;
5255 + u8 buf[16];
5256 + int ret = 0;
5257 +
5258 + /* waiting for all the tx frames to complete might take a while */
5259 + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
5260 + u8 wr_ptr;
5261 +
5262 + if (cnt == trans_pcie->cmd_queue)
5263 + continue;
5264 + if (!test_bit(cnt, trans_pcie->queue_used))
5265 + continue;
5266 + if (!(BIT(cnt) & txq_bm))
5267 + continue;
5268 +
5269 + IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
5270 + txq = &trans_pcie->txq[cnt];
5271 + q = &txq->q;
5272 + wr_ptr = ACCESS_ONCE(q->write_ptr);
5273 +
5274 + while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
5275 + !time_after(jiffies,
5276 + now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
5277 + u8 write_ptr = ACCESS_ONCE(q->write_ptr);
5278 +
5279 + if (WARN_ONCE(wr_ptr != write_ptr,
5280 + "WR pointer moved while flushing %d -> %d\n",
5281 + wr_ptr, write_ptr))
5282 + return -ETIMEDOUT;
5283 + msleep(1);
5284 + }
5285 +
5286 + if (q->read_ptr != q->write_ptr) {
5287 + IWL_ERR(trans,
5288 + "fail to flush all tx fifo queues Q %d\n", cnt);
5289 + ret = -ETIMEDOUT;
5290 + break;
5291 + }
5292 + IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
5293 + }
5294 +
5295 + if (!ret)
5296 + return 0;
5297 +
5298 + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
5299 + txq->q.read_ptr, txq->q.write_ptr);
5300 +
5301 + scd_sram_addr = trans_pcie->scd_base_addr +
5302 + SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
5303 + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
5304 +
5305 + iwl_print_hex_error(trans, buf, sizeof(buf));
5306 +
5307 + for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
5308 + IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
5309 + iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
5310 +
5311 + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
5312 + u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
5313 + u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
5314 + bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
5315 + u32 tbl_dw =
5316 + iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
5317 + SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
5318 +
5319 + if (cnt & 0x1)
5320 + tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
5321 + else
5322 + tbl_dw = tbl_dw & 0x0000FFFF;
5323 +
5324 + IWL_ERR(trans,
5325 + "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
5326 + cnt, active ? "" : "in", fifo, tbl_dw,
5327 + iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
5328 + (TFD_QUEUE_SIZE_MAX - 1),
5329 + iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
5330 + }
5331 +
5332 + return ret;
5333 +}
5334 +
5335 +static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
5336 + u32 mask, u32 value)
5337 +{
5338 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5339 + unsigned long flags;
5340 +
5341 + spin_lock_irqsave(&trans_pcie->reg_lock, flags);
5342 + __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
5343 + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
5344 +}
5345 +
5346 +void iwl_trans_pcie_ref(struct iwl_trans *trans)
5347 +{
5348 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5349 + unsigned long flags;
5350 +
5351 + if (iwlwifi_mod_params.d0i3_disable)
5352 + return;
5353 +
5354 + spin_lock_irqsave(&trans_pcie->ref_lock, flags);
5355 + IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
5356 + trans_pcie->ref_count++;
5357 + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
5358 +}
5359 +
5360 +void iwl_trans_pcie_unref(struct iwl_trans *trans)
5361 +{
5362 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5363 + unsigned long flags;
5364 +
5365 + if (iwlwifi_mod_params.d0i3_disable)
5366 + return;
5367 +
5368 + spin_lock_irqsave(&trans_pcie->ref_lock, flags);
5369 + IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
5370 + if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
5371 + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
5372 + return;
5373 + }
5374 + trans_pcie->ref_count--;
5375 + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
5376 +}
5377 +
5378 +static const char *get_csr_string(int cmd)
5379 +{
5380 +#define IWL_CMD(x) case x: return #x
5381 + switch (cmd) {
5382 + IWL_CMD(CSR_HW_IF_CONFIG_REG);
5383 + IWL_CMD(CSR_INT_COALESCING);
5384 + IWL_CMD(CSR_INT);
5385 + IWL_CMD(CSR_INT_MASK);
5386 + IWL_CMD(CSR_FH_INT_STATUS);
5387 + IWL_CMD(CSR_GPIO_IN);
5388 + IWL_CMD(CSR_RESET);
5389 + IWL_CMD(CSR_GP_CNTRL);
5390 + IWL_CMD(CSR_HW_REV);
5391 + IWL_CMD(CSR_EEPROM_REG);
5392 + IWL_CMD(CSR_EEPROM_GP);
5393 + IWL_CMD(CSR_OTP_GP_REG);
5394 + IWL_CMD(CSR_GIO_REG);
5395 + IWL_CMD(CSR_GP_UCODE_REG);
5396 + IWL_CMD(CSR_GP_DRIVER_REG);
5397 + IWL_CMD(CSR_UCODE_DRV_GP1);
5398 + IWL_CMD(CSR_UCODE_DRV_GP2);
5399 + IWL_CMD(CSR_LED_REG);
5400 + IWL_CMD(CSR_DRAM_INT_TBL_REG);
5401 + IWL_CMD(CSR_GIO_CHICKEN_BITS);
5402 + IWL_CMD(CSR_ANA_PLL_CFG);
5403 + IWL_CMD(CSR_HW_REV_WA_REG);
5404 + IWL_CMD(CSR_MONITOR_STATUS_REG);
5405 + IWL_CMD(CSR_DBG_HPET_MEM_REG);
5406 + default:
5407 + return "UNKNOWN";
5408 + }
5409 +#undef IWL_CMD
5410 +}
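The IWL_CMD() macro above relies on plain preprocessor stringification; for reference, this is what one line expands to before compilation:

	IWL_CMD(CSR_RESET);
	/* becomes: */
	case CSR_RESET: return "CSR_RESET";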
5411 +
5412 +void iwl_pcie_dump_csr(struct iwl_trans *trans)
5413 +{
5414 + int i;
5415 + static const u32 csr_tbl[] = {
5416 + CSR_HW_IF_CONFIG_REG,
5417 + CSR_INT_COALESCING,
5418 + CSR_INT,
5419 + CSR_INT_MASK,
5420 + CSR_FH_INT_STATUS,
5421 + CSR_GPIO_IN,
5422 + CSR_RESET,
5423 + CSR_GP_CNTRL,
5424 + CSR_HW_REV,
5425 + CSR_EEPROM_REG,
5426 + CSR_EEPROM_GP,
5427 + CSR_OTP_GP_REG,
5428 + CSR_GIO_REG,
5429 + CSR_GP_UCODE_REG,
5430 + CSR_GP_DRIVER_REG,
5431 + CSR_UCODE_DRV_GP1,
5432 + CSR_UCODE_DRV_GP2,
5433 + CSR_LED_REG,
5434 + CSR_DRAM_INT_TBL_REG,
5435 + CSR_GIO_CHICKEN_BITS,
5436 + CSR_ANA_PLL_CFG,
5437 + CSR_MONITOR_STATUS_REG,
5438 + CSR_HW_REV_WA_REG,
5439 + CSR_DBG_HPET_MEM_REG
5440 + };
5441 + IWL_ERR(trans, "CSR values:\n");
5442 + IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
5443 + "CSR_INT_PERIODIC_REG)\n");
5444 + for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
5445 + IWL_ERR(trans, " %25s: 0X%08x\n",
5446 + get_csr_string(csr_tbl[i]),
5447 + iwl_read32(trans, csr_tbl[i]));
5448 + }
5449 +}
5450 +
5451 +#ifdef CONFIG_IWLWIFI_DEBUGFS
5452 +/* creation and removal of debugfs files */
5453 +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
5454 + if (!debugfs_create_file(#name, mode, parent, trans, \
5455 + &iwl_dbgfs_##name##_ops)) \
5456 + goto err; \
5457 +} while (0)
5458 +
5459 +/* file operations */
5460 +#define DEBUGFS_READ_FILE_OPS(name) \
5461 +static const struct file_operations iwl_dbgfs_##name##_ops = { \
5462 + .read = iwl_dbgfs_##name##_read, \
5463 + .open = simple_open, \
5464 + .llseek = generic_file_llseek, \
5465 +};
5466 +
5467 +#define DEBUGFS_WRITE_FILE_OPS(name) \
5468 +static const struct file_operations iwl_dbgfs_##name##_ops = { \
5469 + .write = iwl_dbgfs_##name##_write, \
5470 + .open = simple_open, \
5471 + .llseek = generic_file_llseek, \
5472 +};
5473 +
5474 +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
5475 +static const struct file_operations iwl_dbgfs_##name##_ops = { \
5476 + .write = iwl_dbgfs_##name##_write, \
5477 + .read = iwl_dbgfs_##name##_read, \
5478 + .open = simple_open, \
5479 + .llseek = generic_file_llseek, \
5480 +};
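For readability, the fully expanded form of one generator - DEBUGFS_READ_WRITE_FILE_OPS(interrupt), used further below, produces exactly:

static const struct file_operations iwl_dbgfs_interrupt_ops = {
	.write = iwl_dbgfs_interrupt_write,
	.read = iwl_dbgfs_interrupt_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};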
5481 +
5482 +static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
5483 + char __user *user_buf,
5484 + size_t count, loff_t *ppos)
5485 +{
5486 + struct iwl_trans *trans = file->private_data;
5487 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5488 + struct iwl_txq *txq;
5489 + struct iwl_queue *q;
5490 + char *buf;
5491 + int pos = 0;
5492 + int cnt;
5493 + int ret;
5494 + size_t bufsz;
5495 +
5496 + bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
5497 +
5498 + if (!trans_pcie->txq)
5499 + return -EAGAIN;
5500 +
5501 + buf = kzalloc(bufsz, GFP_KERNEL);
5502 + if (!buf)
5503 + return -ENOMEM;
5504 +
5505 + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
5506 + txq = &trans_pcie->txq[cnt];
5507 + q = &txq->q;
5508 + pos += scnprintf(buf + pos, bufsz - pos,
5509 + "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
5510 + cnt, q->read_ptr, q->write_ptr,
5511 + !!test_bit(cnt, trans_pcie->queue_used),
5512 + !!test_bit(cnt, trans_pcie->queue_stopped),
5513 + txq->need_update, txq->frozen,
5514 + (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
5515 + }
5516 + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
5517 + kfree(buf);
5518 + return ret;
5519 +}
5520 +
5521 +static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
5522 + char __user *user_buf,
5523 + size_t count, loff_t *ppos)
5524 +{
5525 + struct iwl_trans *trans = file->private_data;
5526 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5527 + struct iwl_rxq *rxq = &trans_pcie->rxq;
5528 + char buf[256];
5529 + int pos = 0;
5530 + const size_t bufsz = sizeof(buf);
5531 +
5532 + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
5533 + rxq->read);
5534 + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
5535 + rxq->write);
5536 + pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
5537 + rxq->write_actual);
5538 + pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
5539 + rxq->need_update);
5540 + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
5541 + rxq->free_count);
5542 + if (rxq->rb_stts) {
5543 + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
5544 + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
5545 + } else {
5546 + pos += scnprintf(buf + pos, bufsz - pos,
5547 + "closed_rb_num: Not Allocated\n");
5548 + }
5549 + return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
5550 +}
5551 +
5552 +static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
5553 + char __user *user_buf,
5554 + size_t count, loff_t *ppos)
5555 +{
5556 + struct iwl_trans *trans = file->private_data;
5557 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5558 + struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
5559 +
5560 + int pos = 0;
5561 + char *buf;
5562 + int bufsz = 24 * 64; /* 24 items * 64 char per item */
5563 + ssize_t ret;
5564 +
5565 + buf = kzalloc(bufsz, GFP_KERNEL);
5566 + if (!buf)
5567 + return -ENOMEM;
5568 +
5569 + pos += scnprintf(buf + pos, bufsz - pos,
5570 + "Interrupt Statistics Report:\n");
5571 +
5572 + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
5573 + isr_stats->hw);
5574 + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
5575 + isr_stats->sw);
5576 + if (isr_stats->sw || isr_stats->hw) {
5577 + pos += scnprintf(buf + pos, bufsz - pos,
5578 + "\tLast Restarting Code: 0x%X\n",
5579 + isr_stats->err_code);
5580 + }
5581 +#ifdef CONFIG_IWLWIFI_DEBUG
5582 + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
5583 + isr_stats->sch);
5584 + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
5585 + isr_stats->alive);
5586 +#endif
5587 + pos += scnprintf(buf + pos, bufsz - pos,
5588 + "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
5589 +
5590 + pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
5591 + isr_stats->ctkill);
5592 +
5593 + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
5594 + isr_stats->wakeup);
5595 +
5596 + pos += scnprintf(buf + pos, bufsz - pos,
5597 + "Rx command responses:\t\t %u\n", isr_stats->rx);
5598 +
5599 + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
5600 + isr_stats->tx);
5601 +
5602 + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
5603 + isr_stats->unhandled);
5604 +
5605 + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
5606 + kfree(buf);
5607 + return ret;
5608 +}
5609 +
5610 +static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
5611 + const char __user *user_buf,
5612 + size_t count, loff_t *ppos)
5613 +{
5614 + struct iwl_trans *trans = file->private_data;
5615 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5616 + struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
5617 +
5618 + char buf[8];
5619 + int buf_size;
5620 + u32 reset_flag;
5621 +
5622 + memset(buf, 0, sizeof(buf));
5623 + buf_size = min(count, sizeof(buf) - 1);
5624 + if (copy_from_user(buf, user_buf, buf_size))
5625 + return -EFAULT;
5626 + if (sscanf(buf, "%x", &reset_flag) != 1)
5627 + return -EFAULT;
5628 + if (reset_flag == 0)
5629 + memset(isr_stats, 0, sizeof(*isr_stats));
5630 +
5631 + return count;
5632 +}
5633 +
5634 +static ssize_t iwl_dbgfs_csr_write(struct file *file,
5635 + const char __user *user_buf,
5636 + size_t count, loff_t *ppos)
5637 +{
5638 + struct iwl_trans *trans = file->private_data;
5639 + char buf[8];
5640 + int buf_size;
5641 + int csr;
5642 +
5643 + memset(buf, 0, sizeof(buf));
5644 + buf_size = min(count, sizeof(buf) - 1);
5645 + if (copy_from_user(buf, user_buf, buf_size))
5646 + return -EFAULT;
5647 + if (sscanf(buf, "%d", &csr) != 1)
5648 + return -EFAULT;
5649 +
5650 + iwl_pcie_dump_csr(trans);
5651 +
5652 + return count;
5653 +}
5654 +
5655 +static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
5656 + char __user *user_buf,
5657 + size_t count, loff_t *ppos)
5658 +{
5659 + struct iwl_trans *trans = file->private_data;
5660 + char *buf = NULL;
5661 + ssize_t ret;
5662 +
5663 + ret = iwl_dump_fh(trans, &buf);
5664 + if (ret < 0)
5665 + return ret;
5666 + if (!buf)
5667 + return -EINVAL;
5668 + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
5669 + kfree(buf);
5670 + return ret;
5671 +}
5672 +
5673 +DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
5674 +DEBUGFS_READ_FILE_OPS(fh_reg);
5675 +DEBUGFS_READ_FILE_OPS(rx_queue);
5676 +DEBUGFS_READ_FILE_OPS(tx_queue);
5677 +DEBUGFS_WRITE_FILE_OPS(csr);
5678 +
5679 +/* Create the debugfs files and directories */
5680 +int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
5681 +{
5682 + struct dentry *dir = trans->dbgfs_dir;
5683 +
5684 + DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
5685 + DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
5686 + DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
5687 + DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
5688 + DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
5689 + return 0;
5690 +
5691 +err:
5692 + IWL_ERR(trans, "failed to create the trans debugfs entry\n");
5693 + return -ENOMEM;
5694 +}
5695 +#endif /* CONFIG_IWLWIFI_DEBUGFS */
5696 +
5697 +static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
5698 +{
5699 + u32 cmdlen = 0;
5700 + int i;
5701 +
5702 + for (i = 0; i < IWL_NUM_OF_TBS; i++)
5703 + cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
5704 +
5705 + return cmdlen;
5706 +}
5707 +
5708 +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
5709 + struct iwl_fw_error_dump_data **data,
5710 + int allocated_rb_nums)
5711 +{
5712 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5713 + int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
5714 + struct iwl_rxq *rxq = &trans_pcie->rxq;
5715 + u32 i, r, j, rb_len = 0;
5716 +
5717 + spin_lock(&rxq->lock);
5718 +
5719 + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
5720 +
5721 + for (i = rxq->read, j = 0;
5722 + i != r && j < allocated_rb_nums;
5723 + i = (i + 1) & RX_QUEUE_MASK, j++) {
5724 + struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
5725 + struct iwl_fw_error_dump_rb *rb;
5726 +
5727 + dma_unmap_page(trans->dev, rxb->page_dma, max_len,
5728 + DMA_FROM_DEVICE);
5729 +
5730 + rb_len += sizeof(**data) + sizeof(*rb) + max_len;
5731 +
5732 + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
5733 + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
5734 + rb = (void *)(*data)->data;
5735 + rb->index = cpu_to_le32(i);
5736 + memcpy(rb->data, page_address(rxb->page), max_len);
5737 + /* re-map the page so the normal free path can unmap it later */
5738 + rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
5739 + max_len,
5740 + DMA_FROM_DEVICE);
5741 +
5742 + *data = iwl_fw_error_next_data(*data);
5743 + }
5744 +
5745 + spin_unlock(&rxq->lock);
5746 +
5747 + return rb_len;
5748 +}
5749 +#define IWL_CSR_TO_DUMP (0x250)
5750 +
5751 +static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
5752 + struct iwl_fw_error_dump_data **data)
5753 +{
5754 + u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
5755 + __le32 *val;
5756 + int i;
5757 +
5758 + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
5759 + (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
5760 + val = (void *)(*data)->data;
5761 +
5762 + for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
5763 + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
5764 +
5765 + *data = iwl_fw_error_next_data(*data);
5766 +
5767 + return csr_len;
5768 +}
5769 +
5770 +static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
5771 + struct iwl_fw_error_dump_data **data)
5772 +{
5773 + u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
5774 + unsigned long flags;
5775 + __le32 *val;
5776 + int i;
5777 +
5778 + if (!iwl_trans_grab_nic_access(trans, &flags))
5779 + return 0;
5780 +
5781 + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
5782 + (*data)->len = cpu_to_le32(fh_regs_len);
5783 + val = (void *)(*data)->data;
5784 +
5785 + for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
5786 + *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
5787 +
5788 + iwl_trans_release_nic_access(trans, &flags);
5789 +
5790 + *data = iwl_fw_error_next_data(*data);
5791 +
5792 + return sizeof(**data) + fh_regs_len;
5793 +}
5794 +
5795 +static u32
5796 +iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
5797 + struct iwl_fw_error_dump_fw_mon *fw_mon_data,
5798 + u32 monitor_len)
5799 +{
5800 + u32 buf_size_in_dwords = (monitor_len >> 2);
5801 + u32 *buffer = (u32 *)fw_mon_data->data;
5802 + unsigned long flags;
5803 + u32 i;
5804 +
5805 + if (!iwl_trans_grab_nic_access(trans, &flags))
5806 + return 0;
5807 +
5808 + iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
5809 + for (i = 0; i < buf_size_in_dwords; i++)
5810 + buffer[i] = iwl_read_prph_no_grab(trans,
5811 + MON_DMARB_RD_DATA_ADDR);
5812 + iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
5813 +
5814 + iwl_trans_release_nic_access(trans, &flags);
5815 +
5816 + return monitor_len;
5817 +}
5818 +
5819 +static u32
5820 +iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
5821 + struct iwl_fw_error_dump_data **data,
5822 + u32 monitor_len)
5823 +{
5824 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5825 + u32 len = 0;
5826 +
5827 + if ((trans_pcie->fw_mon_page &&
5828 + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
5829 + trans->dbg_dest_tlv) {
5830 + struct iwl_fw_error_dump_fw_mon *fw_mon_data;
5831 + u32 base, write_ptr, wrap_cnt;
5832 +
5833 + /* If there was a dest TLV - use the values from there */
5834 + if (trans->dbg_dest_tlv) {
5835 + write_ptr =
5836 + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
5837 + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
5838 + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
5839 + } else {
5840 + base = MON_BUFF_BASE_ADDR;
5841 + write_ptr = MON_BUFF_WRPTR;
5842 + wrap_cnt = MON_BUFF_CYCLE_CNT;
5843 + }
5844 +
5845 + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
5846 + fw_mon_data = (void *)(*data)->data;
5847 + fw_mon_data->fw_mon_wr_ptr =
5848 + cpu_to_le32(iwl_read_prph(trans, write_ptr));
5849 + fw_mon_data->fw_mon_cycle_cnt =
5850 + cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
5851 + fw_mon_data->fw_mon_base_ptr =
5852 + cpu_to_le32(iwl_read_prph(trans, base));
5853 +
5854 + len += sizeof(**data) + sizeof(*fw_mon_data);
5855 + if (trans_pcie->fw_mon_page) {
5856 + /*
5857 + * The firmware is now asserted, it won't write anything
5858 + * to the buffer. CPU can take ownership to fetch the
5859 + * data. The buffer will be handed back to the device
5860 + * before the firmware is restarted.
5861 + */
5862 + dma_sync_single_for_cpu(trans->dev,
5863 + trans_pcie->fw_mon_phys,
5864 + trans_pcie->fw_mon_size,
5865 + DMA_FROM_DEVICE);
5866 + memcpy(fw_mon_data->data,
5867 + page_address(trans_pcie->fw_mon_page),
5868 + trans_pcie->fw_mon_size);
5869 +
5870 + monitor_len = trans_pcie->fw_mon_size;
5871 + } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
5872 + /*
5873 + * Update pointers to reflect actual values after
5874 + * shifting
5875 + */
5876 + base = iwl_read_prph(trans, base) <<
5877 + trans->dbg_dest_tlv->base_shift;
5878 + iwl_trans_read_mem(trans, base, fw_mon_data->data,
5879 + monitor_len / sizeof(u32));
5880 + } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
5881 + monitor_len =
5882 + iwl_trans_pci_dump_marbh_monitor(trans,
5883 + fw_mon_data,
5884 + monitor_len);
5885 + } else {
5886 + /* Didn't match anything - output no monitor data */
5887 + monitor_len = 0;
5888 + }
5889 +
5890 + len += monitor_len;
5891 + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
5892 + }
5893 +
5894 + return len;
5895 +}
5896 +
5897 +static struct iwl_trans_dump_data
5898 +*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
5899 + const struct iwl_fw_dbg_trigger_tlv *trigger)
5900 +{
5901 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5902 + struct iwl_fw_error_dump_data *data;
5903 + struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
5904 + struct iwl_fw_error_dump_txcmd *txcmd;
5905 + struct iwl_trans_dump_data *dump_data;
5906 + u32 len, num_rbs;
5907 + u32 monitor_len;
5908 + int i, ptr;
5909 + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
5910 +
5911 + /* transport dump header */
5912 + len = sizeof(*dump_data);
5913 +
5914 + /* host commands */
5915 + len += sizeof(*data) +
5916 + cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
5917 +
5918 + /* FW monitor */
5919 + if (trans_pcie->fw_mon_page) {
5920 + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
5921 + trans_pcie->fw_mon_size;
5922 + monitor_len = trans_pcie->fw_mon_size;
5923 + } else if (trans->dbg_dest_tlv) {
5924 + u32 base, end;
5925 +
5926 + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
5927 + end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
5928 +
5929 + base = iwl_read_prph(trans, base) <<
5930 + trans->dbg_dest_tlv->base_shift;
5931 + end = iwl_read_prph(trans, end) <<
5932 + trans->dbg_dest_tlv->end_shift;
5933 +
5934 + /* Make "end" point to the actual end */
5935 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
5936 + trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
5937 + end += (1 << trans->dbg_dest_tlv->end_shift);
5938 + monitor_len = end - base;
5939 + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
5940 + monitor_len;
5941 + } else {
5942 + monitor_len = 0;
5943 + }
5944 +
5945 + if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
5946 + dump_data = vzalloc(len);
5947 + if (!dump_data)
5948 + return NULL;
5949 +
5950 + data = (void *)dump_data->data;
5951 + len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
5952 + dump_data->len = len;
5953 +
5954 + return dump_data;
5955 + }
5956 +
5957 + /* CSR registers */
5958 + len += sizeof(*data) + IWL_CSR_TO_DUMP;
5959 +
5960 + /* FH registers */
5961 + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
5962 +
5963 + if (dump_rbs) {
5964 + /* RBs */
5965 + num_rbs = le16_to_cpu(ACCESS_ONCE(
5966 + trans_pcie->rxq.rb_stts->closed_rb_num))
5967 + & 0x0FFF;
5968 + num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
5969 + len += num_rbs * (sizeof(*data) +
5970 + sizeof(struct iwl_fw_error_dump_rb) +
5971 + (PAGE_SIZE << trans_pcie->rx_page_order));
5972 + }
5973 +
5974 + dump_data = vzalloc(len);
5975 + if (!dump_data)
5976 + return NULL;
5977 +
5978 + len = 0;
5979 + data = (void *)dump_data->data;
5980 + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
5981 + txcmd = (void *)data->data;
5982 + spin_lock_bh(&cmdq->lock);
5983 + ptr = cmdq->q.write_ptr;
5984 + for (i = 0; i < cmdq->q.n_window; i++) {
5985 + u8 idx = get_cmd_index(&cmdq->q, ptr);
5986 + u32 caplen, cmdlen;
5987 +
5988 + cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
5989 + caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
5990 +
5991 + if (cmdlen) {
5992 + len += sizeof(*txcmd) + caplen;
5993 + txcmd->cmdlen = cpu_to_le32(cmdlen);
5994 + txcmd->caplen = cpu_to_le32(caplen);
5995 + memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
5996 + txcmd = (void *)((u8 *)txcmd->data + caplen);
5997 + }
5998 +
5999 + ptr = iwl_queue_dec_wrap(ptr);
6000 + }
6001 + spin_unlock_bh(&cmdq->lock);
6002 +
6003 + data->len = cpu_to_le32(len);
6004 + len += sizeof(*data);
6005 + data = iwl_fw_error_next_data(data);
6006 +
6007 + len += iwl_trans_pcie_dump_csr(trans, &data);
6008 + len += iwl_trans_pcie_fh_regs_dump(trans, &data);
6009 + if (dump_rbs)
6010 + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
6011 +
6012 + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
6013 +
6014 + dump_data->len = len;
6015 +
6016 + return dump_data;
6017 +}
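A minimal consumer sketch (not part of the patch) for the buffer this function returns: the dump is a packed list of (type, len, payload) records, walked with iwl_fw_error_next_data(); the function name example_walk_dump is invented:

static void example_walk_dump(struct iwl_trans_dump_data *dump_data)
{
	struct iwl_fw_error_dump_data *data = (void *)dump_data->data;
	u32 done = 0;

	while (done < dump_data->len) {
		u32 len = le32_to_cpu(data->len);

		pr_info("dump TLV type %u, %u bytes\n",
			le32_to_cpu(data->type), len);

		done += sizeof(*data) + len;
		data = iwl_fw_error_next_data(data);	/* skip payload */
	}
}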
6018 +
6019 +static const struct iwl_trans_ops trans_ops_pcie = {
6020 + .start_hw = iwl_trans_pcie_start_hw,
6021 + .op_mode_leave = iwl_trans_pcie_op_mode_leave,
6022 + .fw_alive = iwl_trans_pcie_fw_alive,
6023 + .start_fw = iwl_trans_pcie_start_fw,
6024 + .stop_device = iwl_trans_pcie_stop_device,
6025 +
6026 + .d3_suspend = iwl_trans_pcie_d3_suspend,
6027 + .d3_resume = iwl_trans_pcie_d3_resume,
6028 +
6029 + .send_cmd = iwl_trans_pcie_send_hcmd,
6030 +
6031 + .tx = iwl_trans_pcie_tx,
6032 + .reclaim = iwl_trans_pcie_reclaim,
6033 +
6034 + .txq_disable = iwl_trans_pcie_txq_disable,
6035 + .txq_enable = iwl_trans_pcie_txq_enable,
6036 +
6037 + .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
6038 + .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
6039 + .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
6040 +
6041 + .write8 = iwl_trans_pcie_write8,
6042 + .write32 = iwl_trans_pcie_write32,
6043 + .read32 = iwl_trans_pcie_read32,
6044 + .read_prph = iwl_trans_pcie_read_prph,
6045 + .write_prph = iwl_trans_pcie_write_prph,
6046 + .read_mem = iwl_trans_pcie_read_mem,
6047 + .write_mem = iwl_trans_pcie_write_mem,
6048 + .configure = iwl_trans_pcie_configure,
6049 + .set_pmi = iwl_trans_pcie_set_pmi,
6050 + .grab_nic_access = iwl_trans_pcie_grab_nic_access,
6051 + .release_nic_access = iwl_trans_pcie_release_nic_access,
6052 + .set_bits_mask = iwl_trans_pcie_set_bits_mask,
6053 +
6054 + .ref = iwl_trans_pcie_ref,
6055 + .unref = iwl_trans_pcie_unref,
6056 +
6057 + .dump_data = iwl_trans_pcie_dump_data,
6058 +};
6059 +
6060 +struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
6061 + const struct pci_device_id *ent,
6062 + const struct iwl_cfg *cfg)
6063 +{
6064 + struct iwl_trans_pcie *trans_pcie;
6065 + struct iwl_trans *trans;
6066 + u16 pci_cmd;
6067 + int ret;
6068 +
6069 + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
6070 + &pdev->dev, cfg, &trans_ops_pcie, 0);
6071 + if (!trans)
6072 + return ERR_PTR(-ENOMEM);
6073 +
6074 + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
6075 +
6076 + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
6077 +
6078 + trans_pcie->trans = trans;
6079 + spin_lock_init(&trans_pcie->irq_lock);
6080 + spin_lock_init(&trans_pcie->reg_lock);
6081 + spin_lock_init(&trans_pcie->ref_lock);
6082 + mutex_init(&trans_pcie->mutex);
6083 + init_waitqueue_head(&trans_pcie->ucode_write_waitq);
6084 + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
6085 + if (!trans_pcie->tso_hdr_page) {
6086 + ret = -ENOMEM;
6087 + goto out_no_pci;
6088 + }
6089 +
6090 + ret = pci_enable_device(pdev);
6091 + if (ret)
6092 + goto out_no_pci;
6093 +
6094 + if (!cfg->base_params->pcie_l1_allowed) {
6095 + /*
6096 + * Workaround - seems to solve weird behavior. We need to remove
6097 + * this if we don't want to stay in L1 all the time; that wastes
6098 + * a lot of power.
6099 + */
6100 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
6101 + PCIE_LINK_STATE_L1 |
6102 + PCIE_LINK_STATE_CLKPM);
6103 + }
6104 +
6105 + pci_set_master(pdev);
6106 +
6107 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6108 + if (!ret)
6109 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6110 + if (ret) {
6111 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6112 + if (!ret)
6113 + ret = pci_set_consistent_dma_mask(pdev,
6114 + DMA_BIT_MASK(32));
6115 + /* both attempts failed: */
6116 + if (ret) {
6117 + dev_err(&pdev->dev, "No suitable DMA available\n");
6118 + goto out_pci_disable_device;
6119 + }
6120 + }
6121 +
6122 + ret = pci_request_regions(pdev, DRV_NAME);
6123 + if (ret) {
6124 + dev_err(&pdev->dev, "pci_request_regions failed\n");
6125 + goto out_pci_disable_device;
6126 + }
6127 +
6128 + trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
6129 + if (!trans_pcie->hw_base) {
6130 + dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
6131 + ret = -ENODEV;
6132 + goto out_pci_release_regions;
6133 + }
6134 +
6135 + /* We disable the RETRY_TIMEOUT register (0x41) to keep
6136 + * PCI Tx retries from interfering with C3 CPU state */
6137 + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6138 +
6139 + trans->dev = &pdev->dev;
6140 + trans_pcie->pci_dev = pdev;
6141 + iwl_disable_interrupts(trans);
6142 +
6143 + ret = pci_enable_msi(pdev);
6144 + if (ret) {
6145 + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
6146 + /* enable rfkill interrupt: HW bug workaround */
6147 + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
6148 + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6149 + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6150 + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
6151 + }
6152 + }
6153 +
6154 + trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
6155 + /*
6156 + * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
6157 + * changed, and now the revision step also includes bits 0-1 (no more
6158 + * "dash" value). To keep hw_rev backwards compatible - we'll store it
6159 + * in the old format.
6160 + */
6161 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
6162 + unsigned long flags;
6163 +
6164 + trans->hw_rev = (trans->hw_rev & 0xfff0) |
6165 + (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
6166 +
6167 + ret = iwl_pcie_prepare_card_hw(trans);
6168 + if (ret) {
6169 + IWL_WARN(trans, "Exit HW not ready\n");
6170 + goto out_pci_disable_msi;
6171 + }
6172 +
6173 + /*
6174 + * In order to recognize the C step, the driver should read the chip
6175 + * version id located in the AUX bus MISC address space.
6176 + */
6177 + iwl_set_bit(trans, CSR_GP_CNTRL,
6178 + CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6179 + udelay(2);
6180 +
6181 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
6182 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6183 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6184 + 25000);
6185 + if (ret < 0) {
6186 + IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
6187 + goto out_pci_disable_msi;
6188 + }
6189 +
6190 + if (iwl_trans_grab_nic_access(trans, &flags)) {
6191 + u32 hw_step;
6192 +
6193 + hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
6194 + hw_step |= ENABLE_WFPM;
6195 + iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
6196 + hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
6197 + hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
6198 + if (hw_step == 0x3)
6199 + trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
6200 + (SILICON_C_STEP << 2);
6201 + iwl_trans_release_nic_access(trans, &flags);
6202 + }
6203 + }
6204 +
6205 + trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
6206 + snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
6207 + "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
6208 +
6209 + /* Initialize the wait queue for commands */
6210 + init_waitqueue_head(&trans_pcie->wait_command_queue);
6211 +
6212 + ret = iwl_pcie_alloc_ict(trans);
6213 + if (ret)
6214 + goto out_pci_disable_msi;
6215 +
6216 + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
6217 + iwl_pcie_irq_handler,
6218 + IRQF_SHARED, DRV_NAME, trans);
6219 + if (ret) {
6220 + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
6221 + goto out_free_ict;
6222 + }
6223 +
6224 + trans_pcie->inta_mask = CSR_INI_SET_MASK;
6225 +
6226 + return trans;
6227 +
6228 +out_free_ict:
6229 + iwl_pcie_free_ict(trans);
6230 +out_pci_disable_msi:
6231 + pci_disable_msi(pdev);
6232 +out_pci_release_regions:
6233 + pci_release_regions(pdev);
6234 +out_pci_disable_device:
6235 + pci_disable_device(pdev);
6236 +out_no_pci:
6237 + free_percpu(trans_pcie->tso_hdr_page);
6238 + iwl_trans_free(trans);
6239 + return ERR_PTR(ret);
6240 +}
6241 diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
6242 index 88bf80a942b4..9faf69875fab 100644
6243 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
6244 +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
6245 @@ -382,6 +382,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6246 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
6247 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
6248 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
6249 + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
6250 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
6251 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
6252 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
6253 @@ -399,10 +400,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6254 {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
6255 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
6256 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
6257 - {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
6258 + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
6259 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
6260 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
6261 - {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
6262 + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
6263 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
6264 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
6265 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
6266 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
6267 index 1de80a8e357a..840c47d8e2ce 100644
6268 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
6269 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
6270 @@ -7,6 +7,7 @@
6271 *
6272 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
6273 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6274 + * Copyright(c) 2016 Intel Deutschland GmbH
6275 *
6276 * This program is free software; you can redistribute it and/or modify
6277 * it under the terms of version 2 of the GNU General Public License as
6278 @@ -33,6 +34,7 @@
6279 *
6280 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
6281 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6282 + * Copyright(c) 2016 Intel Deutschland GmbH
6283 * All rights reserved.
6284 *
6285 * Redistribution and use in source and binary forms, with or without
6286 @@ -881,9 +883,16 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
6287 if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
6288 iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
6289 trans_pcie->fw_mon_phys >> dest->base_shift);
6290 - iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6291 - (trans_pcie->fw_mon_phys +
6292 - trans_pcie->fw_mon_size) >> dest->end_shift);
6293 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
6294 + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6295 + (trans_pcie->fw_mon_phys +
6296 + trans_pcie->fw_mon_size - 256) >>
6297 + dest->end_shift);
6298 + else
6299 + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6300 + (trans_pcie->fw_mon_phys +
6301 + trans_pcie->fw_mon_size) >>
6302 + dest->end_shift);
6303 }
6304 }
6305
6306 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
6307 index f46c9d7f6528..7f471bff435c 100644
6308 --- a/drivers/net/wireless/rtlwifi/pci.c
6309 +++ b/drivers/net/wireless/rtlwifi/pci.c
6310 @@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6311 hw_queue);
6312 if (rx_remained_cnt == 0)
6313 return;
6314 -
6315 + buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
6316 + rtlpci->rx_ring[rxring_idx].idx];
6317 + pdesc = (struct rtl_rx_desc *)skb->data;
6318 } else { /* rx descriptor */
6319 pdesc = &rtlpci->rx_ring[rxring_idx].desc[
6320 rtlpci->rx_ring[rxring_idx].idx];
6321 @@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6322 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
6323 if (unlikely(!new_skb))
6324 goto no_new;
6325 - if (rtlpriv->use_new_trx_flow) {
6326 - buffer_desc =
6327 - &rtlpci->rx_ring[rxring_idx].buffer_desc
6328 - [rtlpci->rx_ring[rxring_idx].idx];
6329 - /*means rx wifi info*/
6330 - pdesc = (struct rtl_rx_desc *)skb->data;
6331 - }
6332 memset(&rx_status , 0 , sizeof(rx_status));
6333 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
6334 &rx_status, (u8 *)pdesc, skb);
6335 diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
6336 index 11344121c55e..47e32cb0ec1a 100644
6337 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
6338 +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
6339 @@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6340 u8 tid;
6341
6342 rtl8188ee_bt_reg_init(hw);
6343 - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6344 -
6345 rtlpriv->dm.dm_initialgain_enable = 1;
6346 rtlpriv->dm.dm_flag = 0;
6347 rtlpriv->dm.disable_framebursting = 0;
6348 @@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6349 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6350 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6351 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6352 + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6353 + rtlpriv->cfg->mod_params->sw_crypto =
6354 + rtlpriv->cfg->mod_params->sw_crypto;
6355 + rtlpriv->cfg->mod_params->disable_watchdog =
6356 + rtlpriv->cfg->mod_params->disable_watchdog;
6357 if (rtlpriv->cfg->mod_params->disable_watchdog)
6358 pr_info("watchdog disabled\n");
6359 if (!rtlpriv->psc.inactiveps)
6360 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
6361 index de6cb6c3a48c..4780bdc63b2b 100644
6362 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
6363 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
6364 @@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
6365 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6366 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6367 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6368 + rtlpriv->cfg->mod_params->sw_crypto =
6369 + rtlpriv->cfg->mod_params->sw_crypto;
6370 if (!rtlpriv->psc.inactiveps)
6371 pr_info("rtl8192ce: Power Save off (module option)\n");
6372 if (!rtlpriv->psc.fwctrl_lps)
6373 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
6374 index fd4a5353d216..7c6f7f0d18c6 100644
6375 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
6376 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
6377 @@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
6378 rtlpriv->dm.disable_framebursting = false;
6379 rtlpriv->dm.thermalvalue = 0;
6380 rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
6381 + rtlpriv->cfg->mod_params->sw_crypto =
6382 + rtlpriv->cfg->mod_params->sw_crypto;
6383
6384 /* for firmware buf */
6385 rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
6386 diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
6387 index b19d0398215f..c6e09a19de1a 100644
6388 --- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
6389 +++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
6390 @@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
6391 module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
6392 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6393 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6394 -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6395 -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6396 +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6397 +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6398 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6399
6400 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6401 diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
6402 index e1fd27c888bf..31baca41ac2f 100644
6403 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
6404 +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
6405 @@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
6406 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6407 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6408 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6409 + rtlpriv->cfg->mod_params->sw_crypto =
6410 + rtlpriv->cfg->mod_params->sw_crypto;
6411 if (!rtlpriv->psc.inactiveps)
6412 pr_info("Power Save off (module option)\n");
6413 if (!rtlpriv->psc.fwctrl_lps)
6414 @@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
6415 module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
6416 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6417 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6418 -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6419 -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6420 +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6421 +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6422 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6423
6424 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6425 diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
6426 index 2721cf89fb16..aac1ed3f7bb4 100644
6427 --- a/drivers/net/wireless/rtlwifi/usb.c
6428 +++ b/drivers/net/wireless/rtlwifi/usb.c
6429 @@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
6430 ieee80211_rx(hw, skb);
6431 else
6432 dev_kfree_skb_any(skb);
6433 + } else {
6434 + dev_kfree_skb_any(skb);
6435 }
6436 }
6437
6438 diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
6439 index 0305729d0986..10cf3747694d 100644
6440 --- a/drivers/net/wireless/ti/wlcore/io.h
6441 +++ b/drivers/net/wireless/ti/wlcore/io.h
6442 @@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
6443
6444 static inline void wl1271_power_off(struct wl1271 *wl)
6445 {
6446 - int ret;
6447 + int ret = 0;
6448
6449 if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
6450 return;
6451
6452 - ret = wl->if_ops->power(wl->dev, false);
6453 + if (wl->if_ops->power)
6454 + ret = wl->if_ops->power(wl->dev, false);
6455 if (!ret)
6456 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
6457 }
6458
6459 static inline int wl1271_power_on(struct wl1271 *wl)
6460 {
6461 - int ret = wl->if_ops->power(wl->dev, true);
6462 + int ret = 0;
6463 +
6464 + if (wl->if_ops->power)
6465 + ret = wl->if_ops->power(wl->dev, true);
6466 if (ret == 0)
6467 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
6468
6469 diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
6470 index f1ac2839d97c..720e4e4b5a3c 100644
6471 --- a/drivers/net/wireless/ti/wlcore/spi.c
6472 +++ b/drivers/net/wireless/ti/wlcore/spi.c
6473 @@ -73,7 +73,10 @@
6474 */
6475 #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
6476
6477 -#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
6478 +/* Maximum number of SPI write chunks */
6479 +#define WSPI_MAX_NUM_OF_CHUNKS \
6480 + ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
6481 +
6482
6483 struct wl12xx_spi_glue {
6484 struct device *dev;
6485 @@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
6486 void *buf, size_t len, bool fixed)
6487 {
6488 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
6489 - struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
6490 + /* SPI write buffers - 2 for each chunk */
6491 + struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
6492 struct spi_message m;
6493 - u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
6494 + u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
6495 u32 *cmd;
6496 u32 chunk_len;
6497 int i;
6498 diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
6499 index d3346d23963b..89b3befc7155 100644
6500 --- a/drivers/pci/bus.c
6501 +++ b/drivers/pci/bus.c
6502 @@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
6503 type_mask |= IORESOURCE_TYPE_BITS;
6504
6505 pci_bus_for_each_resource(bus, r, i) {
6506 + resource_size_t min_used = min;
6507 +
6508 if (!r)
6509 continue;
6510
6511 @@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
6512 * overrides "min".
6513 */
6514 if (avail.start)
6515 - min = avail.start;
6516 + min_used = avail.start;
6517
6518 max = avail.end;
6519
6520 /* Ok, try it out.. */
6521 - ret = allocate_resource(r, res, size, min, max,
6522 + ret = allocate_resource(r, res, size, min_used, max,
6523 align, alignf, alignf_data);
6524 if (ret == 0)
6525 return 0;
6526 diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
6527 index 2d57e19a2cd4..b5ae685aec61 100644
6528 --- a/drivers/pci/host/pci-dra7xx.c
6529 +++ b/drivers/pci/host/pci-dra7xx.c
6530 @@ -289,7 +289,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
6531 }
6532
6533 ret = devm_request_irq(&pdev->dev, pp->irq,
6534 - dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
6535 + dra7xx_pcie_msi_irq_handler,
6536 + IRQF_SHARED | IRQF_NO_THREAD,
6537 "dra7-pcie-msi", pp);
6538 if (ret) {
6539 dev_err(&pdev->dev, "failed to request irq\n");
6540 diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
6541 index c139237e0e52..5b2b83cb67ad 100644
6542 --- a/drivers/pci/host/pci-exynos.c
6543 +++ b/drivers/pci/host/pci-exynos.c
6544 @@ -527,7 +527,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
6545
6546 ret = devm_request_irq(&pdev->dev, pp->msi_irq,
6547 exynos_pcie_msi_irq_handler,
6548 - IRQF_SHARED, "exynos-pcie", pp);
6549 + IRQF_SHARED | IRQF_NO_THREAD,
6550 + "exynos-pcie", pp);
6551 if (ret) {
6552 dev_err(&pdev->dev, "failed to request msi irq\n");
6553 return ret;
6554 diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
6555 index fdb95367721e..ebcb0ac8512b 100644
6556 --- a/drivers/pci/host/pci-imx6.c
6557 +++ b/drivers/pci/host/pci-imx6.c
6558 @@ -534,7 +534,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
6559
6560 ret = devm_request_irq(&pdev->dev, pp->msi_irq,
6561 imx6_pcie_msi_handler,
6562 - IRQF_SHARED, "mx6-pcie-msi", pp);
6563 + IRQF_SHARED | IRQF_NO_THREAD,
6564 + "mx6-pcie-msi", pp);
6565 if (ret) {
6566 dev_err(&pdev->dev, "failed to request MSI irq\n");
6567 return -ENODEV;
6568 diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
6569 index 00e92720d7f7..d9789d6ba47d 100644
6570 --- a/drivers/pci/host/pci-tegra.c
6571 +++ b/drivers/pci/host/pci-tegra.c
6572 @@ -1304,7 +1304,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
6573
6574 msi->irq = err;
6575
6576 - err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
6577 + err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
6578 tegra_msi_irq_chip.name, pcie);
6579 if (err < 0) {
6580 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
6581 diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
6582 index c086210f2ffd..56ce5640d91a 100644
6583 --- a/drivers/pci/host/pcie-rcar.c
6584 +++ b/drivers/pci/host/pcie-rcar.c
6585 @@ -695,14 +695,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
6586
6587 /* Two irqs are for MSI, but they are also used for non-MSI irqs */
6588 err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
6589 - IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
6590 + IRQF_SHARED | IRQF_NO_THREAD,
6591 + rcar_msi_irq_chip.name, pcie);
6592 if (err < 0) {
6593 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
6594 goto err;
6595 }
6596
6597 err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
6598 - IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
6599 + IRQF_SHARED | IRQF_NO_THREAD,
6600 + rcar_msi_irq_chip.name, pcie);
6601 if (err < 0) {
6602 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
6603 goto err;
6604 diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
6605 index 020d78890719..4ea793eaa2bd 100644
6606 --- a/drivers/pci/host/pcie-spear13xx.c
6607 +++ b/drivers/pci/host/pcie-spear13xx.c
6608 @@ -281,7 +281,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
6609 return -ENODEV;
6610 }
6611 ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
6612 - IRQF_SHARED, "spear1340-pcie", pp);
6613 + IRQF_SHARED | IRQF_NO_THREAD,
6614 + "spear1340-pcie", pp);
6615 if (ret) {
6616 dev_err(dev, "failed to request irq %d\n", pp->irq);
6617 return ret;
6618 diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
6619 index f1a06a091ccb..577fe5b2f617 100644
6620 --- a/drivers/pci/host/pcie-xilinx.c
6621 +++ b/drivers/pci/host/pcie-xilinx.c
6622 @@ -776,7 +776,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
6623
6624 port->irq = irq_of_parse_and_map(node, 0);
6625 err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
6626 - IRQF_SHARED, "xilinx-pcie", port);
6627 + IRQF_SHARED | IRQF_NO_THREAD,
6628 + "xilinx-pcie", port);
6629 if (err) {
6630 dev_err(dev, "unable to request irq %d\n", port->irq);
6631 return err;
6632 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
6633 index bcb90e4888dd..b60309ee80ed 100644
6634 --- a/drivers/pci/hotplug/acpiphp_glue.c
6635 +++ b/drivers/pci/hotplug/acpiphp_glue.c
6636 @@ -954,8 +954,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
6637 {
6638 pci_lock_rescan_remove();
6639
6640 - if (slot->flags & SLOT_IS_GOING_AWAY)
6641 + if (slot->flags & SLOT_IS_GOING_AWAY) {
6642 + pci_unlock_rescan_remove();
6643 return -ENODEV;
6644 + }
6645
6646 /* configure all functions */
6647 if (!(slot->flags & SLOT_ENABLED))
6648 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
6649 index cd78f1166b33..9a92d13e3917 100644
6650 --- a/drivers/platform/x86/ideapad-laptop.c
6651 +++ b/drivers/platform/x86/ideapad-laptop.c
6652 @@ -845,6 +845,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
6653 },
6654 },
6655 {
6656 + .ident = "Lenovo ideapad Y700-17ISK",
6657 + .matches = {
6658 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6659 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
6660 + },
6661 + },
6662 + {
6663 .ident = "Lenovo Yoga 2 11 / 13 / Pro",
6664 .matches = {
6665 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6666 @@ -865,6 +872,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
6667 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"),
6668 },
6669 },
6670 + {
6671 + .ident = "Lenovo Yoga 700",
6672 + .matches = {
6673 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6674 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
6675 + },
6676 + },
6677 + {
6678 + .ident = "Lenovo Yoga 900",
6679 + .matches = {
6680 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
6681 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
6682 + },
6683 + },
6684 {}
6685 };
6686
6687 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
6688 index 9f77d23239a2..64ed88a67e6e 100644
6689 --- a/drivers/scsi/scsi_devinfo.c
6690 +++ b/drivers/scsi/scsi_devinfo.c
6691 @@ -227,6 +227,7 @@ static struct {
6692 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
6693 {"Promise", "", NULL, BLIST_SPARSELUN},
6694 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
6695 + {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
6696 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
6697 {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
6698 {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
6699 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
6700 index 11ea52b2c36b..c66fd23b3c13 100644
6701 --- a/drivers/scsi/sd.c
6702 +++ b/drivers/scsi/sd.c
6703 @@ -3141,8 +3141,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
6704 struct scsi_disk *sdkp = dev_get_drvdata(dev);
6705 int ret = 0;
6706
6707 - if (!sdkp)
6708 - return 0; /* this can happen */
6709 + if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
6710 + return 0;
6711
6712 if (sdkp->WCE && sdkp->media_present) {
6713 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
6714 @@ -3181,6 +3181,9 @@ static int sd_resume(struct device *dev)
6715 {
6716 struct scsi_disk *sdkp = dev_get_drvdata(dev);
6717
6718 + if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
6719 + return 0;
6720 +
6721 if (!sdkp->device->manage_start_stop)
6722 return 0;
6723
6724 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
6725 index 9d7b7db75e4b..3bbf4853733c 100644
6726 --- a/drivers/scsi/sg.c
6727 +++ b/drivers/scsi/sg.c
6728 @@ -1255,7 +1255,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
6729 }
6730
6731 sfp->mmap_called = 1;
6732 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
6733 + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
6734 vma->vm_private_data = sfp;
6735 vma->vm_ops = &sg_mmap_vm_ops;
6736 return 0;
6737 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
6738 index 8bd54a64efd6..64c867405ad4 100644
6739 --- a/drivers/scsi/sr.c
6740 +++ b/drivers/scsi/sr.c
6741 @@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
6742 {
6743 struct scsi_cd *cd = dev_get_drvdata(dev);
6744
6745 + if (!cd) /* E.g.: runtime suspend following sr_remove() */
6746 + return 0;
6747 +
6748 if (cd->media_present)
6749 return -EBUSY;
6750 else
6751 @@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
6752 scsi_autopm_get_device(cd->device);
6753
6754 del_gendisk(cd->disk);
6755 + dev_set_drvdata(dev, NULL);
6756
6757 mutex_lock(&sr_ref_mutex);
6758 kref_put(&cd->kref, sr_kref_release);
6759 diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
6760 index a0315701c7d9..ed68b2cfe031 100644
6761 --- a/drivers/staging/speakup/selection.c
6762 +++ b/drivers/staging/speakup/selection.c
6763 @@ -141,7 +141,9 @@ static void __speakup_paste_selection(struct work_struct *work)
6764 struct tty_ldisc *ld;
6765 DECLARE_WAITQUEUE(wait, current);
6766
6767 - ld = tty_ldisc_ref_wait(tty);
6768 + ld = tty_ldisc_ref(tty);
6769 + if (!ld)
6770 + goto tty_unref;
6771 tty_buffer_lock_exclusive(&vc->port);
6772
6773 add_wait_queue(&vc->paste_wait, &wait);
6774 @@ -161,6 +163,7 @@ static void __speakup_paste_selection(struct work_struct *work)
6775
6776 tty_buffer_unlock_exclusive(&vc->port);
6777 tty_ldisc_deref(ld);
6778 +tty_unref:
6779 tty_kref_put(tty);
6780 }
6781
6782 diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
6783 index 1d9d51bdf517..f41a7da1949d 100644
6784 --- a/drivers/staging/speakup/serialio.c
6785 +++ b/drivers/staging/speakup/serialio.c
6786 @@ -6,6 +6,11 @@
6787 #include "spk_priv.h"
6788 #include "serialio.h"
6789
6790 +#include <linux/serial_core.h>
6791 +/* WARNING: Do not change this to <linux/serial.h> without testing that
6792 + * SERIAL_PORT_DFNS does get defined to the appropriate value. */
6793 +#include <asm/serial.h>
6794 +
6795 #ifndef SERIAL_PORT_DFNS
6796 #define SERIAL_PORT_DFNS
6797 #endif
6798 @@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
6799 int baud = 9600, quot = 0;
6800 unsigned int cval = 0;
6801 int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
6802 - const struct old_serial_port *ser = rs_table + index;
6803 + const struct old_serial_port *ser;
6804 int err;
6805
6806 + if (index >= ARRAY_SIZE(rs_table)) {
6807 + pr_info("no port info for ttyS%d\n", index);
6808 + return NULL;
6809 + }
6810 + ser = rs_table + index;
6811 +
6812 /* Divisor, bytesize and parity */
6813 quot = ser->baud_base / baud;
6814 cval = cflag & (CSIZE | CSTOPB);
6815 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
6816 index 6f2fb546477e..5a8add721741 100644
6817 --- a/drivers/target/iscsi/iscsi_target_configfs.c
6818 +++ b/drivers/target/iscsi/iscsi_target_configfs.c
6819 @@ -1907,7 +1907,8 @@ static void lio_tpg_release_fabric_acl(
6820 }
6821
6822 /*
6823 - * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
6824 + * Called with spin_lock_irq(struct se_portal_group->session_lock) held
6825 + * or not held.
6826 *
6827 * Also, this function calls iscsit_inc_session_usage_count() on the
6828 * struct iscsi_session in question.
6829 @@ -1915,19 +1916,32 @@ static void lio_tpg_release_fabric_acl(
6830 static int lio_tpg_shutdown_session(struct se_session *se_sess)
6831 {
6832 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
6833 + struct se_portal_group *se_tpg = se_sess->se_tpg;
6834 + bool local_lock = false;
6835 +
6836 + if (!spin_is_locked(&se_tpg->session_lock)) {
6837 + spin_lock_irq(&se_tpg->session_lock);
6838 + local_lock = true;
6839 + }
6840
6841 spin_lock(&sess->conn_lock);
6842 if (atomic_read(&sess->session_fall_back_to_erl0) ||
6843 atomic_read(&sess->session_logout) ||
6844 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
6845 spin_unlock(&sess->conn_lock);
6846 + if (local_lock)
6847 + spin_unlock_irq(&sess->conn_lock);
6848 return 0;
6849 }
6850 atomic_set(&sess->session_reinstatement, 1);
6851 spin_unlock(&sess->conn_lock);
6852
6853 iscsit_stop_time2retain_timer(sess);
6854 + spin_unlock_irq(&se_tpg->session_lock);
6855 +
6856 iscsit_stop_session(sess, 1, 1);
6857 + if (!local_lock)
6858 + spin_lock_irq(&se_tpg->session_lock);
6859
6860 return 1;
6861 }
6862 diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
6863 index 5a0f12d08e8b..ec4ea5940bf7 100644
6864 --- a/drivers/thermal/step_wise.c
6865 +++ b/drivers/thermal/step_wise.c
6866 @@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
6867 next_target = instance->target;
6868 dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
6869
6870 + if (!instance->initialized) {
6871 + if (throttle) {
6872 + next_target = (cur_state + 1) >= instance->upper ?
6873 + instance->upper :
6874 + ((cur_state + 1) < instance->lower ?
6875 + instance->lower : (cur_state + 1));
6876 + } else {
6877 + next_target = THERMAL_NO_TARGET;
6878 + }
6879 +
6880 + return next_target;
6881 + }
6882 +
6883 switch (trend) {
6884 case THERMAL_TREND_RAISING:
6885 if (throttle) {
6886 @@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
6887 dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
6888 old_target, (int)instance->target);
6889
6890 - if (old_target == instance->target)
6891 + if (instance->initialized && old_target == instance->target)
6892 continue;
6893
6894 /* Activate a passive thermal instance */
6895 @@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
6896 instance->target == THERMAL_NO_TARGET)
6897 update_passive_instance(tz, trip_type, -1);
6898
6899 -
6900 + instance->initialized = true;
6901 instance->cdev->updated = false; /* cdev needs update */
6902 }
6903
6904 diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
6905 index 4108db7e10c1..a3282bfb343d 100644
6906 --- a/drivers/thermal/thermal_core.c
6907 +++ b/drivers/thermal/thermal_core.c
6908 @@ -37,6 +37,7 @@
6909 #include <linux/of.h>
6910 #include <net/netlink.h>
6911 #include <net/genetlink.h>
6912 +#include <linux/suspend.h>
6913
6914 #define CREATE_TRACE_POINTS
6915 #include <trace/events/thermal.h>
6916 @@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
6917 static DEFINE_MUTEX(thermal_list_lock);
6918 static DEFINE_MUTEX(thermal_governor_lock);
6919
6920 +static atomic_t in_suspend;
6921 +
6922 static struct thermal_governor *def_governor;
6923
6924 static struct thermal_governor *__find_governor(const char *name)
6925 @@ -471,14 +474,31 @@ static void update_temperature(struct thermal_zone_device *tz)
6926 mutex_unlock(&tz->lock);
6927
6928 trace_thermal_temperature(tz);
6929 - dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
6930 - tz->last_temperature, tz->temperature);
6931 + if (tz->last_temperature == THERMAL_TEMP_INVALID)
6932 + dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
6933 + tz->temperature);
6934 + else
6935 + dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
6936 + tz->last_temperature, tz->temperature);
6937 +}
6938 +
6939 +static void thermal_zone_device_reset(struct thermal_zone_device *tz)
6940 +{
6941 + struct thermal_instance *pos;
6942 +
6943 + tz->temperature = THERMAL_TEMP_INVALID;
6944 + tz->passive = 0;
6945 + list_for_each_entry(pos, &tz->thermal_instances, tz_node)
6946 + pos->initialized = false;
6947 }
6948
6949 void thermal_zone_device_update(struct thermal_zone_device *tz)
6950 {
6951 int count;
6952
6953 + if (atomic_read(&in_suspend))
6954 + return;
6955 +
6956 if (!tz->ops->get_temp)
6957 return;
6958
6959 @@ -1016,6 +1036,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
6960 if (!result) {
6961 list_add_tail(&dev->tz_node, &tz->thermal_instances);
6962 list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
6963 + atomic_set(&tz->need_update, 1);
6964 }
6965 mutex_unlock(&cdev->lock);
6966 mutex_unlock(&tz->lock);
6967 @@ -1122,6 +1143,7 @@ __thermal_cooling_device_register(struct device_node *np,
6968 const struct thermal_cooling_device_ops *ops)
6969 {
6970 struct thermal_cooling_device *cdev;
6971 + struct thermal_zone_device *pos = NULL;
6972 int result;
6973
6974 if (type && strlen(type) >= THERMAL_NAME_LENGTH)
6975 @@ -1166,6 +1188,12 @@ __thermal_cooling_device_register(struct device_node *np,
6976 /* Update binding information for 'this' new cdev */
6977 bind_cdev(cdev);
6978
6979 + mutex_lock(&thermal_list_lock);
6980 + list_for_each_entry(pos, &thermal_tz_list, node)
6981 + if (atomic_cmpxchg(&pos->need_update, 1, 0))
6982 + thermal_zone_device_update(pos);
6983 + mutex_unlock(&thermal_list_lock);
6984 +
6985 return cdev;
6986 }
6987
6988 @@ -1496,6 +1524,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
6989 tz->trips = trips;
6990 tz->passive_delay = passive_delay;
6991 tz->polling_delay = polling_delay;
6992 + /* A new thermal zone needs to be updated anyway. */
6993 + atomic_set(&tz->need_update, 1);
6994
6995 dev_set_name(&tz->device, "thermal_zone%d", tz->id);
6996 result = device_register(&tz->device);
6997 @@ -1576,7 +1606,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
6998 if (!tz->ops->get_temp)
6999 thermal_zone_device_set_polling(tz, 0);
7000
7001 - thermal_zone_device_update(tz);
7002 + thermal_zone_device_reset(tz);
7003 + /* Update the new thermal zone and mark it as already updated. */
7004 + if (atomic_cmpxchg(&tz->need_update, 1, 0))
7005 + thermal_zone_device_update(tz);
7006
7007 return tz;
7008
7009 @@ -1810,6 +1843,36 @@ static void thermal_unregister_governors(void)
7010 thermal_gov_user_space_unregister();
7011 }
7012
7013 +static int thermal_pm_notify(struct notifier_block *nb,
7014 + unsigned long mode, void *_unused)
7015 +{
7016 + struct thermal_zone_device *tz;
7017 +
7018 + switch (mode) {
7019 + case PM_HIBERNATION_PREPARE:
7020 + case PM_RESTORE_PREPARE:
7021 + case PM_SUSPEND_PREPARE:
7022 + atomic_set(&in_suspend, 1);
7023 + break;
7024 + case PM_POST_HIBERNATION:
7025 + case PM_POST_RESTORE:
7026 + case PM_POST_SUSPEND:
7027 + atomic_set(&in_suspend, 0);
7028 + list_for_each_entry(tz, &thermal_tz_list, node) {
7029 + thermal_zone_device_reset(tz);
7030 + thermal_zone_device_update(tz);
7031 + }
7032 + break;
7033 + default:
7034 + break;
7035 + }
7036 + return 0;
7037 +}
7038 +
7039 +static struct notifier_block thermal_pm_nb = {
7040 + .notifier_call = thermal_pm_notify,
7041 +};
7042 +
7043 static int __init thermal_init(void)
7044 {
7045 int result;
7046 @@ -1830,6 +1893,11 @@ static int __init thermal_init(void)
7047 if (result)
7048 goto exit_netlink;
7049
7050 + result = register_pm_notifier(&thermal_pm_nb);
7051 + if (result)
7052 + pr_warn("Thermal: Can not register suspend notifier, return %d\n",
7053 + result);
7054 +
7055 return 0;
7056
7057 exit_netlink:
7058 @@ -1849,6 +1917,7 @@ error:
7059
7060 static void __exit thermal_exit(void)
7061 {
7062 + unregister_pm_notifier(&thermal_pm_nb);
7063 of_thermal_destroy_zones();
7064 genetlink_exit();
7065 class_unregister(&thermal_class);
7066 diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
7067 index 8e391812e503..dce86ee8e9d7 100644
7068 --- a/drivers/thermal/thermal_core.h
7069 +++ b/drivers/thermal/thermal_core.h
7070 @@ -41,6 +41,7 @@ struct thermal_instance {
7071 struct thermal_zone_device *tz;
7072 struct thermal_cooling_device *cdev;
7073 int trip;
7074 + bool initialized;
7075 unsigned long upper; /* Highest cooling state for this trip point */
7076 unsigned long lower; /* Lowest cooling state for this trip point */
7077 unsigned long target; /* expected cooling state */
7078 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
7079 index e5edf45e9d4c..33088c70ef3b 100644
7080 --- a/drivers/tty/n_tty.c
7081 +++ b/drivers/tty/n_tty.c
7082 @@ -258,16 +258,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
7083
7084 static void n_tty_check_unthrottle(struct tty_struct *tty)
7085 {
7086 - if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
7087 - tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
7088 + if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
7089 if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
7090 return;
7091 if (!tty->count)
7092 return;
7093 n_tty_kick_worker(tty);
7094 - n_tty_write_wakeup(tty->link);
7095 - if (waitqueue_active(&tty->link->write_wait))
7096 - wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
7097 + tty_wakeup(tty->link);
7098 return;
7099 }
7100
7101 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
7102 index eb8adc2e68c1..2fd163b75665 100644
7103 --- a/drivers/tty/serial/8250/8250_pci.c
7104 +++ b/drivers/tty/serial/8250/8250_pci.c
7105 @@ -1380,6 +1380,9 @@ ce4100_serial_setup(struct serial_private *priv,
7106 #define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
7107 #define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
7108
7109 +#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
7110 +#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
7111 +
7112 #define BYT_PRV_CLK 0x800
7113 #define BYT_PRV_CLK_EN (1 << 0)
7114 #define BYT_PRV_CLK_M_VAL_SHIFT 1
7115 @@ -1458,11 +1461,13 @@ byt_serial_setup(struct serial_private *priv,
7116 switch (pdev->device) {
7117 case PCI_DEVICE_ID_INTEL_BYT_UART1:
7118 case PCI_DEVICE_ID_INTEL_BSW_UART1:
7119 + case PCI_DEVICE_ID_INTEL_BDW_UART1:
7120 rx_param->src_id = 3;
7121 tx_param->dst_id = 2;
7122 break;
7123 case PCI_DEVICE_ID_INTEL_BYT_UART2:
7124 case PCI_DEVICE_ID_INTEL_BSW_UART2:
7125 + case PCI_DEVICE_ID_INTEL_BDW_UART2:
7126 rx_param->src_id = 5;
7127 tx_param->dst_id = 4;
7128 break;
7129 @@ -2154,6 +2159,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
7130 .subdevice = PCI_ANY_ID,
7131 .setup = byt_serial_setup,
7132 },
7133 + {
7134 + .vendor = PCI_VENDOR_ID_INTEL,
7135 + .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
7136 + .subvendor = PCI_ANY_ID,
7137 + .subdevice = PCI_ANY_ID,
7138 + .setup = byt_serial_setup,
7139 + },
7140 + {
7141 + .vendor = PCI_VENDOR_ID_INTEL,
7142 + .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
7143 + .subvendor = PCI_ANY_ID,
7144 + .subdevice = PCI_ANY_ID,
7145 + .setup = byt_serial_setup,
7146 + },
7147 /*
7148 * ITE
7149 */
7150 @@ -5603,6 +5622,16 @@ static struct pci_device_id serial_pci_tbl[] = {
7151 PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
7152 pbn_byt },
7153
7154 + /* Intel Broadwell */
7155 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
7156 + PCI_ANY_ID, PCI_ANY_ID,
7157 + PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
7158 + pbn_byt },
7159 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
7160 + PCI_ANY_ID, PCI_ANY_ID,
7161 + PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
7162 + pbn_byt },
7163 +
7164 /*
7165 * Intel Penwell
7166 */
7167 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
7168 index 5a5c1ab5a375..be96970646a9 100644
7169 --- a/drivers/tty/tty_io.c
7170 +++ b/drivers/tty/tty_io.c
7171 @@ -2670,6 +2670,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
7172 }
7173
7174 /**
7175 + * tiocgetd - get line discipline
7176 + * @tty: tty device
7177 + * @p: pointer to user data
7178 + *
7179 + * Retrieves the line discipline id directly from the ldisc.
7180 + *
7181 + * Locking: waits for ldisc reference (in case the line discipline
7182 + * is changing or the tty is being hungup)
7183 + */
7184 +
7185 +static int tiocgetd(struct tty_struct *tty, int __user *p)
7186 +{
7187 + struct tty_ldisc *ld;
7188 + int ret;
7189 +
7190 + ld = tty_ldisc_ref_wait(tty);
7191 + ret = put_user(ld->ops->num, p);
7192 + tty_ldisc_deref(ld);
7193 + return ret;
7194 +}
7195 +
7196 +/**
7197 * send_break - performed time break
7198 * @tty: device to break on
7199 * @duration: timeout in mS
7200 @@ -2895,7 +2917,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7201 case TIOCGSID:
7202 return tiocgsid(tty, real_tty, p);
7203 case TIOCGETD:
7204 - return put_user(tty->ldisc->ops->num, (int __user *)p);
7205 + return tiocgetd(tty, p);
7206 case TIOCSETD:
7207 return tiocsetd(tty, p);
7208 case TIOCVHANGUP:
7209 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
7210 index 0fe15aec7ed0..df3deb000a80 100644
7211 --- a/drivers/usb/class/cdc-acm.c
7212 +++ b/drivers/usb/class/cdc-acm.c
7213 @@ -432,7 +432,8 @@ static void acm_read_bulk_callback(struct urb *urb)
7214 set_bit(rb->index, &acm->read_urbs_free);
7215 dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
7216 __func__, status);
7217 - return;
7218 + if ((status != -ENOENT) || (urb->actual_length == 0))
7219 + return;
7220 }
7221
7222 usb_mark_last_busy(acm->dev);
7223 @@ -1414,6 +1415,8 @@ made_compressed_probe:
7224 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
7225 NULL, acm->writesize, acm_write_bulk, snd);
7226 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
7227 + if (quirks & SEND_ZERO_PACKET)
7228 + snd->urb->transfer_flags |= URB_ZERO_PACKET;
7229 snd->instance = acm;
7230 }
7231
7232 @@ -1848,6 +1851,11 @@ static const struct usb_device_id acm_ids[] = {
7233 },
7234 #endif
7235
7236 + /*Samsung phone in firmware update mode */
7237 + { USB_DEVICE(0x04e8, 0x685d),
7238 + .driver_info = IGNORE_DEVICE,
7239 + },
7240 +
7241 /* Exclude Infineon Flash Loader utility */
7242 { USB_DEVICE(0x058b, 0x0041),
7243 .driver_info = IGNORE_DEVICE,
7244 @@ -1871,6 +1879,10 @@ static const struct usb_device_id acm_ids[] = {
7245 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
7246 USB_CDC_ACM_PROTO_AT_CDMA) },
7247
7248 + { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
7249 + .driver_info = SEND_ZERO_PACKET,
7250 + },
7251 +
7252 { }
7253 };
7254
7255 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
7256 index b3b6c9db6fe5..ac830e0ae38b 100644
7257 --- a/drivers/usb/class/cdc-acm.h
7258 +++ b/drivers/usb/class/cdc-acm.h
7259 @@ -134,3 +134,4 @@ struct acm {
7260 #define IGNORE_DEVICE BIT(5)
7261 #define QUIRK_CONTROL_LINE_STATE BIT(6)
7262 #define CLEAR_HALT_CONDITIONS BIT(7)
7263 +#define SEND_ZERO_PACKET BIT(8)
7264 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7265 index ee11b301f3da..e56ad83b35a4 100644
7266 --- a/drivers/usb/core/hub.c
7267 +++ b/drivers/usb/core/hub.c
7268 @@ -5346,7 +5346,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
7269 }
7270
7271 bos = udev->bos;
7272 - udev->bos = NULL;
7273
7274 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
7275
7276 @@ -5439,8 +5438,11 @@ done:
7277 usb_set_usb2_hardware_lpm(udev, 1);
7278 usb_unlocked_enable_lpm(udev);
7279 usb_enable_ltm(udev);
7280 - usb_release_bos_descriptor(udev);
7281 - udev->bos = bos;
7282 + /* release the new BOS descriptor allocated by hub_port_init() */
7283 + if (udev->bos != bos) {
7284 + usb_release_bos_descriptor(udev);
7285 + udev->bos = bos;
7286 + }
7287 return 0;
7288
7289 re_enumerate:
7290 diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
7291 index 65b0b6a58599..da03d8b258dd 100644
7292 --- a/drivers/usb/host/Makefile
7293 +++ b/drivers/usb/host/Makefile
7294 @@ -26,9 +26,6 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci/
7295
7296 obj-$(CONFIG_PCI) += pci-quirks.o
7297
7298 -obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
7299 -obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
7300 -
7301 obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
7302 obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
7303 obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
7304 @@ -63,6 +60,8 @@ obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
7305 obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
7306 obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
7307 obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
7308 +obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
7309 +obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
7310 obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
7311 obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
7312 obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
7313 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
7314 index 7e5c90eebb9c..3ff5fcc7c94b 100644
7315 --- a/drivers/usb/host/xhci-pci.c
7316 +++ b/drivers/usb/host/xhci-pci.c
7317 @@ -23,10 +23,17 @@
7318 #include <linux/pci.h>
7319 #include <linux/slab.h>
7320 #include <linux/module.h>
7321 +#include <linux/acpi.h>
7322
7323 #include "xhci.h"
7324 #include "xhci-trace.h"
7325
7326 +#define SSIC_PORT_NUM 2
7327 +#define SSIC_PORT_CFG2 0x880c
7328 +#define SSIC_PORT_CFG2_OFFSET 0x30
7329 +#define PROG_DONE (1 << 30)
7330 +#define SSIC_PORT_UNUSED (1 << 31)
7331 +
7332 /* Device for a quirk */
7333 #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
7334 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
7335 @@ -40,6 +47,7 @@
7336 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
7337 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
7338 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
7339 +#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
7340
7341 static const char hcd_name[] = "xhci_hcd";
7342
7343 @@ -140,9 +148,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
7344 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
7345 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
7346 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
7347 - pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
7348 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
7349 + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
7350 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
7351 }
7352 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
7353 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
7354 + xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
7355 + }
7356 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
7357 pdev->device == PCI_DEVICE_ID_EJ168) {
7358 xhci->quirks |= XHCI_RESET_ON_RESUME;
7359 @@ -169,20 +182,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
7360 "QUIRK: Resetting on resume");
7361 }
7362
7363 -/*
7364 - * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
7365 - * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
7366 - */
7367 -static void xhci_pme_quirk(struct xhci_hcd *xhci)
7368 +#ifdef CONFIG_ACPI
7369 +static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
7370 {
7371 - u32 val;
7372 - void __iomem *reg;
7373 -
7374 - reg = (void __iomem *) xhci->cap_regs + 0x80a4;
7375 - val = readl(reg);
7376 - writel(val | BIT(28), reg);
7377 - readl(reg);
7378 + static const u8 intel_dsm_uuid[] = {
7379 + 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
7380 + 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
7381 + };
7382 + acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
7383 }
7384 +#else
7385 + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
7386 +#endif /* CONFIG_ACPI */
7387
7388 /* called during probe() after chip reset completes */
7389 static int xhci_pci_setup(struct usb_hcd *hcd)
7390 @@ -263,6 +274,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
7391 HCC_MAX_PSA(xhci->hcc_params) >= 4)
7392 xhci->shared_hcd->can_do_streams = 1;
7393
7394 + if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
7395 + xhci_pme_acpi_rtd3_enable(dev);
7396 +
7397 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
7398 pm_runtime_put_noidle(&dev->dev);
7399
7400 @@ -296,10 +310,65 @@ static void xhci_pci_remove(struct pci_dev *dev)
7401 }
7402
7403 #ifdef CONFIG_PM
7404 +/*
7405 + * In some Intel xHCI controllers, in order to get D3 working,
7406 + * through a vendor specific SSIC CONFIG register at offset 0x883c,
7407 + * SSIC PORT need to be marked as "unused" before putting xHCI
7408 + * into D3. After D3 exit, the SSIC port need to be marked as "used".
7409 + * Without this change, xHCI might not enter D3 state.
7410 + */
7411 +static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
7412 +{
7413 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7414 + u32 val;
7415 + void __iomem *reg;
7416 + int i;
7417 +
7418 + for (i = 0; i < SSIC_PORT_NUM; i++) {
7419 + reg = (void __iomem *) xhci->cap_regs +
7420 + SSIC_PORT_CFG2 +
7421 + i * SSIC_PORT_CFG2_OFFSET;
7422 +
7423 + /* Notify SSIC that SSIC profile programming is not done. */
7424 + val = readl(reg) & ~PROG_DONE;
7425 + writel(val, reg);
7426 +
7427 + /* Mark SSIC port as unused(suspend) or used(resume) */
7428 + val = readl(reg);
7429 + if (suspend)
7430 + val |= SSIC_PORT_UNUSED;
7431 + else
7432 + val &= ~SSIC_PORT_UNUSED;
7433 + writel(val, reg);
7434 +
7435 + /* Notify SSIC that SSIC profile programming is done */
7436 + val = readl(reg) | PROG_DONE;
7437 + writel(val, reg);
7438 + readl(reg);
7439 + }
7440 +}
7441 +
7442 +/*
7443 + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
7444 + * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
7445 + */
7446 +static void xhci_pme_quirk(struct usb_hcd *hcd)
7447 +{
7448 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7449 + void __iomem *reg;
7450 + u32 val;
7451 +
7452 + reg = (void __iomem *) xhci->cap_regs + 0x80a4;
7453 + val = readl(reg);
7454 + writel(val | BIT(28), reg);
7455 + readl(reg);
7456 +}
7457 +
7458 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
7459 {
7460 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
7461 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
7462 + int ret;
7463
7464 /*
7465 * Systems with the TI redriver that loses port status change events
7466 @@ -309,9 +378,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
7467 pdev->no_d3cold = true;
7468
7469 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
7470 - xhci_pme_quirk(xhci);
7471 + xhci_pme_quirk(hcd);
7472 +
7473 + if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
7474 + xhci_ssic_port_unused_quirk(hcd, true);
7475
7476 - return xhci_suspend(xhci, do_wakeup);
7477 + ret = xhci_suspend(xhci, do_wakeup);
7478 + if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
7479 + xhci_ssic_port_unused_quirk(hcd, false);
7480 +
7481 + return ret;
7482 }
7483
7484 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
7485 @@ -341,8 +417,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
7486 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
7487 usb_enable_intel_xhci_ports(pdev);
7488
7489 + if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
7490 + xhci_ssic_port_unused_quirk(hcd, false);
7491 +
7492 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
7493 - xhci_pme_quirk(xhci);
7494 + xhci_pme_quirk(hcd);
7495
7496 retval = xhci_resume(xhci, hibernated);
7497 return retval;
7498 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
7499 index 41d7a05f8af4..e6d858a49d04 100644
7500 --- a/drivers/usb/host/xhci-ring.c
7501 +++ b/drivers/usb/host/xhci-ring.c
7502 @@ -3001,21 +3001,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7503 }
7504
7505 /*
7506 - * The TD size is the number of bytes remaining in the TD (including this TRB),
7507 - * right shifted by 10.
7508 - * It must fit in bits 21:17, so it can't be bigger than 31.
7509 - */
7510 -static u32 xhci_td_remainder(unsigned int remainder)
7511 -{
7512 - u32 max = (1 << (21 - 17 + 1)) - 1;
7513 -
7514 - if ((remainder >> 10) >= max)
7515 - return max << 17;
7516 - else
7517 - return (remainder >> 10) << 17;
7518 -}
7519 -
7520 -/*
7521 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
7522 * packets remaining in the TD (*not* including this TRB).
7523 *
7524 @@ -3027,30 +3012,36 @@ static u32 xhci_td_remainder(unsigned int remainder)
7525 *
7526 * TD size = total_packet_count - packets_transferred
7527 *
7528 - * It must fit in bits 21:17, so it can't be bigger than 31.
7529 + * For xHCI 0.96 and older, TD size field should be the remaining bytes
7530 + * including this TRB, right shifted by 10
7531 + *
7532 + * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
7533 + * This is taken care of in the TRB_TD_SIZE() macro
7534 + *
7535 * The last TRB in a TD must have the TD size set to zero.
7536 */
7537 -static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
7538 - unsigned int total_packet_count, struct urb *urb,
7539 - unsigned int num_trbs_left)
7540 +static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
7541 + int trb_buff_len, unsigned int td_total_len,
7542 + struct urb *urb, unsigned int num_trbs_left)
7543 {
7544 - int packets_transferred;
7545 + u32 maxp, total_packet_count;
7546 +
7547 + if (xhci->hci_version < 0x100)
7548 + return ((td_total_len - transferred) >> 10);
7549 +
7550 + maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
7551 + total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
7552
7553 /* One TRB with a zero-length data packet. */
7554 - if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
7555 + if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
7556 + trb_buff_len == td_total_len)
7557 return 0;
7558
7559 - /* All the TRB queueing functions don't count the current TRB in
7560 - * running_total.
7561 - */
7562 - packets_transferred = (running_total + trb_buff_len) /
7563 - GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
7564 -
7565 - if ((total_packet_count - packets_transferred) > 31)
7566 - return 31 << 17;
7567 - return (total_packet_count - packets_transferred) << 17;
7568 + /* Queueing functions don't count the current TRB into transferred */
7569 + return (total_packet_count - ((transferred + trb_buff_len) / maxp));
7570 }
7571
7572 +
7573 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7574 struct urb *urb, int slot_id, unsigned int ep_index)
7575 {
7576 @@ -3172,17 +3163,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7577 }
7578
7579 /* Set the TRB length, TD size, and interrupter fields. */
7580 - if (xhci->hci_version < 0x100) {
7581 - remainder = xhci_td_remainder(
7582 - urb->transfer_buffer_length -
7583 - running_total);
7584 - } else {
7585 - remainder = xhci_v1_0_td_remainder(running_total,
7586 - trb_buff_len, total_packet_count, urb,
7587 - num_trbs - 1);
7588 - }
7589 + remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
7590 + urb->transfer_buffer_length,
7591 + urb, num_trbs - 1);
7592 +
7593 length_field = TRB_LEN(trb_buff_len) |
7594 - remainder |
7595 + TRB_TD_SIZE(remainder) |
7596 TRB_INTR_TARGET(0);
7597
7598 if (num_trbs > 1)
7599 @@ -3345,17 +3331,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7600 field |= TRB_ISP;
7601
7602 /* Set the TRB length, TD size, and interrupter fields. */
7603 - if (xhci->hci_version < 0x100) {
7604 - remainder = xhci_td_remainder(
7605 - urb->transfer_buffer_length -
7606 - running_total);
7607 - } else {
7608 - remainder = xhci_v1_0_td_remainder(running_total,
7609 - trb_buff_len, total_packet_count, urb,
7610 - num_trbs - 1);
7611 - }
7612 + remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
7613 + urb->transfer_buffer_length,
7614 + urb, num_trbs - 1);
7615 +
7616 length_field = TRB_LEN(trb_buff_len) |
7617 - remainder |
7618 + TRB_TD_SIZE(remainder) |
7619 TRB_INTR_TARGET(0);
7620
7621 if (num_trbs > 1)
7622 @@ -3393,7 +3374,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7623 struct usb_ctrlrequest *setup;
7624 struct xhci_generic_trb *start_trb;
7625 int start_cycle;
7626 - u32 field, length_field;
7627 + u32 field, length_field, remainder;
7628 struct urb_priv *urb_priv;
7629 struct xhci_td *td;
7630
7631 @@ -3466,9 +3447,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7632 else
7633 field = TRB_TYPE(TRB_DATA);
7634
7635 + remainder = xhci_td_remainder(xhci, 0,
7636 + urb->transfer_buffer_length,
7637 + urb->transfer_buffer_length,
7638 + urb, 1);
7639 +
7640 length_field = TRB_LEN(urb->transfer_buffer_length) |
7641 - xhci_td_remainder(urb->transfer_buffer_length) |
7642 + TRB_TD_SIZE(remainder) |
7643 TRB_INTR_TARGET(0);
7644 +
7645 if (urb->transfer_buffer_length > 0) {
7646 if (setup->bRequestType & USB_DIR_IN)
7647 field |= TRB_DIR_IN;
7648 @@ -3691,17 +3678,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
7649 trb_buff_len = td_remain_len;
7650
7651 /* Set the TRB length, TD size, & interrupter fields. */
7652 - if (xhci->hci_version < 0x100) {
7653 - remainder = xhci_td_remainder(
7654 - td_len - running_total);
7655 - } else {
7656 - remainder = xhci_v1_0_td_remainder(
7657 - running_total, trb_buff_len,
7658 - total_packet_count, urb,
7659 - (trbs_per_td - j - 1));
7660 - }
7661 + remainder = xhci_td_remainder(xhci, running_total,
7662 + trb_buff_len, td_len,
7663 + urb, trbs_per_td - j - 1);
7664 +
7665 length_field = TRB_LEN(trb_buff_len) |
7666 - remainder |
7667 + TRB_TD_SIZE(remainder) |
7668 TRB_INTR_TARGET(0);
7669
7670 queue_trb(xhci, ep_ring, more_trbs_coming,
7671 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
7672 index f6bb118e4501..910f7fac031f 100644
7673 --- a/drivers/usb/host/xhci.c
7674 +++ b/drivers/usb/host/xhci.c
7675 @@ -1559,7 +1559,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
7676 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
7677 "HW died, freeing TD.");
7678 urb_priv = urb->hcpriv;
7679 - for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
7680 + for (i = urb_priv->td_cnt;
7681 + i < urb_priv->length && xhci->devs[urb->dev->slot_id];
7682 + i++) {
7683 td = urb_priv->td[i];
7684 if (!list_empty(&td->td_list))
7685 list_del_init(&td->td_list);
7686 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
7687 index 0f26dd2697b6..f18cdf0ec795 100644
7688 --- a/drivers/usb/host/xhci.h
7689 +++ b/drivers/usb/host/xhci.h
7690 @@ -1130,6 +1130,8 @@ enum xhci_setup_dev {
7691 /* Normal TRB fields */
7692 /* transfer_len bitmasks - bits 0:16 */
7693 #define TRB_LEN(p) ((p) & 0x1ffff)
7694 +/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
7695 +#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
7696 /* Interrupter Target - which MSI-X vector to target the completion event at */
7697 #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
7698 #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
7699 @@ -1568,6 +1570,7 @@ struct xhci_hcd {
7700 /* For controllers with a broken beyond repair streams implementation */
7701 #define XHCI_BROKEN_STREAMS (1 << 19)
7702 #define XHCI_PME_STUCK_QUIRK (1 << 20)
7703 +#define XHCI_SSIC_PORT_UNUSED (1 << 22)
7704 unsigned int num_active_eps;
7705 unsigned int limit_active_eps;
7706 /* There are two roothubs to keep track of bus suspend info for */
7707 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
7708 index 59b2126b21a3..1dd9919081f8 100644
7709 --- a/drivers/usb/serial/cp210x.c
7710 +++ b/drivers/usb/serial/cp210x.c
7711 @@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
7712 { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
7713 { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
7714 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
7715 + { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
7716 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
7717 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
7718 { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
7719 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
7720 index a5a0376bbd48..8c660ae401d8 100644
7721 --- a/drivers/usb/serial/ftdi_sio.c
7722 +++ b/drivers/usb/serial/ftdi_sio.c
7723 @@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
7724 { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
7725 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
7726 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
7727 + { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
7728 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
7729
7730 /* Papouch devices based on FTDI chip */
7731 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
7732 index 2943b97b2a83..7850071c0ae1 100644
7733 --- a/drivers/usb/serial/ftdi_sio_ids.h
7734 +++ b/drivers/usb/serial/ftdi_sio_ids.h
7735 @@ -615,6 +615,7 @@
7736 */
7737 #define RATOC_VENDOR_ID 0x0584
7738 #define RATOC_PRODUCT_ID_USB60F 0xb020
7739 +#define RATOC_PRODUCT_ID_SCU18 0xb03a
7740
7741 /*
7742 * Infineon Technologies
7743 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7744 index 4021846139c9..88540596973f 100644
7745 --- a/drivers/usb/serial/option.c
7746 +++ b/drivers/usb/serial/option.c
7747 @@ -271,6 +271,8 @@ static void option_instat_callback(struct urb *urb);
7748 #define TELIT_PRODUCT_CC864_SINGLE 0x1006
7749 #define TELIT_PRODUCT_DE910_DUAL 0x1010
7750 #define TELIT_PRODUCT_UE910_V2 0x1012
7751 +#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
7752 +#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
7753 #define TELIT_PRODUCT_LE920 0x1200
7754 #define TELIT_PRODUCT_LE910 0x1201
7755
7756 @@ -623,6 +625,16 @@ static const struct option_blacklist_info sierra_mc73xx_blacklist = {
7757 .reserved = BIT(8) | BIT(10) | BIT(11),
7758 };
7759
7760 +static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
7761 + .sendsetup = BIT(2),
7762 + .reserved = BIT(0) | BIT(1) | BIT(3),
7763 +};
7764 +
7765 +static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
7766 + .sendsetup = BIT(0),
7767 + .reserved = BIT(1) | BIT(2) | BIT(3),
7768 +};
7769 +
7770 static const struct usb_device_id option_ids[] = {
7771 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
7772 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
7773 @@ -1172,6 +1184,10 @@ static const struct usb_device_id option_ids[] = {
7774 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
7775 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
7776 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
7777 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
7778 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
7779 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
7780 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
7781 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
7782 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
7783 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
7784 @@ -1691,7 +1707,7 @@ static const struct usb_device_id option_ids[] = {
7785 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
7786 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
7787 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7788 - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
7789 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
7790 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
7791 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
7792 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
7793 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
7794 index 60afb39eb73c..337a0be89fcf 100644
7795 --- a/drivers/usb/serial/visor.c
7796 +++ b/drivers/usb/serial/visor.c
7797 @@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
7798 (serial->num_interrupt_in == 0))
7799 return 0;
7800
7801 + if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
7802 + dev_err(&serial->interface->dev, "missing endpoints\n");
7803 + return -ENODEV;
7804 + }
7805 +
7806 /*
7807 * It appears that Treos and Kyoceras want to use the
7808 * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
7809 @@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
7810 */
7811
7812 /* some sanity check */
7813 - if (serial->num_ports < 2)
7814 - return -1;
7815 + if (serial->num_bulk_out < 2) {
7816 + dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
7817 + return -ENODEV;
7818 + }
7819
7820 /* port 0 now uses the modified endpoint Address */
7821 port = serial->port[0];
7822 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
7823 index 82e80e034f25..89bac470f04e 100644
7824 --- a/drivers/virtio/virtio_balloon.c
7825 +++ b/drivers/virtio/virtio_balloon.c
7826 @@ -166,13 +166,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
7827 mutex_unlock(&vb->balloon_lock);
7828 }
7829
7830 -static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
7831 +static void release_pages_balloon(struct virtio_balloon *vb)
7832 {
7833 unsigned int i;
7834
7835 /* Find pfns pointing at start of each page, get pages and free them. */
7836 - for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
7837 - struct page *page = balloon_pfn_to_page(pfns[i]);
7838 + for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
7839 + struct page *page = balloon_pfn_to_page(vb->pfns[i]);
7840 adjust_managed_page_count(page, 1);
7841 put_page(page); /* balloon reference */
7842 }
7843 @@ -205,8 +205,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
7844 */
7845 if (vb->num_pfns != 0)
7846 tell_host(vb, vb->deflate_vq);
7847 + release_pages_balloon(vb);
7848 mutex_unlock(&vb->balloon_lock);
7849 - release_pages_by_pfn(vb->pfns, vb->num_pfns);
7850 return num_freed_pages;
7851 }
7852
7853 diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
7854 index eba1b7ac7294..14f767e8e5c5 100644
7855 --- a/drivers/virtio/virtio_pci_common.c
7856 +++ b/drivers/virtio/virtio_pci_common.c
7857 @@ -554,6 +554,7 @@ err_enable_device:
7858 static void virtio_pci_remove(struct pci_dev *pci_dev)
7859 {
7860 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
7861 + struct device *dev = get_device(&vp_dev->vdev.dev);
7862
7863 unregister_virtio_device(&vp_dev->vdev);
7864
7865 @@ -564,6 +565,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
7866
7867 pci_release_regions(pci_dev);
7868 pci_disable_device(pci_dev);
7869 + put_device(dev);
7870 }
7871
7872 static struct pci_driver virtio_pci_driver = {
7873 diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
7874 index 0ef5cc13fae2..61205e3bbefa 100644
7875 --- a/fs/btrfs/btrfs_inode.h
7876 +++ b/fs/btrfs/btrfs_inode.h
7877 @@ -192,6 +192,10 @@ struct btrfs_inode {
7878 /* File creation time. */
7879 struct timespec i_otime;
7880
7881 + /* Hook into fs_info->delayed_iputs */
7882 + struct list_head delayed_iput;
7883 + long delayed_iput_count;
7884 +
7885 struct inode vfs_inode;
7886 };
7887
7888 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
7889 index 6f364e1d8d3d..699944a07491 100644
7890 --- a/fs/btrfs/ctree.h
7891 +++ b/fs/btrfs/ctree.h
7892 @@ -1544,7 +1544,7 @@ struct btrfs_fs_info {
7893
7894 spinlock_t delayed_iput_lock;
7895 struct list_head delayed_iputs;
7896 - struct rw_semaphore delayed_iput_sem;
7897 + struct mutex cleaner_delayed_iput_mutex;
7898
7899 /* this protects tree_mod_seq_list */
7900 spinlock_t tree_mod_seq_lock;
7901 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
7902 index 2ef9a4b72d06..99e8f60c7962 100644
7903 --- a/fs/btrfs/disk-io.c
7904 +++ b/fs/btrfs/disk-io.c
7905 @@ -1772,8 +1772,11 @@ static int cleaner_kthread(void *arg)
7906 goto sleep;
7907 }
7908
7909 + mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
7910 btrfs_run_delayed_iputs(root);
7911 btrfs_delete_unused_bgs(root->fs_info);
7912 + mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
7913 +
7914 again = btrfs_clean_one_deleted_snapshot(root);
7915 mutex_unlock(&root->fs_info->cleaner_mutex);
7916
7917 @@ -2491,8 +2494,8 @@ int open_ctree(struct super_block *sb,
7918 mutex_init(&fs_info->unused_bg_unpin_mutex);
7919 mutex_init(&fs_info->reloc_mutex);
7920 mutex_init(&fs_info->delalloc_root_mutex);
7921 + mutex_init(&fs_info->cleaner_delayed_iput_mutex);
7922 seqlock_init(&fs_info->profiles_lock);
7923 - init_rwsem(&fs_info->delayed_iput_sem);
7924
7925 init_completion(&fs_info->kobj_unregister);
7926 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
7927 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
7928 index 0ec3acd14cbf..3c1938000a5d 100644
7929 --- a/fs/btrfs/extent-tree.c
7930 +++ b/fs/btrfs/extent-tree.c
7931 @@ -3985,11 +3985,12 @@ commit_trans:
7932 if (ret)
7933 return ret;
7934 /*
7935 - * make sure that all running delayed iput are
7936 - * done
7937 + * The cleaner kthread might still be doing iput
7938 + * operations. Wait for it to finish so that
7939 + * more space is released.
7940 */
7941 - down_write(&root->fs_info->delayed_iput_sem);
7942 - up_write(&root->fs_info->delayed_iput_sem);
7943 + mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
7944 + mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
7945 goto again;
7946 } else {
7947 btrfs_end_transaction(trans, root);
7948 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
7949 index 5136c73b3dce..df4e0462976e 100644
7950 --- a/fs/btrfs/inode.c
7951 +++ b/fs/btrfs/inode.c
7952 @@ -3080,56 +3080,46 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
7953 start, (size_t)(end - start + 1));
7954 }
7955
7956 -struct delayed_iput {
7957 - struct list_head list;
7958 - struct inode *inode;
7959 -};
7960 -
7961 -/* JDM: If this is fs-wide, why can't we add a pointer to
7962 - * btrfs_inode instead and avoid the allocation? */
7963 void btrfs_add_delayed_iput(struct inode *inode)
7964 {
7965 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
7966 - struct delayed_iput *delayed;
7967 + struct btrfs_inode *binode = BTRFS_I(inode);
7968
7969 if (atomic_add_unless(&inode->i_count, -1, 1))
7970 return;
7971
7972 - delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
7973 - delayed->inode = inode;
7974 -
7975 spin_lock(&fs_info->delayed_iput_lock);
7976 - list_add_tail(&delayed->list, &fs_info->delayed_iputs);
7977 + if (binode->delayed_iput_count == 0) {
7978 + ASSERT(list_empty(&binode->delayed_iput));
7979 + list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
7980 + } else {
7981 + binode->delayed_iput_count++;
7982 + }
7983 spin_unlock(&fs_info->delayed_iput_lock);
7984 }
7985
7986 void btrfs_run_delayed_iputs(struct btrfs_root *root)
7987 {
7988 - LIST_HEAD(list);
7989 struct btrfs_fs_info *fs_info = root->fs_info;
7990 - struct delayed_iput *delayed;
7991 - int empty;
7992 -
7993 - spin_lock(&fs_info->delayed_iput_lock);
7994 - empty = list_empty(&fs_info->delayed_iputs);
7995 - spin_unlock(&fs_info->delayed_iput_lock);
7996 - if (empty)
7997 - return;
7998 -
7999 - down_read(&fs_info->delayed_iput_sem);
8000
8001 spin_lock(&fs_info->delayed_iput_lock);
8002 - list_splice_init(&fs_info->delayed_iputs, &list);
8003 - spin_unlock(&fs_info->delayed_iput_lock);
8004 -
8005 - while (!list_empty(&list)) {
8006 - delayed = list_entry(list.next, struct delayed_iput, list);
8007 - list_del(&delayed->list);
8008 - iput(delayed->inode);
8009 - kfree(delayed);
8010 + while (!list_empty(&fs_info->delayed_iputs)) {
8011 + struct btrfs_inode *inode;
8012 +
8013 + inode = list_first_entry(&fs_info->delayed_iputs,
8014 + struct btrfs_inode, delayed_iput);
8015 + if (inode->delayed_iput_count) {
8016 + inode->delayed_iput_count--;
8017 + list_move_tail(&inode->delayed_iput,
8018 + &fs_info->delayed_iputs);
8019 + } else {
8020 + list_del_init(&inode->delayed_iput);
8021 + }
8022 + spin_unlock(&fs_info->delayed_iput_lock);
8023 + iput(&inode->vfs_inode);
8024 + spin_lock(&fs_info->delayed_iput_lock);
8025 }
8026 -
8027 - up_read(&root->fs_info->delayed_iput_sem);
8028 + spin_unlock(&fs_info->delayed_iput_lock);
8029 }
8030
8031 /*
8032 @@ -8890,6 +8880,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
8033 ei->dir_index = 0;
8034 ei->last_unlink_trans = 0;
8035 ei->last_log_commit = 0;
8036 + ei->delayed_iput_count = 0;
8037
8038 spin_lock_init(&ei->lock);
8039 ei->outstanding_extents = 0;
8040 @@ -8914,6 +8905,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
8041 mutex_init(&ei->delalloc_mutex);
8042 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8043 INIT_LIST_HEAD(&ei->delalloc_inodes);
8044 + INIT_LIST_HEAD(&ei->delayed_iput);
8045 RB_CLEAR_NODE(&ei->rb_node);
8046
8047 return inode;
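
The btrfs hunks above replace the kmalloc'd delayed_iput wrapper with a list head and counter embedded in btrfs_inode itself, and drain the list while dropping the spinlock around each iput(). A minimal userspace sketch of the same deferred-put pattern, assuming hypothetical names (struct obj, deferred_put, run_deferred_puts) and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;	/* intrusive link, embedded in the object */
	int on_list;		/* like delayed_iput_count: pending puts */
	int refcount;
	char name[16];
};

static struct obj *pending;	/* head of the deferred-put list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void release(struct obj *o)	/* final-put action */
{
	printf("releasing %s\n", o->name);
	free(o);
}

/* Defer the final put instead of running it in the caller's context. */
static void deferred_put(struct obj *o)
{
	pthread_mutex_lock(&lock);
	if (!o->on_list) {
		o->next = pending;
		pending = o;
	}
	o->on_list++;		/* repeated defers only bump the counter */
	pthread_mutex_unlock(&lock);
}

/* Drain the list, dropping the lock around the potentially slow puts.
 * A single drainer is assumed, as with the cleaner kthread above. */
static void run_deferred_puts(void)
{
	pthread_mutex_lock(&lock);
	while (pending) {
		struct obj *o = pending;
		int puts = o->on_list;

		o->on_list = 0;
		pending = o->next;
		pthread_mutex_unlock(&lock);
		while (puts--)
			if (--o->refcount == 0)
				release(o);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	snprintf(o->name, sizeof(o->name), "inode-42");
	o->refcount = 2;
	deferred_put(o);
	deferred_put(o);	/* second defer only bumps the counter */
	run_deferred_puts();	/* prints "releasing inode-42" */
	return 0;
}

Embedding the link in the object is what removes the GFP_NOFS | __GFP_NOFAIL allocation the old code needed on every deferred put.
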
8048 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
8049 index 174f5e1e00ab..5113b7257b45 100644
8050 --- a/fs/btrfs/volumes.c
8051 +++ b/fs/btrfs/volumes.c
8052 @@ -6322,6 +6322,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
8053 goto out_short_read;
8054
8055 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
8056 + if (!num_stripes) {
8057 + printk(KERN_ERR
8058 + "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
8059 + num_stripes, cur_offset);
8060 + ret = -EIO;
8061 + break;
8062 + }
8063 +
8064 len = btrfs_chunk_item_size(num_stripes);
8065 if (cur_offset + len > array_size)
8066 goto out_short_read;
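
The sys_array check above rejects num_stripes == 0 before it feeds btrfs_chunk_item_size(), whose (num_stripes - 1) term would otherwise wrap and derail the walk. A small sketch of the same validate-before-advance idea over a hypothetical [count][payload] record buffer:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical record: 1-byte element count, then 2 bytes per element. */
static int walk_records(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		uint8_t count = buf[off];
		size_t item;

		if (count == 0) {
			/* A zero count is impossible for the format and, in
			 * the btrfs case, would wrap the item-size
			 * arithmetic; reject it before using it. */
			fprintf(stderr, "invalid count 0 at offset %zu\n", off);
			return -1;
		}
		item = 1 + 2 * (size_t)count;
		if (off + item > len) {
			fprintf(stderr, "short record at offset %zu\n", off);
			return -1;
		}
		off += item;
	}
	return 0;
}

int main(void)
{
	const uint8_t good[] = { 1, 0xaa, 0xbb, 2, 1, 2, 3, 4 };
	const uint8_t bad[]  = { 0, 0xaa };

	printf("good: %d\n", walk_records(good, sizeof(good)));
	printf("bad:  %d\n", walk_records(bad, sizeof(bad)));
	return 0;
}
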
8067 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
8068 index 7febcf2475c5..50b268483302 100644
8069 --- a/fs/cifs/cifs_debug.c
8070 +++ b/fs/cifs/cifs_debug.c
8071 @@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
8072 vaf.fmt = fmt;
8073 vaf.va = &args;
8074
8075 - pr_err("CIFS VFS: %pV", &vaf);
8076 + pr_err_ratelimited("CIFS VFS: %pV", &vaf);
8077
8078 va_end(args);
8079 }
8080 diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
8081 index f40fbaca1b2a..66cf0f9fff89 100644
8082 --- a/fs/cifs/cifs_debug.h
8083 +++ b/fs/cifs/cifs_debug.h
8084 @@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
8085 /* information message: e.g., configuration, major event */
8086 #define cifs_dbg(type, fmt, ...) \
8087 do { \
8088 - if (type == FYI) { \
8089 - if (cifsFYI & CIFS_INFO) { \
8090 - pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
8091 - } \
8092 + if (type == FYI && cifsFYI & CIFS_INFO) { \
8093 + pr_debug_ratelimited("%s: " \
8094 + fmt, __FILE__, ##__VA_ARGS__); \
8095 } else if (type == VFS) { \
8096 cifs_vfs_err(fmt, ##__VA_ARGS__); \
8097 } else if (type == NOISY && type != 0) { \
8098 - pr_debug(fmt, ##__VA_ARGS__); \
8099 + pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
8100 } \
8101 } while (0)
8102
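
Switching to pr_err_ratelimited()/pr_debug_ratelimited() keeps a flood of identical CIFS errors from saturating the log. The kernel limiter counts messages per interval with an allowed burst; a simplified userspace sketch of that scheme (the ratelimit_ok and err_ratelimited names are invented here):

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t window_start;	/* start of the current interval */
	int interval;		/* seconds per window */
	int burst;		/* messages allowed per window */
	int printed;
	int missed;
};

/* Returns 1 if the caller may print, 0 if the message is dropped. */
static int ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval) {
		if (rl->missed)
			fprintf(stderr, "%d messages suppressed\n", rl->missed);
		rl->window_start = now;
		rl->printed = 0;
		rl->missed = 0;
	}
	if (rl->printed < rl->burst) {
		rl->printed++;
		return 1;
	}
	rl->missed++;
	return 0;
}

static struct ratelimit err_rl = { 0, 5, 10, 0, 0 };	/* 10 msgs per 5s */

static void err_ratelimited(const char *fmt, ...)
{
	va_list ap;

	if (!ratelimit_ok(&err_rl))
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		err_ratelimited("CIFS VFS: fake error %d\n", i);
	return 0;
}
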
8103 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8104 index 8383d5ea4202..de626b939811 100644
8105 --- a/fs/cifs/connect.c
8106 +++ b/fs/cifs/connect.c
8107 @@ -357,7 +357,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
8108 server->session_key.response = NULL;
8109 server->session_key.len = 0;
8110 server->lstrp = jiffies;
8111 - mutex_unlock(&server->srv_mutex);
8112
8113 /* mark submitted MIDs for retry and issue callback */
8114 INIT_LIST_HEAD(&retry_list);
8115 @@ -370,6 +369,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
8116 list_move(&mid_entry->qhead, &retry_list);
8117 }
8118 spin_unlock(&GlobalMid_Lock);
8119 + mutex_unlock(&server->srv_mutex);
8120
8121 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
8122 list_for_each_safe(tmp, tmp2, &retry_list) {
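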
8123 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
8124 index b1eede3678a9..3634c7adf7d2 100644
8125 --- a/fs/cifs/readdir.c
8126 +++ b/fs/cifs/readdir.c
8127 @@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
8128 * if buggy server returns . and .. late do we want to
8129 * check for that here?
8130 */
8131 + *tmp_buf = 0;
8132 rc = cifs_filldir(current_entry, file, ctx,
8133 tmp_buf, max_len);
8134 if (rc) {
8135 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
8136 index 126f46b887cc..66106f6ed7b4 100644
8137 --- a/fs/cifs/transport.c
8138 +++ b/fs/cifs/transport.c
8139 @@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
8140 cifs_in_send_dec(server);
8141 cifs_save_when_sent(mid);
8142
8143 - if (rc < 0)
8144 + if (rc < 0) {
8145 server->sequence_number -= 2;
8146 + cifs_delete_mid(mid);
8147 + }
8148 +
8149 mutex_unlock(&server->srv_mutex);
8150
8151 if (rc == 0)
8152 return 0;
8153
8154 - cifs_delete_mid(mid);
8155 add_credits_and_wake_if(server, credits, optype);
8156 return rc;
8157 }
8158 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
8159 index de2d6245e9fa..f895a85d9304 100644
8160 --- a/fs/hostfs/hostfs_kern.c
8161 +++ b/fs/hostfs/hostfs_kern.c
8162 @@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
8163
8164 init_special_inode(inode, mode, dev);
8165 err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
8166 - if (!err)
8167 + if (err)
8168 goto out_free;
8169
8170 err = read_name(inode, name);
8171 __putname(name);
8172 if (err)
8173 goto out_put;
8174 - if (err)
8175 - goto out_put;
8176
8177 d_instantiate(dentry, inode);
8178 return 0;
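
The hostfs fix flips an inverted error test: "if (!err) goto out_free;" bailed out on success and fell through to use the inode on failure, and a duplicated "goto out_put" was left behind. A tiny sketch of the goto-unwind idiom with the test the right way around; setup() and its steps are hypothetical:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *buf = malloc(16);
	int err;

	if (!buf)
		return -1;

	err = 0;		/* pretend step 1 (do_mknod) succeeded */
	if (err)		/* correct sense: bail only on failure */
		goto out_free;	/* (the bug was "if (!err) goto out_free") */

	printf("using %p\n", (void *)buf);
	free(buf);
	return 0;

out_free:
	free(buf);
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}
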
8179 diff --git a/fs/locks.c b/fs/locks.c
8180 index d3d558ba4da7..8501eecb2af0 100644
8181 --- a/fs/locks.c
8182 +++ b/fs/locks.c
8183 @@ -2154,7 +2154,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
8184 goto out;
8185 }
8186
8187 -again:
8188 error = flock_to_posix_lock(filp, file_lock, &flock);
8189 if (error)
8190 goto out;
8191 @@ -2196,19 +2195,22 @@ again:
8192 * Attempt to detect a close/fcntl race and recover by
8193 * releasing the lock that was just acquired.
8194 */
8195 - /*
8196 - * we need that spin_lock here - it prevents reordering between
8197 - * update of i_flctx->flc_posix and check for it done in close().
8198 - * rcu_read_lock() wouldn't do.
8199 - */
8200 - spin_lock(&current->files->file_lock);
8201 - f = fcheck(fd);
8202 - spin_unlock(&current->files->file_lock);
8203 - if (!error && f != filp && flock.l_type != F_UNLCK) {
8204 - flock.l_type = F_UNLCK;
8205 - goto again;
8206 + if (!error && file_lock->fl_type != F_UNLCK) {
8207 + /*
8208 + * We need that spin_lock here - it prevents reordering between
8209 + * update of i_flctx->flc_posix and check for it done in
8210 + * close(). rcu_read_lock() wouldn't do.
8211 + */
8212 + spin_lock(&current->files->file_lock);
8213 + f = fcheck(fd);
8214 + spin_unlock(&current->files->file_lock);
8215 + if (f != filp) {
8216 + file_lock->fl_type = F_UNLCK;
8217 + error = do_lock_file_wait(filp, cmd, file_lock);
8218 + WARN_ON_ONCE(error);
8219 + error = -EBADF;
8220 + }
8221 }
8222 -
8223 out:
8224 locks_free_lock(file_lock);
8225 return error;
8226 @@ -2294,7 +2296,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
8227 goto out;
8228 }
8229
8230 -again:
8231 error = flock64_to_posix_lock(filp, file_lock, &flock);
8232 if (error)
8233 goto out;
8234 @@ -2336,14 +2337,22 @@ again:
8235 * Attempt to detect a close/fcntl race and recover by
8236 * releasing the lock that was just acquired.
8237 */
8238 - spin_lock(&current->files->file_lock);
8239 - f = fcheck(fd);
8240 - spin_unlock(&current->files->file_lock);
8241 - if (!error && f != filp && flock.l_type != F_UNLCK) {
8242 - flock.l_type = F_UNLCK;
8243 - goto again;
8244 + if (!error && file_lock->fl_type != F_UNLCK) {
8245 + /*
8246 + * We need that spin_lock here - it prevents reordering between
8247 + * update of i_flctx->flc_posix and check for it done in
8248 + * close(). rcu_read_lock() wouldn't do.
8249 + */
8250 + spin_lock(&current->files->file_lock);
8251 + f = fcheck(fd);
8252 + spin_unlock(&current->files->file_lock);
8253 + if (f != filp) {
8254 + file_lock->fl_type = F_UNLCK;
8255 + error = do_lock_file_wait(filp, cmd, file_lock);
8256 + WARN_ON_ONCE(error);
8257 + error = -EBADF;
8258 + }
8259 }
8260 -
8261 out:
8262 locks_free_lock(file_lock);
8263 return error;
8264 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
8265 index 892aefff3630..fdd234206dff 100644
8266 --- a/fs/nfs/client.c
8267 +++ b/fs/nfs/client.c
8268 @@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
8269 server->options = data->options;
8270 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
8271 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
8272 - NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
8273 + NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
8274
8275 if (data->rsize)
8276 server->rsize = nfs_block_size(data->rsize, NULL);
8277 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
8278 index fecd9201dbad..c2abdc7db6c3 100644
8279 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
8280 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
8281 @@ -1484,11 +1484,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
8282 start = xdr_reserve_space(xdr, 4);
8283 BUG_ON(!start);
8284
8285 - if (ff_layout_encode_ioerr(flo, xdr, args))
8286 - goto out;
8287 -
8288 + ff_layout_encode_ioerr(flo, xdr, args);
8289 ff_layout_encode_iostats(flo, xdr, args);
8290 -out:
8291 +
8292 *start = cpu_to_be32((xdr->p - start - 1) * 4);
8293 dprintk("%s: Return\n", __func__);
8294 }
8295 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
8296 index 7f22b6c6fb50..723b8922d76b 100644
8297 --- a/fs/nfs/inode.c
8298 +++ b/fs/nfs/inode.c
8299 @@ -442,7 +442,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
8300 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
8301 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
8302 inode->i_version = fattr->change_attr;
8303 - else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
8304 + else
8305 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
8306 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
8307 inode->i_size = nfs_size_to_loff_t(fattr->size);
8308 @@ -1627,6 +1627,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8309 unsigned long invalid = 0;
8310 unsigned long now = jiffies;
8311 unsigned long save_cache_validity;
8312 + bool cache_revalidated = true;
8313
8314 dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
8315 __func__, inode->i_sb->s_id, inode->i_ino,
8316 @@ -1688,22 +1689,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8317 nfs_force_lookup_revalidate(inode);
8318 inode->i_version = fattr->change_attr;
8319 }
8320 - } else if (server->caps & NFS_CAP_CHANGE_ATTR)
8321 + } else {
8322 nfsi->cache_validity |= save_cache_validity;
8323 + cache_revalidated = false;
8324 + }
8325
8326 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
8327 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
8328 - } else if (server->caps & NFS_CAP_MTIME)
8329 + } else if (server->caps & NFS_CAP_MTIME) {
8330 nfsi->cache_validity |= save_cache_validity &
8331 (NFS_INO_INVALID_ATTR
8332 | NFS_INO_REVAL_FORCED);
8333 + cache_revalidated = false;
8334 + }
8335
8336 if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
8337 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
8338 - } else if (server->caps & NFS_CAP_CTIME)
8339 + } else if (server->caps & NFS_CAP_CTIME) {
8340 nfsi->cache_validity |= save_cache_validity &
8341 (NFS_INO_INVALID_ATTR
8342 | NFS_INO_REVAL_FORCED);
8343 + cache_revalidated = false;
8344 + }
8345
8346 /* Check if our cached file size is stale */
8347 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
8348 @@ -1723,19 +1730,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8349 (long long)cur_isize,
8350 (long long)new_isize);
8351 }
8352 - } else
8353 + } else {
8354 nfsi->cache_validity |= save_cache_validity &
8355 (NFS_INO_INVALID_ATTR
8356 | NFS_INO_REVAL_PAGECACHE
8357 | NFS_INO_REVAL_FORCED);
8358 + cache_revalidated = false;
8359 + }
8360
8361
8362 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
8363 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
8364 - else if (server->caps & NFS_CAP_ATIME)
8365 + else if (server->caps & NFS_CAP_ATIME) {
8366 nfsi->cache_validity |= save_cache_validity &
8367 (NFS_INO_INVALID_ATIME
8368 | NFS_INO_REVAL_FORCED);
8369 + cache_revalidated = false;
8370 + }
8371
8372 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
8373 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
8374 @@ -1744,36 +1755,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8375 inode->i_mode = newmode;
8376 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
8377 }
8378 - } else if (server->caps & NFS_CAP_MODE)
8379 + } else if (server->caps & NFS_CAP_MODE) {
8380 nfsi->cache_validity |= save_cache_validity &
8381 (NFS_INO_INVALID_ATTR
8382 | NFS_INO_INVALID_ACCESS
8383 | NFS_INO_INVALID_ACL
8384 | NFS_INO_REVAL_FORCED);
8385 + cache_revalidated = false;
8386 + }
8387
8388 if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
8389 if (!uid_eq(inode->i_uid, fattr->uid)) {
8390 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
8391 inode->i_uid = fattr->uid;
8392 }
8393 - } else if (server->caps & NFS_CAP_OWNER)
8394 + } else if (server->caps & NFS_CAP_OWNER) {
8395 nfsi->cache_validity |= save_cache_validity &
8396 (NFS_INO_INVALID_ATTR
8397 | NFS_INO_INVALID_ACCESS
8398 | NFS_INO_INVALID_ACL
8399 | NFS_INO_REVAL_FORCED);
8400 + cache_revalidated = false;
8401 + }
8402
8403 if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
8404 if (!gid_eq(inode->i_gid, fattr->gid)) {
8405 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
8406 inode->i_gid = fattr->gid;
8407 }
8408 - } else if (server->caps & NFS_CAP_OWNER_GROUP)
8409 + } else if (server->caps & NFS_CAP_OWNER_GROUP) {
8410 nfsi->cache_validity |= save_cache_validity &
8411 (NFS_INO_INVALID_ATTR
8412 | NFS_INO_INVALID_ACCESS
8413 | NFS_INO_INVALID_ACL
8414 | NFS_INO_REVAL_FORCED);
8415 + cache_revalidated = false;
8416 + }
8417
8418 if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
8419 if (inode->i_nlink != fattr->nlink) {
8420 @@ -1782,19 +1799,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8421 invalid |= NFS_INO_INVALID_DATA;
8422 set_nlink(inode, fattr->nlink);
8423 }
8424 - } else if (server->caps & NFS_CAP_NLINK)
8425 + } else if (server->caps & NFS_CAP_NLINK) {
8426 nfsi->cache_validity |= save_cache_validity &
8427 (NFS_INO_INVALID_ATTR
8428 | NFS_INO_REVAL_FORCED);
8429 + cache_revalidated = false;
8430 + }
8431
8432 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
8433 /*
8434 * report the blocks in 512byte units
8435 */
8436 inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
8437 - }
8438 - if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
8439 + } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
8440 inode->i_blocks = fattr->du.nfs2.blocks;
8441 + else
8442 + cache_revalidated = false;
8443
8444 /* Update attrtimeo value if we're out of the unstable period */
8445 if (invalid & NFS_INO_INVALID_ATTR) {
8446 @@ -1804,9 +1824,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8447 /* Set barrier to be more recent than all outstanding updates */
8448 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
8449 } else {
8450 - if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
8451 - if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
8452 - nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
8453 + if (cache_revalidated) {
8454 + if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
8455 + nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
8456 + nfsi->attrtimeo <<= 1;
8457 + if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
8458 + nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
8459 + }
8460 nfsi->attrtimeo_timestamp = now;
8461 }
8462 /* Set the barrier to be more recent than this fattr */
8463 @@ -1815,7 +1839,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
8464 }
8465
8466 /* Don't declare attrcache up to date if there were no attrs! */
8467 - if (fattr->valid != 0)
8468 + if (cache_revalidated)
8469 invalid &= ~NFS_INO_INVALID_ATTR;
8470
8471 /* Don't invalidate the data if we were to blame */
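
The nfs_update_inode() rework above only lets the attribute-cache timeout grow when every cacheable attribute was actually revalidated (the new cache_revalidated flag), so a partial reply no longer extends trust in stale fields. A compact sketch of that gating, with invented names and constants:

#include <stdbool.h>
#include <stdio.h>

#define TIMEO_MIN 3
#define TIMEO_MAX 60

struct cache {
	int timeo;		/* current revalidation interval, seconds */
};

/* Grow the interval only when the server answered with a complete set of
 * attributes; a partial reply keeps the old, shorter interval so stale
 * fields get re-fetched soon.  Mirrors the cache_revalidated logic above. */
static void update_timeo(struct cache *c, bool fully_revalidated, bool changed)
{
	if (changed) {
		c->timeo = TIMEO_MIN;	/* attrs moved: back to minimum */
		return;
	}
	if (!fully_revalidated)
		return;			/* don't reward a partial reply */
	c->timeo *= 2;
	if (c->timeo > TIMEO_MAX)
		c->timeo = TIMEO_MAX;
}

int main(void)
{
	struct cache c = { TIMEO_MIN };

	update_timeo(&c, true, false);	/* 6 */
	update_timeo(&c, false, false);	/* 6: partial reply, no growth */
	update_timeo(&c, true, false);	/* 12 */
	update_timeo(&c, true, true);	/* back to 3 */
	printf("timeo = %d\n", c.timeo);
	return 0;
}
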
8472 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
8473 index 8f393fcc313b..2c4f41c34366 100644
8474 --- a/fs/nfs/nfs4proc.c
8475 +++ b/fs/nfs/nfs4proc.c
8476 @@ -1284,6 +1284,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
8477 * Protect the call to nfs4_state_set_mode_locked and
8478 * serialise the stateid update
8479 */
8480 + spin_lock(&state->owner->so_lock);
8481 write_seqlock(&state->seqlock);
8482 if (deleg_stateid != NULL) {
8483 nfs4_stateid_copy(&state->stateid, deleg_stateid);
8484 @@ -1292,7 +1293,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
8485 if (open_stateid != NULL)
8486 nfs_set_open_stateid_locked(state, open_stateid, fmode);
8487 write_sequnlock(&state->seqlock);
8488 - spin_lock(&state->owner->so_lock);
8489 update_open_stateflags(state, fmode);
8490 spin_unlock(&state->owner->so_lock);
8491 }
8492 @@ -8512,7 +8512,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8493 .minor_version = 0,
8494 .init_caps = NFS_CAP_READDIRPLUS
8495 | NFS_CAP_ATOMIC_OPEN
8496 - | NFS_CAP_CHANGE_ATTR
8497 | NFS_CAP_POSIX_LOCK,
8498 .init_client = nfs40_init_client,
8499 .shutdown_client = nfs40_shutdown_client,
8500 @@ -8538,7 +8537,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8501 .minor_version = 1,
8502 .init_caps = NFS_CAP_READDIRPLUS
8503 | NFS_CAP_ATOMIC_OPEN
8504 - | NFS_CAP_CHANGE_ATTR
8505 | NFS_CAP_POSIX_LOCK
8506 | NFS_CAP_STATEID_NFSV41
8507 | NFS_CAP_ATOMIC_OPEN_V1,
8508 @@ -8561,7 +8559,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8509 .minor_version = 2,
8510 .init_caps = NFS_CAP_READDIRPLUS
8511 | NFS_CAP_ATOMIC_OPEN
8512 - | NFS_CAP_CHANGE_ATTR
8513 | NFS_CAP_POSIX_LOCK
8514 | NFS_CAP_STATEID_NFSV41
8515 | NFS_CAP_ATOMIC_OPEN_V1
8516 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
8517 index 482cfd34472d..523e485a11b8 100644
8518 --- a/fs/ocfs2/dlm/dlmmaster.c
8519 +++ b/fs/ocfs2/dlm/dlmmaster.c
8520 @@ -2518,6 +2518,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
8521 spin_lock(&dlm->master_lock);
8522 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
8523 namelen, target, dlm->node_num);
8524 + /* get an extra reference on the mle.
8525 + * otherwise the assert_master from the new
8526 + * master will destroy this.
8527 + */
8528 + dlm_get_mle_inuse(mle);
8529 spin_unlock(&dlm->master_lock);
8530 spin_unlock(&dlm->spinlock);
8531
8532 @@ -2553,6 +2558,7 @@ fail:
8533 if (mle_added) {
8534 dlm_mle_detach_hb_events(dlm, mle);
8535 dlm_put_mle(mle);
8536 + dlm_put_mle_inuse(mle);
8537 } else if (mle) {
8538 kmem_cache_free(dlm_mle_cache, mle);
8539 mle = NULL;
8540 @@ -2570,17 +2576,6 @@ fail:
8541 * ensure that all assert_master work is flushed. */
8542 flush_workqueue(dlm->dlm_worker);
8543
8544 - /* get an extra reference on the mle.
8545 - * otherwise the assert_master from the new
8546 - * master will destroy this.
8547 - * also, make sure that all callers of dlm_get_mle
8548 - * take both dlm->spinlock and dlm->master_lock */
8549 - spin_lock(&dlm->spinlock);
8550 - spin_lock(&dlm->master_lock);
8551 - dlm_get_mle_inuse(mle);
8552 - spin_unlock(&dlm->master_lock);
8553 - spin_unlock(&dlm->spinlock);
8554 -
8555 /* notify new node and send all lock state */
8556 /* call send_one_lockres with migration flag.
8557 * this serves as notice to the target node that a
8558 @@ -3309,6 +3304,15 @@ top:
8559 mle->new_master != dead_node)
8560 continue;
8561
8562 + if (mle->new_master == dead_node && mle->inuse) {
8563 + mlog(ML_NOTICE, "%s: target %u died during "
8564 + "migration from %u, the MLE is "
8565 + "still in use, ignore it!\n",
8566 + dlm->name, dead_node,
8567 + mle->master);
8568 + continue;
8569 + }
8570 +
8571 /* If we have reached this point, this mle needs to be
8572 * removed from the list and freed. */
8573 dlm_clean_migration_mle(dlm, mle);
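
The dlmmaster hunks take the extra MLE reference while dlm->master_lock is still held, instead of dropping the locks and re-taking them later, which left a window where the new master's assert could free the MLE first. The general rule — publish an object and take your long-lived reference inside one critical section — sketched with a hypothetical single-slot registry (error handling omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;		/* protected by registry_lock in this sketch */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *registry;	/* hypothetical single-slot registry */

static void put_obj(struct obj *o)
{
	pthread_mutex_lock(&registry_lock);
	if (--o->refcount == 0) {
		if (registry == o)
			registry = NULL;
		free(o);
	}
	pthread_mutex_unlock(&registry_lock);
}

/* Publish the object and take the caller's long-lived reference in one
 * critical section.  Dropping the lock between "publish" and "get", as the
 * old ocfs2 code did, lets another thread find the object and drop the
 * last reference first. */
static struct obj *publish_and_get(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_lock(&registry_lock);
	o->refcount = 1;	/* the registry's reference */
	registry = o;
	o->refcount++;		/* caller's reference, taken under the lock */
	pthread_mutex_unlock(&registry_lock);
	return o;
}

int main(void)
{
	struct obj *o = publish_and_get();

	put_obj(o);		/* drop the caller's ref */
	put_obj(o);		/* drop the registry's ref; object freed */
	printf("done\n");
	return 0;
}
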
8574 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
8575 index 3d90ad7ff91f..f25ff5d3a2f9 100644
8576 --- a/fs/ocfs2/dlm/dlmrecovery.c
8577 +++ b/fs/ocfs2/dlm/dlmrecovery.c
8578 @@ -2360,6 +2360,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
8579 break;
8580 }
8581 }
8582 + dlm_lockres_clear_refmap_bit(dlm, res,
8583 + dead_node);
8584 spin_unlock(&res->spinlock);
8585 continue;
8586 }
8587 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
8588 index 23157e40dd74..3623ab6fa97f 100644
8589 --- a/fs/ocfs2/dlmglue.c
8590 +++ b/fs/ocfs2/dlmglue.c
8591 @@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
8592 unsigned int gen;
8593 int noqueue_attempted = 0;
8594 int dlm_locked = 0;
8595 + int kick_dc = 0;
8596
8597 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
8598 mlog_errno(-EINVAL);
8599 @@ -1524,7 +1525,12 @@ update_holders:
8600 unlock:
8601 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
8602
8603 + /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
8604 + kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
8605 +
8606 spin_unlock_irqrestore(&lockres->l_lock, flags);
8607 + if (kick_dc)
8608 + ocfs2_wake_downconvert_thread(osb);
8609 out:
8610 /*
8611 * This is helping work around a lock inversion between the page lock
8612 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
8613 index 871fcb67be97..758012bfd5f0 100644
8614 --- a/fs/overlayfs/copy_up.c
8615 +++ b/fs/overlayfs/copy_up.c
8616 @@ -22,9 +22,9 @@
8617
8618 int ovl_copy_xattr(struct dentry *old, struct dentry *new)
8619 {
8620 - ssize_t list_size, size;
8621 - char *buf, *name, *value;
8622 - int error;
8623 + ssize_t list_size, size, value_size = 0;
8624 + char *buf, *name, *value = NULL;
8625 + int uninitialized_var(error);
8626
8627 if (!old->d_inode->i_op->getxattr ||
8628 !new->d_inode->i_op->getxattr)
8629 @@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
8630 if (!buf)
8631 return -ENOMEM;
8632
8633 - error = -ENOMEM;
8634 - value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
8635 - if (!value)
8636 - goto out;
8637 -
8638 list_size = vfs_listxattr(old, buf, list_size);
8639 if (list_size <= 0) {
8640 error = list_size;
8641 - goto out_free_value;
8642 + goto out;
8643 }
8644
8645 for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
8646 - size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
8647 - if (size <= 0) {
8648 +retry:
8649 + size = vfs_getxattr(old, name, value, value_size);
8650 + if (size == -ERANGE)
8651 + size = vfs_getxattr(old, name, NULL, 0);
8652 +
8653 + if (size < 0) {
8654 error = size;
8655 - goto out_free_value;
8656 + break;
8657 + }
8658 +
8659 + if (size > value_size) {
8660 + void *new;
8661 +
8662 + new = krealloc(value, size, GFP_KERNEL);
8663 + if (!new) {
8664 + error = -ENOMEM;
8665 + break;
8666 + }
8667 + value = new;
8668 + value_size = size;
8669 + goto retry;
8670 }
8671 +
8672 error = vfs_setxattr(new, name, value, size, 0);
8673 if (error)
8674 - goto out_free_value;
8675 + break;
8676 }
8677 -
8678 -out_free_value:
8679 kfree(value);
8680 out:
8681 kfree(buf);
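
ovl_copy_xattr() now starts with a NULL value buffer and grows it with krealloc() only when a value does not fit, retrying the fetch, instead of preallocating XATTR_SIZE_MAX for every copy-up. The same loop written against the Linux userspace xattr calls (error handling abbreviated; copy_xattrs is an invented wrapper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

/* Copy all extended attributes from src to dst, growing the value buffer
 * on demand the way ovl_copy_xattr does after this patch. */
static int copy_xattrs(const char *src, const char *dst)
{
	ssize_t list_size = listxattr(src, NULL, 0);
	char *names, *value = NULL;
	size_t value_size = 0;
	int err = 0;

	if (list_size <= 0)
		return (int)list_size;
	names = malloc(list_size);
	if (!names)
		return -1;
	list_size = listxattr(src, names, list_size);
	if (list_size < 0) {
		free(names);
		return -1;
	}

	for (char *name = names; name < names + list_size;
	     name += strlen(name) + 1) {
		ssize_t size;
retry:
		size = getxattr(src, name, value, value_size);
		if (size < 0 && value_size)	/* likely ERANGE: value grew */
			size = getxattr(src, name, NULL, 0);
		if (size < 0) {
			err = -1;
			break;
		}
		if ((size_t)size > value_size) {
			char *new = realloc(value, size);

			if (!new) {
				err = -1;
				break;
			}
			value = new;
			value_size = size;
			goto retry;	/* re-fetch into the bigger buffer */
		}
		if (setxattr(dst, name, value, size, 0)) {
			err = -1;
			break;
		}
	}
	free(value);
	free(names);
	return err;
}

int main(int argc, char **argv)
{
	if (argc != 3)
		return fprintf(stderr, "usage: %s SRC DST\n", argv[0]), 2;
	return copy_xattrs(argv[1], argv[2]) ? 1 : 0;
}

The second getxattr() probe handles a value that grew between the size query and the fetch, which is the race the retry label in the kernel loop exists for.
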
8682 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
8683 index ba0db2638946..a1b069e5e363 100644
8684 --- a/fs/overlayfs/inode.c
8685 +++ b/fs/overlayfs/inode.c
8686 @@ -45,6 +45,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
8687 int err;
8688 struct dentry *upperdentry;
8689
8690 + /*
8691 + * Check for permissions before trying to copy-up. This is redundant
8692 + * since it will be rechecked later by ->setattr() on upper dentry. But
8693 + * without this, copy-up can be triggered by just about anybody.
8694 + *
8695 + * We don't initialize inode->size, which just means that
8696 + * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
8697 + * check for a swapfile (which this won't be anyway).
8698 + */
8699 + err = inode_change_ok(dentry->d_inode, attr);
8700 + if (err)
8701 + return err;
8702 +
8703 err = ovl_want_write(dentry);
8704 if (err)
8705 goto out;
8706 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
8707 index 70e9af551600..adcb1398c481 100644
8708 --- a/fs/overlayfs/readdir.c
8709 +++ b/fs/overlayfs/readdir.c
8710 @@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
8711 (int) PTR_ERR(dentry));
8712 continue;
8713 }
8714 - ovl_cleanup(upper->d_inode, dentry);
8715 + if (dentry->d_inode)
8716 + ovl_cleanup(upper->d_inode, dentry);
8717 dput(dentry);
8718 }
8719 mutex_unlock(&upper->d_inode->i_mutex);
8720 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
8721 index d74af7f78fec..bd6d5c1e667d 100644
8722 --- a/fs/overlayfs/super.c
8723 +++ b/fs/overlayfs/super.c
8724 @@ -9,6 +9,7 @@
8725
8726 #include <linux/fs.h>
8727 #include <linux/namei.h>
8728 +#include <linux/pagemap.h>
8729 #include <linux/xattr.h>
8730 #include <linux/security.h>
8731 #include <linux/mount.h>
8732 @@ -847,6 +848,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
8733 }
8734
8735 sb->s_stack_depth = 0;
8736 + sb->s_maxbytes = MAX_LFS_FILESIZE;
8737 if (ufs->config.upperdir) {
8738 if (!ufs->config.workdir) {
8739 pr_err("overlayfs: missing 'workdir'\n");
8740 @@ -986,6 +988,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
8741
8742 root_dentry->d_fsdata = oe;
8743
8744 + ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
8745 + root_dentry->d_inode);
8746 +
8747 sb->s_magic = OVERLAYFS_SUPER_MAGIC;
8748 sb->s_op = &ovl_super_operations;
8749 sb->s_root = root_dentry;
8750 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
8751 index 6afac3d561ac..78a40ef0c463 100644
8752 --- a/fs/udf/inode.c
8753 +++ b/fs/udf/inode.c
8754 @@ -2052,14 +2052,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
8755 epos->offset += adsize;
8756 }
8757
8758 +/*
8759 + * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
8760 + * someone does some weird stuff.
8761 + */
8762 +#define UDF_MAX_INDIR_EXTS 16
8763 +
8764 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
8765 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
8766 {
8767 int8_t etype;
8768 + unsigned int indirections = 0;
8769
8770 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
8771 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
8772 int block;
8773 +
8774 + if (++indirections > UDF_MAX_INDIR_EXTS) {
8775 + udf_err(inode->i_sb,
8776 + "too many indirect extents in inode %lu\n",
8777 + inode->i_ino);
8778 + return -1;
8779 + }
8780 +
8781 epos->block = *eloc;
8782 epos->offset = sizeof(struct allocExtDesc);
8783 brelse(epos->bh);
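
udf_next_aext() now refuses to chase more than UDF_MAX_INDIR_EXTS chained indirect extents, so crafted on-disk data cannot keep the kernel walking forever. The same hop-limit guard applies to any untrusted linked structure, as in this sketch (struct node and resolve are invented):

#include <stddef.h>
#include <stdio.h>

#define MAX_HOPS 16		/* like UDF_MAX_INDIR_EXTS */

struct node {
	struct node *next;	/* stands in for an on-disk indirect pointer */
	int payload;
};

/* Follow indirection from untrusted data, refusing to chase more than
 * MAX_HOPS links; a cycle or absurdly deep chain is treated as corruption
 * instead of spinning forever. */
static int resolve(struct node *n, int *out)
{
	int hops = 0;

	while (n->next) {
		if (++hops > MAX_HOPS) {
			fprintf(stderr, "too many indirections, corrupt chain?\n");
			return -1;
		}
		n = n->next;
	}
	*out = n->payload;
	return 0;
}

int main(void)
{
	struct node c = { NULL, 42 }, b = { &c, 0 }, a = { &b, 0 };
	struct node loop = { &loop, 0 };	/* self-referencing: corrupt */
	int v;

	if (!resolve(&a, &v))
		printf("resolved: %d\n", v);
	resolve(&loop, &v);			/* hits the hop limit */
	return 0;
}
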
8784 diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
8785 index b84fee372734..2eafe2c4d239 100644
8786 --- a/fs/udf/unicode.c
8787 +++ b/fs/udf/unicode.c
8788 @@ -133,11 +133,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
8789 if (c < 0x80U)
8790 utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
8791 else if (c < 0x800U) {
8792 + if (utf_o->u_len > (UDF_NAME_LEN - 4))
8793 + break;
8794 utf_o->u_name[utf_o->u_len++] =
8795 (uint8_t)(0xc0 | (c >> 6));
8796 utf_o->u_name[utf_o->u_len++] =
8797 (uint8_t)(0x80 | (c & 0x3f));
8798 } else {
8799 + if (utf_o->u_len > (UDF_NAME_LEN - 5))
8800 + break;
8801 utf_o->u_name[utf_o->u_len++] =
8802 (uint8_t)(0xe0 | (c >> 12));
8803 utf_o->u_name[utf_o->u_len++] =
8804 @@ -178,17 +182,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
8805 static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
8806 {
8807 unsigned c, i, max_val, utf_char;
8808 - int utf_cnt, u_len;
8809 + int utf_cnt, u_len, u_ch;
8810
8811 memset(ocu, 0, sizeof(dstring) * length);
8812 ocu[0] = 8;
8813 max_val = 0xffU;
8814 + u_ch = 1;
8815
8816 try_again:
8817 u_len = 0U;
8818 utf_char = 0U;
8819 utf_cnt = 0U;
8820 for (i = 0U; i < utf->u_len; i++) {
8821 + /* Name didn't fit? */
8822 + if (u_len + 1 + u_ch >= length)
8823 + return 0;
8824 +
8825 c = (uint8_t)utf->u_name[i];
8826
8827 /* Complete a multi-byte UTF-8 character */
8828 @@ -230,6 +239,7 @@ try_again:
8829 if (max_val == 0xffU) {
8830 max_val = 0xffffU;
8831 ocu[0] = (uint8_t)0x10U;
8832 + u_ch = 2;
8833 goto try_again;
8834 }
8835 goto error_out;
8836 @@ -282,7 +292,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
8837 c = (c << 8) | ocu[i++];
8838
8839 len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
8840 - UDF_NAME_LEN - utf_o->u_len);
8841 + UDF_NAME_LEN - 2 - utf_o->u_len);
8842 /* Valid character? */
8843 if (len >= 0)
8844 utf_o->u_len += len;
8845 @@ -300,15 +310,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
8846 int len;
8847 unsigned i, max_val;
8848 uint16_t uni_char;
8849 - int u_len;
8850 + int u_len, u_ch;
8851
8852 memset(ocu, 0, sizeof(dstring) * length);
8853 ocu[0] = 8;
8854 max_val = 0xffU;
8855 + u_ch = 1;
8856
8857 try_again:
8858 u_len = 0U;
8859 for (i = 0U; i < uni->u_len; i++) {
8860 + /* Name didn't fit? */
8861 + if (u_len + 1 + u_ch >= length)
8862 + return 0;
8863 len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
8864 if (!len)
8865 continue;
8866 @@ -321,6 +335,7 @@ try_again:
8867 if (uni_char > max_val) {
8868 max_val = 0xffffU;
8869 ocu[0] = (uint8_t)0x10U;
8870 + u_ch = 2;
8871 goto try_again;
8872 }
8873
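
The unicode.c changes stop CS0-to-UTF-8 conversion before a 2- or 3-byte sequence would overrun the fixed name buffer, and make the reverse direction account for u_ch output bytes per character. A sketch of bounds-checked UTF-8 emission for 16-bit code points (utf8_encode is an invented name; surrogates are ignored for brevity):

#include <stdint.h>
#include <stdio.h>

/* Encode 16-bit code points as UTF-8 into a fixed buffer, stopping (and
 * truncating) rather than writing past the end - the check the UDF fix
 * adds before each multi-byte sequence.  Returns bytes written. */
static size_t utf8_encode(const uint16_t *in, size_t n,
			  uint8_t *out, size_t out_len)
{
	size_t len = 0;

	for (size_t i = 0; i < n; i++) {
		uint16_t c = in[i];

		if (c < 0x80) {
			if (len + 1 > out_len)
				break;
			out[len++] = (uint8_t)c;
		} else if (c < 0x800) {
			if (len + 2 > out_len)
				break;	/* no room for the full sequence */
			out[len++] = 0xc0 | (c >> 6);
			out[len++] = 0x80 | (c & 0x3f);
		} else {
			if (len + 3 > out_len)
				break;
			out[len++] = 0xe0 | (c >> 12);
			out[len++] = 0x80 | ((c >> 6) & 0x3f);
			out[len++] = 0x80 | (c & 0x3f);
		}
	}
	return len;
}

int main(void)
{
	const uint16_t name[] = { 'a', 0x00e9, 0x20ac };  /* a, e-acute, euro */
	uint8_t buf[4];					  /* too small: 6 needed */
	size_t len = utf8_encode(name, 3, buf, sizeof(buf));

	printf("wrote %zu bytes\n", len);	/* 3: the euro didn't fit */
	return 0;
}
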
8874 diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
8875 index 6fbf2d853a54..48aff071591d 100644
8876 --- a/fs/xfs/libxfs/xfs_dquot_buf.c
8877 +++ b/fs/xfs/libxfs/xfs_dquot_buf.c
8878 @@ -54,7 +54,7 @@ xfs_dqcheck(
8879 xfs_dqid_t id,
8880 uint type, /* used only when IO_dorepair is true */
8881 uint flags,
8882 - char *str)
8883 + const char *str)
8884 {
8885 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
8886 int errs = 0;
8887 @@ -207,7 +207,8 @@ xfs_dquot_buf_verify_crc(
8888 STATIC bool
8889 xfs_dquot_buf_verify(
8890 struct xfs_mount *mp,
8891 - struct xfs_buf *bp)
8892 + struct xfs_buf *bp,
8893 + int warn)
8894 {
8895 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
8896 xfs_dqid_t id = 0;
8897 @@ -240,8 +241,7 @@ xfs_dquot_buf_verify(
8898 if (i == 0)
8899 id = be32_to_cpu(ddq->d_id);
8900
8901 - error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
8902 - "xfs_dquot_buf_verify");
8903 + error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
8904 if (error)
8905 return false;
8906 }
8907 @@ -256,7 +256,7 @@ xfs_dquot_buf_read_verify(
8908
8909 if (!xfs_dquot_buf_verify_crc(mp, bp))
8910 xfs_buf_ioerror(bp, -EFSBADCRC);
8911 - else if (!xfs_dquot_buf_verify(mp, bp))
8912 + else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
8913 xfs_buf_ioerror(bp, -EFSCORRUPTED);
8914
8915 if (bp->b_error)
8916 @@ -264,6 +264,25 @@ xfs_dquot_buf_read_verify(
8917 }
8918
8919 /*
8920 + * readahead errors are silent and simply leave the buffer as !done so a real
8921 + * read will then be run with the xfs_dquot_buf_ops verifier. See
8922 + * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
8923 + * reporting the failure.
8924 + */
8925 +static void
8926 +xfs_dquot_buf_readahead_verify(
8927 + struct xfs_buf *bp)
8928 +{
8929 + struct xfs_mount *mp = bp->b_target->bt_mount;
8930 +
8931 + if (!xfs_dquot_buf_verify_crc(mp, bp) ||
8932 + !xfs_dquot_buf_verify(mp, bp, 0)) {
8933 + xfs_buf_ioerror(bp, -EIO);
8934 + bp->b_flags &= ~XBF_DONE;
8935 + }
8936 +}
8937 +
8938 +/*
8939 * we don't calculate the CRC here as that is done when the dquot is flushed to
8940 * the buffer after the update is done. This ensures that the dquot in the
8941 * buffer always has an up-to-date CRC value.
8942 @@ -274,7 +293,7 @@ xfs_dquot_buf_write_verify(
8943 {
8944 struct xfs_mount *mp = bp->b_target->bt_mount;
8945
8946 - if (!xfs_dquot_buf_verify(mp, bp)) {
8947 + if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
8948 xfs_buf_ioerror(bp, -EFSCORRUPTED);
8949 xfs_verifier_error(bp);
8950 return;
8951 @@ -286,3 +305,7 @@ const struct xfs_buf_ops xfs_dquot_buf_ops = {
8952 .verify_write = xfs_dquot_buf_write_verify,
8953 };
8954
8955 +const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
8956 + .verify_read = xfs_dquot_buf_readahead_verify,
8957 + .verify_write = xfs_dquot_buf_write_verify,
8958 +};
8959 diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
8960 index 002b6b3a1988..7da6d0b2c2ed 100644
8961 --- a/fs/xfs/libxfs/xfs_inode_buf.c
8962 +++ b/fs/xfs/libxfs/xfs_inode_buf.c
8963 @@ -63,11 +63,14 @@ xfs_inobp_check(
8964 * has not had the inode cores stamped into it. Hence for readahead, the buffer
8965 * may be potentially invalid.
8966 *
8967 - * If the readahead buffer is invalid, we don't want to mark it with an error,
8968 - * but we do want to clear the DONE status of the buffer so that a followup read
8969 - * will re-read it from disk. This will ensure that we don't get an unnecessary
8970 - * warnings during log recovery and we don't get unnecssary panics on debug
8971 - * kernels.
8972 + * If the readahead buffer is invalid, we need to mark it with an error and
8973 + * clear the DONE status of the buffer so that a followup read will re-read it
8974 + * from disk. We don't report the error otherwise to avoid warnings during log
8975 + * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
8976 + * because all we want to do is say readahead failed; there is no-one to report
8977 + * the error to, so this will distinguish it from a non-ra verifier failure.
8978 + * Changes to this readahead error behaviour also need to be reflected in
8979 + * xfs_dquot_buf_readahead_verify().
8980 */
8981 static void
8982 xfs_inode_buf_verify(
8983 @@ -95,6 +98,7 @@ xfs_inode_buf_verify(
8984 XFS_RANDOM_ITOBP_INOTOBP))) {
8985 if (readahead) {
8986 bp->b_flags &= ~XBF_DONE;
8987 + xfs_buf_ioerror(bp, -EIO);
8988 return;
8989 }
8990
8991 diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
8992 index 1b0a08379759..f51078f1e92a 100644
8993 --- a/fs/xfs/libxfs/xfs_quota_defs.h
8994 +++ b/fs/xfs/libxfs/xfs_quota_defs.h
8995 @@ -153,7 +153,7 @@ typedef __uint16_t xfs_qwarncnt_t;
8996 #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
8997
8998 extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
8999 - xfs_dqid_t id, uint type, uint flags, char *str);
9000 + xfs_dqid_t id, uint type, uint flags, const char *str);
9001 extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
9002
9003 #endif /* __XFS_QUOTA_H__ */
9004 diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
9005 index 8dda4b321343..a3472a38efd2 100644
9006 --- a/fs/xfs/libxfs/xfs_shared.h
9007 +++ b/fs/xfs/libxfs/xfs_shared.h
9008 @@ -49,6 +49,7 @@ extern const struct xfs_buf_ops xfs_inobt_buf_ops;
9009 extern const struct xfs_buf_ops xfs_inode_buf_ops;
9010 extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
9011 extern const struct xfs_buf_ops xfs_dquot_buf_ops;
9012 +extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
9013 extern const struct xfs_buf_ops xfs_sb_buf_ops;
9014 extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
9015 extern const struct xfs_buf_ops xfs_symlink_buf_ops;
9016 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
9017 index 1790b00bea7a..7dd64bf98c56 100644
9018 --- a/fs/xfs/xfs_buf.c
9019 +++ b/fs/xfs/xfs_buf.c
9020 @@ -605,6 +605,13 @@ found:
9021 }
9022 }
9023
9024 + /*
9025 + * Clear b_error if this is a lookup from a caller that doesn't expect
9026 + * valid data to be found in the buffer.
9027 + */
9028 + if (!(flags & XBF_READ))
9029 + xfs_buf_ioerror(bp, 0);
9030 +
9031 XFS_STATS_INC(xb_get);
9032 trace_xfs_buf_get(bp, flags, _RET_IP_);
9033 return bp;
9034 @@ -1522,6 +1529,16 @@ xfs_wait_buftarg(
9035 LIST_HEAD(dispose);
9036 int loop = 0;
9037
9038 + /*
9039 + * We need to flush the buffer workqueue to ensure that all IO
9040 + * completion processing is 100% done. Just waiting on buffer locks is
9041 + * not sufficient for async IO as the reference count held over IO is
9042 + * not released until after the buffer lock is dropped. Hence we need to
9043 + * ensure here that all reference counts have been dropped before we
9044 + * start walking the LRU list.
9045 + */
9046 + drain_workqueue(btp->bt_mount->m_buf_workqueue);
9047 +
9048 /* loop until there is nothing left on the lru list. */
9049 while (list_lru_count(&btp->bt_lru)) {
9050 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
9051 diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
9052 index a5d03396dda0..1114afdd5a6b 100644
9053 --- a/fs/xfs/xfs_log_recover.c
9054 +++ b/fs/xfs/xfs_log_recover.c
9055 @@ -3154,6 +3154,7 @@ xlog_recover_dquot_ra_pass2(
9056 struct xfs_disk_dquot *recddq;
9057 struct xfs_dq_logformat *dq_f;
9058 uint type;
9059 + int len;
9060
9061
9062 if (mp->m_qflags == 0)
9063 @@ -3174,8 +3175,12 @@ xlog_recover_dquot_ra_pass2(
9064 ASSERT(dq_f);
9065 ASSERT(dq_f->qlf_len == 1);
9066
9067 - xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
9068 - XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
9069 + len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
9070 + if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
9071 + return;
9072 +
9073 + xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
9074 + &xfs_dquot_buf_ra_ops);
9075 }
9076
9077 STATIC void
9078 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
9079 index 98abda9ed3aa..bbc59bdd6395 100644
9080 --- a/include/crypto/hash.h
9081 +++ b/include/crypto/hash.h
9082 @@ -199,6 +199,7 @@ struct crypto_ahash {
9083 unsigned int keylen);
9084
9085 unsigned int reqsize;
9086 + bool has_setkey;
9087 struct crypto_tfm base;
9088 };
9089
9090 @@ -356,6 +357,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
9091 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
9092 unsigned int keylen);
9093
9094 +static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
9095 +{
9096 + return tfm->has_setkey;
9097 +}
9098 +
9099 /**
9100 * crypto_ahash_finup() - update and finalize message digest
9101 * @req: reference to the ahash_request handle that holds all information
9102 diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
9103 index 018afb264ac2..a2bfd7843f18 100644
9104 --- a/include/crypto/if_alg.h
9105 +++ b/include/crypto/if_alg.h
9106 @@ -30,6 +30,9 @@ struct alg_sock {
9107
9108 struct sock *parent;
9109
9110 + unsigned int refcnt;
9111 + unsigned int nokey_refcnt;
9112 +
9113 const struct af_alg_type *type;
9114 void *private;
9115 };
9116 @@ -50,9 +53,11 @@ struct af_alg_type {
9117 void (*release)(void *private);
9118 int (*setkey)(void *private, const u8 *key, unsigned int keylen);
9119 int (*accept)(void *private, struct sock *sk);
9120 + int (*accept_nokey)(void *private, struct sock *sk);
9121 int (*setauthsize)(void *private, unsigned int authsize);
9122
9123 struct proto_ops *ops;
9124 + struct proto_ops *ops_nokey;
9125 struct module *owner;
9126 char name[14];
9127 };
9128 @@ -67,6 +72,7 @@ int af_alg_register_type(const struct af_alg_type *type);
9129 int af_alg_unregister_type(const struct af_alg_type *type);
9130
9131 int af_alg_release(struct socket *sock);
9132 +void af_alg_release_parent(struct sock *sk);
9133 int af_alg_accept(struct sock *sk, struct socket *newsock);
9134
9135 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
9136 @@ -83,11 +89,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
9137 return (struct alg_sock *)sk;
9138 }
9139
9140 -static inline void af_alg_release_parent(struct sock *sk)
9141 -{
9142 - sock_put(alg_sk(sk)->parent);
9143 -}
9144 -
9145 static inline void af_alg_init_completion(struct af_alg_completion *completion)
9146 {
9147 init_completion(&completion->completion);
9148 diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
9149 index 7bfb063029d8..461a0558bca4 100644
9150 --- a/include/drm/drm_cache.h
9151 +++ b/include/drm/drm_cache.h
9152 @@ -35,4 +35,13 @@
9153
9154 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
9155
9156 +static inline bool drm_arch_can_wc_memory(void)
9157 +{
9158 +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
9159 + return false;
9160 +#else
9161 + return true;
9162 +#endif
9163 +}
9164 +
9165 #endif
9166 diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
9167 index 54233583c6cb..ca71c03143d1 100644
9168 --- a/include/drm/drm_crtc.h
9169 +++ b/include/drm/drm_crtc.h
9170 @@ -731,8 +731,6 @@ struct drm_connector {
9171 uint8_t num_h_tile, num_v_tile;
9172 uint8_t tile_h_loc, tile_v_loc;
9173 uint16_t tile_h_size, tile_v_size;
9174 -
9175 - struct list_head destroy_list;
9176 };
9177
9178 /**
9179 diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
9180 index a89f505c856b..c7f01d1aa562 100644
9181 --- a/include/drm/drm_dp_mst_helper.h
9182 +++ b/include/drm/drm_dp_mst_helper.h
9183 @@ -449,9 +449,7 @@ struct drm_dp_mst_topology_mgr {
9184 the mstb tx_slots and txmsg->state once they are queued */
9185 struct mutex qlock;
9186 struct list_head tx_msg_downq;
9187 - struct list_head tx_msg_upq;
9188 bool tx_down_in_progress;
9189 - bool tx_up_in_progress;
9190
9191 /* payload info + lock for it */
9192 struct mutex payload_lock;
9193 diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
9194 index d639049a613d..553210c02ee0 100644
9195 --- a/include/drm/drm_fixed.h
9196 +++ b/include/drm/drm_fixed.h
9197 @@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
9198 #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
9199 #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
9200 #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
9201 +#define DRM_FIXED_EPSILON 1LL
9202 +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
9203
9204 static inline s64 drm_int2fixp(int a)
9205 {
9206 return ((s64)a) << DRM_FIXED_POINT;
9207 }
9208
9209 -static inline int drm_fixp2int(int64_t a)
9210 +static inline int drm_fixp2int(s64 a)
9211 {
9212 return ((s64)a) >> DRM_FIXED_POINT;
9213 }
9214
9215 -static inline unsigned drm_fixp_msbset(int64_t a)
9216 +static inline int drm_fixp2int_ceil(s64 a)
9217 +{
9218 + if (a > 0)
9219 + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
9220 + else
9221 + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
9222 +}
9223 +
9224 +static inline unsigned drm_fixp_msbset(s64 a)
9225 {
9226 unsigned shift, sign = (a >> 63) & 1;
9227
9228 @@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
9229 return result;
9230 }
9231
9232 +static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
9233 +{
9234 + s64 res;
9235 + bool a_neg = a < 0;
9236 + bool b_neg = b < 0;
9237 + u64 a_abs = a_neg ? -a : a;
9238 + u64 b_abs = b_neg ? -b : b;
9239 + u64 rem;
9240 +
9241 + /* determine integer part */
9242 + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
9243 +
9244 + /* determine fractional part */
9245 + {
9246 + u32 i = DRM_FIXED_POINT;
9247 +
9248 + do {
9249 + rem <<= 1;
9250 + res_abs <<= 1;
9251 + if (rem >= b_abs) {
9252 + res_abs |= 1;
9253 + rem -= b_abs;
9254 + }
9255 + } while (--i != 0);
9256 + }
9257 +
9258 + /* round up LSB */
9259 + {
9260 + u64 summand = (rem << 1) >= b_abs;
9261 +
9262 + res_abs += summand;
9263 + }
9264 +
9265 + res = (s64) res_abs;
9266 + if (a_neg ^ b_neg)
9267 + res = -res;
9268 + return res;
9269 +}
9270 +
9271 static inline s64 drm_fixp_exp(s64 x)
9272 {
9273 s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
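
drm_fixp_from_fraction() divides a 64-bit numerator by the denominator, produces the 32 fractional bits one at a time by shift-and-compare long division, then rounds the last bit; drm_fixp2int_ceil() adds DRM_FIXED_ALMOST_ONE before truncating. A standalone port with userspace types (only the positive-input behaviour is exercised below):

#include <inttypes.h>
#include <stdio.h>

#define FIXED_POINT       32
#define FIXED_ONE         (1ULL << FIXED_POINT)
#define FIXED_ALMOST_ONE  (FIXED_ONE - 1)

/* 32.32 signed fixed point from a fraction: bit-by-bit long division with
 * round-to-nearest on the last bit - the drm_fixp_from_fraction scheme. */
static int64_t fixp_from_fraction(int64_t a, int64_t b)
{
	int neg = (a < 0) ^ (b < 0);
	uint64_t a_abs = a < 0 ? -(uint64_t)a : (uint64_t)a;
	uint64_t b_abs = b < 0 ? -(uint64_t)b : (uint64_t)b;
	uint64_t res = a_abs / b_abs;	/* integer part */
	uint64_t rem = a_abs % b_abs;

	for (int i = 0; i < FIXED_POINT; i++) {	/* fractional bits */
		rem <<= 1;
		res <<= 1;
		if (rem >= b_abs) {
			res |= 1;
			rem -= b_abs;
		}
	}
	res += (rem << 1) >= b_abs;		/* round the LSB */
	return neg ? -(int64_t)res : (int64_t)res;
}

static int fixp2int_ceil(int64_t x)
{
	int64_t bias = x > 0 ? (int64_t)FIXED_ALMOST_ONE
			     : -(int64_t)FIXED_ALMOST_ONE;

	return (int)((x + bias) >> FIXED_POINT);
}

int main(void)
{
	int64_t third = fixp_from_fraction(1, 3);

	/* 1/3 in 32.32 fixed point: 0x0.55555555 */
	printf("1/3       = 0x%016" PRIx64 "\n", (uint64_t)third);
	printf("int(1/3)  = %d\n", (int)(third >> FIXED_POINT));	/* 0 */
	printf("ceil(1/3) = %d\n", fixp2int_ceil(third));		/* 1 */
	return 0;
}
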
9274 diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
9275 index e15499422fdc..e91c6f15f6e8 100644
9276 --- a/include/linux/ceph/messenger.h
9277 +++ b/include/linux/ceph/messenger.h
9278 @@ -224,6 +224,7 @@ struct ceph_connection {
9279 struct ceph_entity_addr actual_peer_addr;
9280
9281 /* message out temps */
9282 + struct ceph_msg_header out_hdr;
9283 struct ceph_msg *out_msg; /* sending message (== tail of
9284 out_sent) */
9285 bool out_msg_done;
9286 @@ -233,7 +234,6 @@ struct ceph_connection {
9287 int out_kvec_left; /* kvec's left in out_kvec */
9288 int out_skip; /* skip this many bytes */
9289 int out_kvec_bytes; /* total bytes left */
9290 - bool out_kvec_is_msg; /* kvec refers to out_msg */
9291 int out_more; /* there is more data after the kvecs */
9292 __le64 out_temp_ack; /* for writing an ack */
9293
9294 diff --git a/include/linux/console.h b/include/linux/console.h
9295 index 9f50fb413c11..901555a3886e 100644
9296 --- a/include/linux/console.h
9297 +++ b/include/linux/console.h
9298 @@ -149,6 +149,7 @@ extern int console_trylock(void);
9299 extern void console_unlock(void);
9300 extern void console_conditional_schedule(void);
9301 extern void console_unblank(void);
9302 +extern void console_flush_on_panic(void);
9303 extern struct tty_driver *console_device(int *);
9304 extern void console_stop(struct console *);
9305 extern void console_start(struct console *);
9306 diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
9307 index 5e1273d4de14..eda4a72a9b25 100644
9308 --- a/include/linux/nfs_fs_sb.h
9309 +++ b/include/linux/nfs_fs_sb.h
9310 @@ -220,7 +220,7 @@ struct nfs_server {
9311 #define NFS_CAP_SYMLINKS (1U << 2)
9312 #define NFS_CAP_ACLS (1U << 3)
9313 #define NFS_CAP_ATOMIC_OPEN (1U << 4)
9314 -#define NFS_CAP_CHANGE_ATTR (1U << 5)
9315 +/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
9316 #define NFS_CAP_FILEID (1U << 6)
9317 #define NFS_CAP_MODE (1U << 7)
9318 #define NFS_CAP_NLINK (1U << 8)
9319 diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
9320 index 33170dbd9db4..5d5174b59802 100644
9321 --- a/include/linux/radix-tree.h
9322 +++ b/include/linux/radix-tree.h
9323 @@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
9324 struct radix_tree_iter *iter, unsigned flags);
9325
9326 /**
9327 + * radix_tree_iter_retry - retry this chunk of the iteration
9328 + * @iter: iterator state
9329 + *
9330 + * If we iterate over a tree protected only by the RCU lock, a race
9331 + * against deletion or creation may result in seeing a slot for which
9332 + * radix_tree_deref_retry() returns true. If so, call this function
9333 + * and continue the iteration.
9334 + */
9335 +static inline __must_check
9336 +void **radix_tree_iter_retry(struct radix_tree_iter *iter)
9337 +{
9338 + iter->next_index = iter->index;
9339 + return NULL;
9340 +}
9341 +
9342 +/**
9343 * radix_tree_chunk_size - get current chunk size
9344 *
9345 * @iter: pointer to radix tree iterator
9346 * Returns: current chunk size
9347 */
9348 -static __always_inline unsigned
9349 +static __always_inline long
9350 radix_tree_chunk_size(struct radix_tree_iter *iter)
9351 {
9352 return iter->next_index - iter->index;
9353 @@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
9354 return slot + offset + 1;
9355 }
9356 } else {
9357 - unsigned size = radix_tree_chunk_size(iter) - 1;
9358 + long size = radix_tree_chunk_size(iter);
9359
9360 - while (size--) {
9361 + while (--size > 0) {
9362 slot++;
9363 iter->index++;
9364 if (likely(*slot))
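
The radix-tree loop change matters when a chunk is empty: the old "unsigned size = radix_tree_chunk_size(iter) - 1" underflows to UINT_MAX and "while (size--)" then walks far past the slot array, while the signed "while (--size > 0)" form never does. A demonstration (iteration counts capped so the broken case terminates):

#include <stdio.h>

/* Iterations of the old pattern: unsigned underflow when n == 0 turns
 * "n - 1" into UINT_MAX, so the walk runs off the end of the chunk. */
static unsigned long old_iters(unsigned n)
{
	unsigned size = n - 1;		/* underflows for n == 0 */
	unsigned long iters = 0;

	while (size--) {
		iters++;
		if (iters > 100)	/* cap the demo; real code kept going */
			break;
	}
	return iters;
}

/* The fixed pattern: signed count, pre-decrement, strictly-positive test. */
static unsigned long new_iters(unsigned n)
{
	long size = n;
	unsigned long iters = 0;

	while (--size > 0)
		iters++;
	return iters;
}

int main(void)
{
	for (unsigned n = 0; n <= 3; n++)
		printf("n=%u old=%lu new=%lu\n", n, old_iters(n), new_iters(n));
	return 0;
}
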
9365 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
9366 index c89c53a113a8..6f48ddc4b2b5 100644
9367 --- a/include/linux/rmap.h
9368 +++ b/include/linux/rmap.h
9369 @@ -105,20 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
9370 __put_anon_vma(anon_vma);
9371 }
9372
9373 -static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
9374 -{
9375 - struct anon_vma *anon_vma = vma->anon_vma;
9376 - if (anon_vma)
9377 - down_write(&anon_vma->root->rwsem);
9378 -}
9379 -
9380 -static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
9381 -{
9382 - struct anon_vma *anon_vma = vma->anon_vma;
9383 - if (anon_vma)
9384 - up_write(&anon_vma->root->rwsem);
9385 -}
9386 -
9387 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
9388 {
9389 down_write(&anon_vma->root->rwsem);
9390 diff --git a/include/linux/thermal.h b/include/linux/thermal.h
9391 index 5eac316490ea..2e7d0f7a0ecc 100644
9392 --- a/include/linux/thermal.h
9393 +++ b/include/linux/thermal.h
9394 @@ -40,6 +40,9 @@
9395 /* No upper/lower limit requirement */
9396 #define THERMAL_NO_LIMIT ((u32)~0)
9397
9398 +/* Use a value below 0K to indicate an invalid/uninitialized temperature */
9399 +#define THERMAL_TEMP_INVALID -274000
9400 +
9401 /* Unit conversion macros */
9402 #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
9403 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
9404 @@ -159,6 +162,7 @@ struct thermal_attr {
9405 * @forced_passive: If > 0, temperature at which to switch on all ACPI
9406 * processor cooling devices. Currently only used by the
9407 * step-wise governor.
9408 + * @need_update: if set to 1, thermal_zone_device_update needs to be invoked.
9409 * @ops: operations this &thermal_zone_device supports
9410 * @tzp: thermal zone parameters
9411 * @governor: pointer to the governor for this thermal zone
9412 @@ -185,6 +189,7 @@ struct thermal_zone_device {
9413 int emul_temperature;
9414 int passive;
9415 unsigned int forced_passive;
9416 + atomic_t need_update;
9417 struct thermal_zone_device_ops *ops;
9418 const struct thermal_zone_params *tzp;
9419 struct thermal_governor *governor;
9420 diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
9421 index f6cbef78db62..3b91ad5d5115 100644
9422 --- a/include/sound/rawmidi.h
9423 +++ b/include/sound/rawmidi.h
9424 @@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
9425 int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
9426 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
9427 unsigned char *buffer, int count);
9428 +int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
9429 + unsigned char *buffer, int count);
9430 +int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
9431 + int count);
9432
9433 /* main midi functions */
9434
9435 diff --git a/kernel/futex.c b/kernel/futex.c
9436 index 2579e407ff67..f3043db6d36f 100644
9437 --- a/kernel/futex.c
9438 +++ b/kernel/futex.c
9439 @@ -2632,6 +2632,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
9440 if (q.pi_state && (q.pi_state->owner != current)) {
9441 spin_lock(q.lock_ptr);
9442 ret = fixup_pi_state_owner(uaddr2, &q, current);
9443 + /*
9444 + * Drop the reference to the pi state which
9445 + * the requeue_pi() code acquired for us.
9446 + */
9447 + free_pi_state(q.pi_state);
9448 spin_unlock(q.lock_ptr);
9449 }
9450 } else {
9451 diff --git a/kernel/panic.c b/kernel/panic.c
9452 index 8136ad76e5fd..a4f7820f5930 100644
9453 --- a/kernel/panic.c
9454 +++ b/kernel/panic.c
9455 @@ -23,6 +23,7 @@
9456 #include <linux/sysrq.h>
9457 #include <linux/init.h>
9458 #include <linux/nmi.h>
9459 +#include <linux/console.h>
9460
9461 #define PANIC_TIMER_STEP 100
9462 #define PANIC_BLINK_SPD 18
9463 @@ -146,6 +147,17 @@ void panic(const char *fmt, ...)
9464
9465 bust_spinlocks(0);
9466
9467 + /*
9468 + * We may have ended up stopping the CPU holding the lock (in
9469 + * smp_send_stop()) while still having some valuable data in the console
9470 + * buffer. Try to acquire the lock then release it regardless of the
9471 + * result. The release will also print the buffers out. Lock debugging
9472 + * should be disabled to avoid reporting a bad unlock balance when
9473 + * panic() is not being called from an OOPS.
9474 + */
9475 + debug_locks_off();
9476 + console_flush_on_panic();
9477 +
9478 if (!panic_blink)
9479 panic_blink = no_blink;
9480
9481 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
9482 index bff0169e1ad8..3c1aca0c3543 100644
9483 --- a/kernel/printk/printk.c
9484 +++ b/kernel/printk/printk.c
9485 @@ -2173,13 +2173,24 @@ void console_unlock(void)
9486 static u64 seen_seq;
9487 unsigned long flags;
9488 bool wake_klogd = false;
9489 - bool retry;
9490 + bool do_cond_resched, retry;
9491
9492 if (console_suspended) {
9493 up_console_sem();
9494 return;
9495 }
9496
9497 + /*
9498 + * Console drivers are called under logbuf_lock, so
9499 + * @console_may_schedule should be cleared beforehand; however, we may
9500 + * end up dumping a lot of lines, for example, if called from
9501 + * console registration path, and should invoke cond_resched()
9502 + * between lines if allowable. Not doing so can cause a very long
9503 + * scheduling stall on a slow console leading to RCU stall and
9504 + * softlockup warnings which exacerbate the issue with more
9505 + * messages practically incapacitating the system.
9506 + */
9507 + do_cond_resched = console_may_schedule;
9508 console_may_schedule = 0;
9509
9510 /* flush buffered message fragment immediately to console */
9511 @@ -2241,6 +2252,9 @@ skip:
9512 call_console_drivers(level, text, len);
9513 start_critical_timings();
9514 local_irq_restore(flags);
9515 +
9516 + if (do_cond_resched)
9517 + cond_resched();
9518 }
9519 console_locked = 0;
9520
9521 @@ -2308,6 +2322,25 @@ void console_unblank(void)
9522 console_unlock();
9523 }
9524
9525 +/**
9526 + * console_flush_on_panic - flush console content on panic
9527 + *
9528 + * Immediately output all pending messages no matter what.
9529 + */
9530 +void console_flush_on_panic(void)
9531 +{
9532 + /*
9533 + * If someone else is holding the console lock, trylock will fail
9534 + * and may_schedule may be set. Ignore and proceed to unlock so
9535 + * that messages are flushed out. As this can be called from any
9536 + * context and we don't want to get preempted while flushing,
9537 + * ensure may_schedule is cleared.
9538 + */
9539 + console_trylock();
9540 + console_may_schedule = 0;
9541 + console_unlock();
9542 +}
9543 +
9544 /*
9545 * Return the console tty driver structure and its associated index
9546 */
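
console_flush_on_panic() deliberately ignores the trylock result: on panic, the CPU that held the console lock may already be stopped, so the panicking CPU forces its way in, clears console_may_schedule, and lets the unlock path drain the buffer. Sketch with an atomic flag in place of the console lock, since a pthread mutex must not be unlocked by a non-owner (names with trailing underscores are invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag console_lock = ATOMIC_FLAG_INIT;
static int may_schedule;
static const char *pending[] = { "oops line 1", "oops line 2", NULL };

static void flush_pending(void)
{
	for (const char **m = pending; *m; m++)
		fprintf(stderr, "%s\n", *m);
}

static void console_unlock_(void)
{
	flush_pending();	/* the unlock path drains the buffer */
	atomic_flag_clear(&console_lock);
}

/* Panic path: try to take the lock, but flush even if another (now dead)
 * CPU holds it - hence a flag with forced-release semantics. */
static void console_flush_on_panic_(void)
{
	(void)atomic_flag_test_and_set(&console_lock);	/* result ignored */
	may_schedule = 0;	/* never reschedule while panicking */
	console_unlock_();
}

int main(void)
{
	/* simulate a CPU that died while holding the console lock */
	atomic_flag_test_and_set(&console_lock);
	console_flush_on_panic_();	/* still prints both lines */
	return 0;
}
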
9547 diff --git a/kernel/seccomp.c b/kernel/seccomp.c
9548 index 4f44028943e6..30c682adcdeb 100644
9549 --- a/kernel/seccomp.c
9550 +++ b/kernel/seccomp.c
9551 @@ -317,24 +317,24 @@ static inline void seccomp_sync_threads(void)
9552 put_seccomp_filter(thread);
9553 smp_store_release(&thread->seccomp.filter,
9554 caller->seccomp.filter);
9555 +
9556 + /*
9557 + * Don't let an unprivileged task work around
9558 + * the no_new_privs restriction by creating
9559 + * a thread that sets it up, enters seccomp,
9560 + * then dies.
9561 + */
9562 + if (task_no_new_privs(caller))
9563 + task_set_no_new_privs(thread);
9564 +
9565 /*
9566 * Opt the other thread into seccomp if needed.
9567 * As threads are considered to be trust-realm
9568 * equivalent (see ptrace_may_access), it is safe to
9569 * allow one thread to transition the other.
9570 */
9571 - if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
9572 - /*
9573 - * Don't let an unprivileged task work around
9574 - * the no_new_privs restriction by creating
9575 - * a thread that sets it up, enters seccomp,
9576 - * then dies.
9577 - */
9578 - if (task_no_new_privs(caller))
9579 - task_set_no_new_privs(thread);
9580 -
9581 + if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
9582 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
9583 - }
9584 }
9585 }
9586
9587 diff --git a/kernel/sys.c b/kernel/sys.c
9588 index a4e372b798a5..25ae8d2e65e2 100644
9589 --- a/kernel/sys.c
9590 +++ b/kernel/sys.c
9591 @@ -1854,11 +1854,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
9592 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
9593 }
9594
9595 - if (prctl_map.exe_fd != (u32)-1)
9596 + if (prctl_map.exe_fd != (u32)-1) {
9597 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
9598 - down_read(&mm->mmap_sem);
9599 - if (error)
9600 - goto out;
9601 + if (error)
9602 + return error;
9603 + }
9604 +
9605 + down_write(&mm->mmap_sem);
9606
9607 /*
9608 * We don't validate if these members are pointing to
9609 @@ -1895,10 +1897,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
9610 if (prctl_map.auxv_size)
9611 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
9612
9613 - error = 0;
9614 -out:
9615 - up_read(&mm->mmap_sem);
9616 - return error;
9617 + up_write(&mm->mmap_sem);
9618 + return 0;
9619 }
9620 #endif /* CONFIG_CHECKPOINT_RESTORE */
9621
9622 @@ -1930,7 +1930,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
9623
9624 error = -EINVAL;
9625
9626 - down_read(&mm->mmap_sem);
9627 + down_write(&mm->mmap_sem);
9628 vma = find_vma(mm, addr);
9629
9630 switch (opt) {
9631 @@ -2033,7 +2033,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
9632
9633 error = 0;
9634 out:
9635 - up_read(&mm->mmap_sem);
9636 + up_write(&mm->mmap_sem);
9637 return error;
9638 }
9639
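
The prctl_set_mm hunks above switch from down_read() to down_write() because these paths modify mm fields; a read lock only keeps readers consistent with one another. The rule in miniature, with a POSIX rwlock standing in for mmap_sem (names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long start_brk, brk_end;

    static void set_brk(unsigned long start, unsigned long end)
    {
        pthread_rwlock_wrlock(&map_lock);   /* writer side: we mutate state */
        start_brk = start;
        brk_end = end;
        pthread_rwlock_unlock(&map_lock);
    }

    int main(void)
    {
        set_brk(0x1000, 0x2000);
        printf("brk: %#lx-%#lx\n", start_brk, brk_end);
        return 0;
    }
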
9640 diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
9641 index ce033c7aa2e8..9cff0ab82b63 100644
9642 --- a/kernel/time/posix-clock.c
9643 +++ b/kernel/time/posix-clock.c
9644 @@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
9645 static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
9646 {
9647 struct posix_clock *clk = get_posix_clock(fp);
9648 - int result = 0;
9649 + unsigned int result = 0;
9650
9651 if (!clk)
9652 - return -ENODEV;
9653 + return POLLERR;
9654
9655 if (clk->ops.poll)
9656 result = clk->ops.poll(clk, fp, wait);
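
The posix_clock_poll() fix matters because a poll handler returns an event mask, not an errno: a negative return reinterpreted as an unsigned mask has nearly every bit set, so callers would see spurious readiness. A small userspace illustration of the distinction (fake_poll is hypothetical):

    #include <poll.h>
    #include <stdio.h>

    /* returns an event mask; POLLERR signals a dead device */
    static unsigned int fake_poll(int device_gone)
    {
        if (device_gone)
            return POLLERR;         /* a mask bit, not -ENODEV */
        return POLLIN;
    }

    int main(void)
    {
        unsigned int mask = fake_poll(1);

        printf("POLLERR set: %d\n", !!(mask & POLLERR));
        /* Had we returned (unsigned int)-ENODEV instead, almost every
         * bit would read as set, POLLIN included, and callers would
         * believe data is ready forever. */
        return 0;
    }
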
9657 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
9658 index 414d9df94724..65dbf8aee751 100644
9659 --- a/kernel/time/timekeeping.c
9660 +++ b/kernel/time/timekeeping.c
9661 @@ -316,8 +316,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
9662
9663 delta = timekeeping_get_delta(tkr);
9664
9665 - nsec = delta * tkr->mult + tkr->xtime_nsec;
9666 - nsec >>= tkr->shift;
9667 + nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
9668
9669 /* If arch requires, add in get_arch_timeoffset() */
9670 return nsec + arch_gettimeoffset();
9671 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
9672 index dace71fe41f7..517a568f038d 100644
9673 --- a/lib/dma-debug.c
9674 +++ b/lib/dma-debug.c
9675 @@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
9676
9677 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
9678 {
9679 - if (overlap(addr, len, _text, _etext) ||
9680 + if (overlap(addr, len, _stext, _etext) ||
9681 overlap(addr, len, __start_rodata, __end_rodata))
9682 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
9683 }
9684 diff --git a/lib/dump_stack.c b/lib/dump_stack.c
9685 index 6745c6230db3..c30d07e99dba 100644
9686 --- a/lib/dump_stack.c
9687 +++ b/lib/dump_stack.c
9688 @@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
9689
9690 asmlinkage __visible void dump_stack(void)
9691 {
9692 + unsigned long flags;
9693 int was_locked;
9694 int old;
9695 int cpu;
9696 @@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
9697 * Permit this cpu to perform nested stack dumps while serialising
9698 * against other CPUs
9699 */
9700 - preempt_disable();
9701 -
9702 retry:
9703 + local_irq_save(flags);
9704 cpu = smp_processor_id();
9705 old = atomic_cmpxchg(&dump_lock, -1, cpu);
9706 if (old == -1) {
9707 @@ -43,6 +43,7 @@ retry:
9708 } else if (old == cpu) {
9709 was_locked = 1;
9710 } else {
9711 + local_irq_restore(flags);
9712 cpu_relax();
9713 goto retry;
9714 }
9715 @@ -52,7 +53,7 @@ retry:
9716 if (!was_locked)
9717 atomic_set(&dump_lock, -1);
9718
9719 - preempt_enable();
9720 + local_irq_restore(flags);
9721 }
9722 #else
9723 asmlinkage __visible void dump_stack(void)
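
The dump_stack() serialisation above is a small recursion-tolerant spinlock: the owner id lives in the lock word, so the same context may re-enter, and the move from preempt_disable() to local_irq_save() closes a window where an interrupt could deadlock against the lock handoff. A sketch with C11 atomics, thread ids standing in for CPU ids:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int dump_lock = -1;   /* -1 == unlocked, else owner id */

    static void dump(int self)
    {
        int expected, was_locked = 0;

        for (;;) {
            expected = -1;
            if (atomic_compare_exchange_strong(&dump_lock, &expected, self))
                break;                  /* acquired */
            if (expected == self) {
                was_locked = 1;         /* nested call from the same context */
                break;
            }
            /* owned by someone else: spin and retry */
        }

        printf("backtrace from context %d\n", self);

        if (!was_locked)                /* only the outermost call releases */
            atomic_store(&dump_lock, -1);
    }

    int main(void)
    {
        dump(0);
        dump(0);
        return 0;
    }
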
9724 diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
9725 index 6a08ce7d6adc..acf9da449f81 100644
9726 --- a/lib/libcrc32c.c
9727 +++ b/lib/libcrc32c.c
9728 @@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini);
9729 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
9730 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
9731 MODULE_LICENSE("GPL");
9732 +MODULE_SOFTDEP("pre: crc32c");
9733 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
9734 index 3d2aa27b845b..8399002aa0f0 100644
9735 --- a/lib/radix-tree.c
9736 +++ b/lib/radix-tree.c
9737 @@ -1014,9 +1014,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
9738 return 0;
9739
9740 radix_tree_for_each_slot(slot, root, &iter, first_index) {
9741 - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
9742 + results[ret] = rcu_dereference_raw(*slot);
9743 if (!results[ret])
9744 continue;
9745 + if (radix_tree_is_indirect_ptr(results[ret])) {
9746 + slot = radix_tree_iter_retry(&iter);
9747 + continue;
9748 + }
9749 if (++ret == max_items)
9750 break;
9751 }
9752 @@ -1093,9 +1097,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
9753 return 0;
9754
9755 radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
9756 - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
9757 + results[ret] = rcu_dereference_raw(*slot);
9758 if (!results[ret])
9759 continue;
9760 + if (radix_tree_is_indirect_ptr(results[ret])) {
9761 + slot = radix_tree_iter_retry(&iter);
9762 + continue;
9763 + }
9764 if (++ret == max_items)
9765 break;
9766 }
9767 diff --git a/lib/string_helpers.c b/lib/string_helpers.c
9768 index c98ae818eb4e..33e79b5eea77 100644
9769 --- a/lib/string_helpers.c
9770 +++ b/lib/string_helpers.c
9771 @@ -43,46 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
9772 [STRING_UNITS_10] = 1000,
9773 [STRING_UNITS_2] = 1024,
9774 };
9775 - int i, j;
9776 - u32 remainder = 0, sf_cap, exp;
9777 + static const unsigned int rounding[] = { 500, 50, 5 };
9778 + int i = 0, j;
9779 + u32 remainder = 0, sf_cap;
9780 char tmp[8];
9781 const char *unit;
9782
9783 tmp[0] = '\0';
9784 - i = 0;
9785 - if (!size)
9786 +
9787 + if (blk_size == 0)
9788 + size = 0;
9789 + if (size == 0)
9790 goto out;
9791
9792 - while (blk_size >= divisor[units]) {
9793 - remainder = do_div(blk_size, divisor[units]);
9794 + /* This is Napier's algorithm. Reduce the original block size to
9795 + *
9796 + * coefficient * divisor[units]^i
9797 + *
9798 + * we do the reduction so both coefficients are just under 32 bits so
9799 + * that multiplying them together won't overflow 64 bits and we keep
9800 + * as much precision as possible in the numbers.
9801 + *
9802 + * Note: it's safe to throw away the remainders here because all the
9803 + * precision is in the coefficients.
9804 + */
9805 + while (blk_size >> 32) {
9806 + do_div(blk_size, divisor[units]);
9807 i++;
9808 }
9809
9810 - exp = divisor[units] / (u32)blk_size;
9811 - if (size >= exp) {
9812 - remainder = do_div(size, divisor[units]);
9813 - remainder *= blk_size;
9814 + while (size >> 32) {
9815 + do_div(size, divisor[units]);
9816 i++;
9817 - } else {
9818 - remainder *= size;
9819 }
9820
9821 + /* now perform the actual multiplication keeping i as the sum of the
9822 + * two logarithms */
9823 size *= blk_size;
9824 - size += remainder / divisor[units];
9825 - remainder %= divisor[units];
9826
9827 + /* and logarithmically reduce it until it's just under the divisor */
9828 while (size >= divisor[units]) {
9829 remainder = do_div(size, divisor[units]);
9830 i++;
9831 }
9832
9833 + /* work out in j how many digits of precision we need from the
9834 + * remainder */
9835 sf_cap = size;
9836 for (j = 0; sf_cap*10 < 1000; j++)
9837 sf_cap *= 10;
9838
9839 - if (j) {
9840 + if (units == STRING_UNITS_2) {
9841 + /* express the remainder as a decimal. It's currently the
9842 + * numerator of a fraction whose denominator is
9843 + * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
9844 remainder *= 1000;
9845 - remainder /= divisor[units];
9846 + remainder >>= 10;
9847 + }
9848 +
9849 + /* add a 5 to the digit below what will be printed to ensure
9850 + * an arithmetical round up and carry it through to size */
9851 + remainder += rounding[j];
9852 + if (remainder >= 1000) {
9853 + remainder -= 1000;
9854 + size += 1;
9855 + }
9856 +
9857 + if (j) {
9858 snprintf(tmp, sizeof(tmp), ".%03u", remainder);
9859 tmp[j+1] = '\0';
9860 }
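
The string_get_size() rework is the most algorithmic change in this batch: both inputs are reduced below 32 bits (accumulating the exponent in i) so their product cannot overflow 64 bits, the product is reduced again, and the remainder is rounded at the digit below what will be printed. A freestanding re-derivation, restricted to base 1000 to keep it short (so the STRING_UNITS_2 remainder conversion is omitted):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        static const unsigned int rounding[] = { 500, 50, 5 };
        uint64_t size = 8192, blk_size = 4096ULL * 1000 * 1000 * 1000;
        uint32_t remainder = 0, sf_cap;
        int i = 0, j;

        /* reduce both coefficients below 32 bits; the dropped
         * remainders carry negligible precision */
        while (blk_size >> 32) { blk_size /= 1000; i++; }
        while (size >> 32)     { size /= 1000; i++; }

        size *= blk_size;               /* cannot overflow: both < 2^32 */

        while (size >= 1000) {          /* logarithmic reduction */
            remainder = size % 1000;
            size /= 1000;
            i++;
        }

        sf_cap = size;                  /* how many digits fit after the dot */
        for (j = 0; sf_cap * 10 < 1000; j++)
            sf_cap *= 10;

        remainder += rounding[j];       /* round at the first hidden digit */
        if (remainder >= 1000) {
            remainder -= 1000;
            size += 1;
        }

        printf("%" PRIu64 ".%03u x 1000^%d (%d fractional digits)\n",
               size, remainder, i, j);
        return 0;
    }
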
9861 diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
9862 index fcad8322ef36..b640609bcd17 100644
9863 --- a/mm/balloon_compaction.c
9864 +++ b/mm/balloon_compaction.c
9865 @@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
9866 bool dequeued_page;
9867
9868 dequeued_page = false;
9869 + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
9870 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
9871 /*
9872 * Block others from accessing the 'page' while we get around
9873 @@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
9874 continue;
9875 }
9876 #endif
9877 - spin_lock_irqsave(&b_dev_info->pages_lock, flags);
9878 balloon_page_delete(page);
9879 __count_vm_event(BALLOON_DEFLATE);
9880 - spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
9881 unlock_page(page);
9882 dequeued_page = true;
9883 break;
9884 }
9885 }
9886 + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
9887
9888 if (!dequeued_page) {
9889 /*
9890 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
9891 index 68dea90334cb..aac1c98a9bc7 100644
9892 --- a/mm/memcontrol.c
9893 +++ b/mm/memcontrol.c
9894 @@ -3824,16 +3824,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
9895 swap_buffers:
9896 /* Swap primary and spare array */
9897 thresholds->spare = thresholds->primary;
9898 - /* If all events are unregistered, free the spare array */
9899 - if (!new) {
9900 - kfree(thresholds->spare);
9901 - thresholds->spare = NULL;
9902 - }
9903
9904 rcu_assign_pointer(thresholds->primary, new);
9905
9906 /* To be sure that nobody uses thresholds */
9907 synchronize_rcu();
9908 +
9909 + /* If all events are unregistered, free the spare array */
9910 + if (!new) {
9911 + kfree(thresholds->spare);
9912 + thresholds->spare = NULL;
9913 + }
9914 unlock:
9915 mutex_unlock(&memcg->thresholds_lock);
9916 }
9917 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
9918 index 9f48145c884f..e26bc59d7dff 100644
9919 --- a/mm/memory-failure.c
9920 +++ b/mm/memory-failure.c
9921 @@ -1557,7 +1557,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
9922 * Did it turn free?
9923 */
9924 ret = __get_any_page(page, pfn, 0);
9925 - if (!PageLRU(page)) {
9926 + if (ret == 1 && !PageLRU(page)) {
9927 /* Drop page reference which is from __get_any_page() */
9928 put_page(page);
9929 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
9930 diff --git a/mm/mlock.c b/mm/mlock.c
9931 index 6fd2cf15e868..3d3ee6cad776 100644
9932 --- a/mm/mlock.c
9933 +++ b/mm/mlock.c
9934 @@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
9935 */
9936 unsigned int munlock_vma_page(struct page *page)
9937 {
9938 - unsigned int nr_pages;
9939 + int nr_pages;
9940 struct zone *zone = page_zone(page);
9941
9942 /* For try_to_munlock() and to serialize with page migration */
9943 diff --git a/mm/mmap.c b/mm/mmap.c
9944 index bb50cacc3ea5..b639fa2721d8 100644
9945 --- a/mm/mmap.c
9946 +++ b/mm/mmap.c
9947 @@ -440,12 +440,16 @@ static void validate_mm(struct mm_struct *mm)
9948 struct vm_area_struct *vma = mm->mmap;
9949
9950 while (vma) {
9951 + struct anon_vma *anon_vma = vma->anon_vma;
9952 struct anon_vma_chain *avc;
9953
9954 - vma_lock_anon_vma(vma);
9955 - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
9956 - anon_vma_interval_tree_verify(avc);
9957 - vma_unlock_anon_vma(vma);
9958 + if (anon_vma) {
9959 + anon_vma_lock_read(anon_vma);
9960 + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
9961 + anon_vma_interval_tree_verify(avc);
9962 + anon_vma_unlock_read(anon_vma);
9963 + }
9964 +
9965 highest_address = vma->vm_end;
9966 vma = vma->vm_next;
9967 i++;
9968 @@ -2141,32 +2145,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
9969 */
9970 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
9971 {
9972 - int error;
9973 + int error = 0;
9974
9975 if (!(vma->vm_flags & VM_GROWSUP))
9976 return -EFAULT;
9977
9978 - /*
9979 - * We must make sure the anon_vma is allocated
9980 - * so that the anon_vma locking is not a noop.
9981 - */
9982 + /* Guard against wrapping around to address 0. */
9983 + if (address < PAGE_ALIGN(address+4))
9984 + address = PAGE_ALIGN(address+4);
9985 + else
9986 + return -ENOMEM;
9987 +
9988 + /* We must make sure the anon_vma is allocated. */
9989 if (unlikely(anon_vma_prepare(vma)))
9990 return -ENOMEM;
9991 - vma_lock_anon_vma(vma);
9992
9993 /*
9994 * vma->vm_start/vm_end cannot change under us because the caller
9995 * is required to hold the mmap_sem in read mode. We need the
9996 * anon_vma lock to serialize against concurrent expand_stacks.
9997 - * Also guard against wrapping around to address 0.
9998 */
9999 - if (address < PAGE_ALIGN(address+4))
10000 - address = PAGE_ALIGN(address+4);
10001 - else {
10002 - vma_unlock_anon_vma(vma);
10003 - return -ENOMEM;
10004 - }
10005 - error = 0;
10006 + anon_vma_lock_write(vma->anon_vma);
10007
10008 /* Somebody else might have raced and expanded it already */
10009 if (address > vma->vm_end) {
10010 @@ -2184,7 +2183,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
10011 * updates, but we only hold a shared mmap_sem
10012 * lock here, so we need to protect against
10013 * concurrent vma expansions.
10014 - * vma_lock_anon_vma() doesn't help here, as
10015 + * anon_vma_lock_write() doesn't help here, as
10016 * we don't guarantee that all growable vmas
10017 * in a mm share the same root anon vma.
10018 * So, we reuse mm->page_table_lock to guard
10019 @@ -2204,7 +2203,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
10020 }
10021 }
10022 }
10023 - vma_unlock_anon_vma(vma);
10024 + anon_vma_unlock_write(vma->anon_vma);
10025 khugepaged_enter_vma_merge(vma, vma->vm_flags);
10026 validate_mm(vma->vm_mm);
10027 return error;
10028 @@ -2219,25 +2218,21 @@ int expand_downwards(struct vm_area_struct *vma,
10029 {
10030 int error;
10031
10032 - /*
10033 - * We must make sure the anon_vma is allocated
10034 - * so that the anon_vma locking is not a noop.
10035 - */
10036 - if (unlikely(anon_vma_prepare(vma)))
10037 - return -ENOMEM;
10038 -
10039 address &= PAGE_MASK;
10040 error = security_mmap_addr(address);
10041 if (error)
10042 return error;
10043
10044 - vma_lock_anon_vma(vma);
10045 + /* We must make sure the anon_vma is allocated. */
10046 + if (unlikely(anon_vma_prepare(vma)))
10047 + return -ENOMEM;
10048
10049 /*
10050 * vma->vm_start/vm_end cannot change under us because the caller
10051 * is required to hold the mmap_sem in read mode. We need the
10052 * anon_vma lock to serialize against concurrent expand_stacks.
10053 */
10054 + anon_vma_lock_write(vma->anon_vma);
10055
10056 /* Somebody else might have raced and expanded it already */
10057 if (address < vma->vm_start) {
10058 @@ -2255,7 +2250,7 @@ int expand_downwards(struct vm_area_struct *vma,
10059 * updates, but we only hold a shared mmap_sem
10060 * lock here, so we need to protect against
10061 * concurrent vma expansions.
10062 - * vma_lock_anon_vma() doesn't help here, as
10063 + * anon_vma_lock_write() doesn't help here, as
10064 * we don't guarantee that all growable vmas
10065 * in a mm share the same root anon vma.
10066 * So, we reuse mm->page_table_lock to guard
10067 @@ -2273,7 +2268,7 @@ int expand_downwards(struct vm_area_struct *vma,
10068 }
10069 }
10070 }
10071 - vma_unlock_anon_vma(vma);
10072 + anon_vma_unlock_write(vma->anon_vma);
10073 khugepaged_enter_vma_merge(vma, vma->vm_flags);
10074 validate_mm(vma->vm_mm);
10075 return error;
10076 diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
10077 index a8b5e749e84e..fb1ec10ce449 100644
10078 --- a/mm/zsmalloc.c
10079 +++ b/mm/zsmalloc.c
10080 @@ -306,7 +306,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
10081
10082 static void record_obj(unsigned long handle, unsigned long obj)
10083 {
10084 - *(unsigned long *)handle = obj;
10085 + /*
10086 + * lsb of @obj represents handle lock while other bits
10087 + * represent the object value the handle points to, so
10088 + * updating shouldn't do store tearing.
10089 + */
10090 + WRITE_ONCE(*(unsigned long *)handle, obj);
10091 }
10092
10093 /* zpool driver */
10094 @@ -1641,6 +1646,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
10095 free_obj = obj_malloc(d_page, class, handle);
10096 zs_object_copy(used_obj, free_obj, class);
10097 index++;
10098 + /*
10099 + * record_obj updates handle's value to free_obj and it will
10100 + * invalidate the lock bit (i.e., HANDLE_PIN_BIT) of the handle, which
10101 + * breaks synchronization using pin_tag (e.g., zs_free), so
10102 + * let's keep the lock bit.
10103 + */
10104 + free_obj |= BIT(HANDLE_PIN_BIT);
10105 record_obj(handle, free_obj);
10106 unpin_tag(handle);
10107 obj_free(pool, class, used_obj);
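
Two ideas combine in the zsmalloc hunks: the low bit of the handle word is a pin/lock flag that migration must preserve, and the store must be a single write (WRITE_ONCE()) so a racing reader never observes a torn value. The same shape in userspace C11 atomics (PIN_BIT and the other names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PIN_BIT 0x1UL               /* stands in for HANDLE_PIN_BIT */

    static atomic_ulong handle;

    static void record_obj(unsigned long obj)
    {
        /* keep the pin bit set across the update, as migrate_zspage()
         * now does with free_obj |= BIT(HANDLE_PIN_BIT); the atomic
         * store plays the role of WRITE_ONCE() */
        atomic_store_explicit(&handle, obj | PIN_BIT, memory_order_relaxed);
    }

    int main(void)
    {
        unsigned long v;

        record_obj(0xabcd00UL);
        v = atomic_load(&handle);
        printf("value=%#lx pinned=%lu\n", v & ~PIN_BIT, v & PIN_BIT);
        return 0;
    }
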
10108 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
10109 index 967080a9f043..e51af69c61bf 100644
10110 --- a/net/ceph/messenger.c
10111 +++ b/net/ceph/messenger.c
10112 @@ -675,6 +675,8 @@ static void reset_connection(struct ceph_connection *con)
10113 }
10114 con->in_seq = 0;
10115 con->in_seq_acked = 0;
10116 +
10117 + con->out_skip = 0;
10118 }
10119
10120 /*
10121 @@ -774,6 +776,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
10122
10123 static void con_out_kvec_reset(struct ceph_connection *con)
10124 {
10125 + BUG_ON(con->out_skip);
10126 +
10127 con->out_kvec_left = 0;
10128 con->out_kvec_bytes = 0;
10129 con->out_kvec_cur = &con->out_kvec[0];
10130 @@ -782,9 +786,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
10131 static void con_out_kvec_add(struct ceph_connection *con,
10132 size_t size, void *data)
10133 {
10134 - int index;
10135 + int index = con->out_kvec_left;
10136
10137 - index = con->out_kvec_left;
10138 + BUG_ON(con->out_skip);
10139 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
10140
10141 con->out_kvec[index].iov_len = size;
10142 @@ -793,6 +797,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
10143 con->out_kvec_bytes += size;
10144 }
10145
10146 +/*
10147 + * Chop off a kvec from the end. Return residual number of bytes for
10148 + * that kvec, i.e. how many bytes would have been written if the kvec
10149 + * hadn't been nuked.
10150 + */
10151 +static int con_out_kvec_skip(struct ceph_connection *con)
10152 +{
10153 + int off = con->out_kvec_cur - con->out_kvec;
10154 + int skip = 0;
10155 +
10156 + if (con->out_kvec_bytes > 0) {
10157 + skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
10158 + BUG_ON(con->out_kvec_bytes < skip);
10159 + BUG_ON(!con->out_kvec_left);
10160 + con->out_kvec_bytes -= skip;
10161 + con->out_kvec_left--;
10162 + }
10163 +
10164 + return skip;
10165 +}
10166 +
10167 #ifdef CONFIG_BLOCK
10168
10169 /*
10170 @@ -1200,7 +1225,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
10171 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
10172
10173 dout("prepare_write_message_footer %p\n", con);
10174 - con->out_kvec_is_msg = true;
10175 con->out_kvec[v].iov_base = &m->footer;
10176 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
10177 if (con->ops->sign_message)
10178 @@ -1228,7 +1252,6 @@ static void prepare_write_message(struct ceph_connection *con)
10179 u32 crc;
10180
10181 con_out_kvec_reset(con);
10182 - con->out_kvec_is_msg = true;
10183 con->out_msg_done = false;
10184
10185 /* Sneak an ack in there first? If we can get it into the same
10186 @@ -1268,18 +1291,19 @@ static void prepare_write_message(struct ceph_connection *con)
10187
10188 /* tag + hdr + front + middle */
10189 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
10190 - con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
10191 + con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
10192 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
10193
10194 if (m->middle)
10195 con_out_kvec_add(con, m->middle->vec.iov_len,
10196 m->middle->vec.iov_base);
10197
10198 - /* fill in crc (except data pages), footer */
10199 + /* fill in hdr crc and finalize hdr */
10200 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
10201 con->out_msg->hdr.crc = cpu_to_le32(crc);
10202 - con->out_msg->footer.flags = 0;
10203 + memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
10204
10205 + /* fill in front and middle crc, footer */
10206 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
10207 con->out_msg->footer.front_crc = cpu_to_le32(crc);
10208 if (m->middle) {
10209 @@ -1291,6 +1315,7 @@ static void prepare_write_message(struct ceph_connection *con)
10210 dout("%s front_crc %u middle_crc %u\n", __func__,
10211 le32_to_cpu(con->out_msg->footer.front_crc),
10212 le32_to_cpu(con->out_msg->footer.middle_crc));
10213 + con->out_msg->footer.flags = 0;
10214
10215 /* is there a data payload? */
10216 con->out_msg->footer.data_crc = 0;
10217 @@ -1485,7 +1510,6 @@ static int write_partial_kvec(struct ceph_connection *con)
10218 }
10219 }
10220 con->out_kvec_left = 0;
10221 - con->out_kvec_is_msg = false;
10222 ret = 1;
10223 out:
10224 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
10225 @@ -1577,6 +1601,7 @@ static int write_partial_skip(struct ceph_connection *con)
10226 {
10227 int ret;
10228
10229 + dout("%s %p %d left\n", __func__, con, con->out_skip);
10230 while (con->out_skip > 0) {
10231 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
10232
10233 @@ -2493,13 +2518,13 @@ more:
10234
10235 more_kvec:
10236 /* kvec data queued? */
10237 - if (con->out_skip) {
10238 - ret = write_partial_skip(con);
10239 + if (con->out_kvec_left) {
10240 + ret = write_partial_kvec(con);
10241 if (ret <= 0)
10242 goto out;
10243 }
10244 - if (con->out_kvec_left) {
10245 - ret = write_partial_kvec(con);
10246 + if (con->out_skip) {
10247 + ret = write_partial_skip(con);
10248 if (ret <= 0)
10249 goto out;
10250 }
10251 @@ -3026,16 +3051,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
10252 ceph_msg_put(msg);
10253 }
10254 if (con->out_msg == msg) {
10255 - dout("%s %p msg %p - was sending\n", __func__, con, msg);
10256 - con->out_msg = NULL;
10257 - if (con->out_kvec_is_msg) {
10258 - con->out_skip = con->out_kvec_bytes;
10259 - con->out_kvec_is_msg = false;
10260 + BUG_ON(con->out_skip);
10261 + /* footer */
10262 + if (con->out_msg_done) {
10263 + con->out_skip += con_out_kvec_skip(con);
10264 + } else {
10265 + BUG_ON(!msg->data_length);
10266 + if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
10267 + con->out_skip += sizeof(msg->footer);
10268 + else
10269 + con->out_skip += sizeof(msg->old_footer);
10270 }
10271 + /* data, middle, front */
10272 + if (msg->data_length)
10273 + con->out_skip += msg->cursor.total_resid;
10274 + if (msg->middle)
10275 + con->out_skip += con_out_kvec_skip(con);
10276 + con->out_skip += con_out_kvec_skip(con);
10277 +
10278 + dout("%s %p msg %p - was sending, will write %d skip %d\n",
10279 + __func__, con, msg, con->out_kvec_bytes, con->out_skip);
10280 msg->hdr.seq = 0;
10281 -
10282 + con->out_msg = NULL;
10283 ceph_msg_put(msg);
10284 }
10285 +
10286 mutex_unlock(&con->mutex);
10287 }
10288
10289 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
10290 index a9c9d961f039..41adfc898a18 100644
10291 --- a/net/mac80211/ibss.c
10292 +++ b/net/mac80211/ibss.c
10293 @@ -1727,7 +1727,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
10294 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
10295 continue;
10296 sdata->u.ibss.last_scan_completed = jiffies;
10297 - ieee80211_queue_work(&local->hw, &sdata->work);
10298 }
10299 mutex_unlock(&local->iflist_mtx);
10300 }
10301 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
10302 index 817098add1d6..afcc67a157fd 100644
10303 --- a/net/mac80211/mesh.c
10304 +++ b/net/mac80211/mesh.c
10305 @@ -1299,17 +1299,6 @@ out:
10306 sdata_unlock(sdata);
10307 }
10308
10309 -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
10310 -{
10311 - struct ieee80211_sub_if_data *sdata;
10312 -
10313 - rcu_read_lock();
10314 - list_for_each_entry_rcu(sdata, &local->interfaces, list)
10315 - if (ieee80211_vif_is_mesh(&sdata->vif) &&
10316 - ieee80211_sdata_running(sdata))
10317 - ieee80211_queue_work(&local->hw, &sdata->work);
10318 - rcu_read_unlock();
10319 -}
10320
10321 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
10322 {
10323 diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
10324 index 50c8473cf9dc..472bdc73e950 100644
10325 --- a/net/mac80211/mesh.h
10326 +++ b/net/mac80211/mesh.h
10327 @@ -358,14 +358,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
10328 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
10329 }
10330
10331 -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
10332 -
10333 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
10334 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
10335 void ieee80211s_stop(void);
10336 #else
10337 -static inline void
10338 -ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
10339 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
10340 { return false; }
10341 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
10342 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
10343 index a93906103f8b..844825829992 100644
10344 --- a/net/mac80211/mlme.c
10345 +++ b/net/mac80211/mlme.c
10346 @@ -4002,8 +4002,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
10347 if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
10348 ieee80211_queue_work(&sdata->local->hw,
10349 &sdata->u.mgd.monitor_work);
10350 - /* and do all the other regular work too */
10351 - ieee80211_queue_work(&sdata->local->hw, &sdata->work);
10352 }
10353 }
10354
10355 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
10356 index 7bb6a9383f58..ee9351affa5b 100644
10357 --- a/net/mac80211/scan.c
10358 +++ b/net/mac80211/scan.c
10359 @@ -310,6 +310,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
10360 bool was_scanning = local->scanning;
10361 struct cfg80211_scan_request *scan_req;
10362 struct ieee80211_sub_if_data *scan_sdata;
10363 + struct ieee80211_sub_if_data *sdata;
10364
10365 lockdep_assert_held(&local->mtx);
10366
10367 @@ -369,7 +370,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
10368
10369 ieee80211_mlme_notify_scan_completed(local);
10370 ieee80211_ibss_notify_scan_completed(local);
10371 - ieee80211_mesh_notify_scan_completed(local);
10372 +
10373 + /* Requeue all the work that might have been ignored while
10374 + * the scan was in progress; if there was none this will
10375 + * just be a no-op for the particular interface.
10376 + */
10377 + list_for_each_entry_rcu(sdata, &local->interfaces, list) {
10378 + if (ieee80211_sdata_running(sdata))
10379 + ieee80211_queue_work(&sdata->local->hw, &sdata->work);
10380 + }
10381 +
10382 if (was_scanning)
10383 ieee80211_start_next_roc(local);
10384 }
10385 diff --git a/net/rfkill/core.c b/net/rfkill/core.c
10386 index fa7cd792791c..a97bb7332607 100644
10387 --- a/net/rfkill/core.c
10388 +++ b/net/rfkill/core.c
10389 @@ -1081,17 +1081,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
10390 return res;
10391 }
10392
10393 -static bool rfkill_readable(struct rfkill_data *data)
10394 -{
10395 - bool r;
10396 -
10397 - mutex_lock(&data->mtx);
10398 - r = !list_empty(&data->events);
10399 - mutex_unlock(&data->mtx);
10400 -
10401 - return r;
10402 -}
10403 -
10404 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
10405 size_t count, loff_t *pos)
10406 {
10407 @@ -1108,8 +1097,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
10408 goto out;
10409 }
10410 mutex_unlock(&data->mtx);
10411 + /* since we re-check and it just compares pointers,
10412 + * using !list_empty() without locking isn't a problem
10413 + */
10414 ret = wait_event_interruptible(data->read_wait,
10415 - rfkill_readable(data));
10416 + !list_empty(&data->events));
10417 mutex_lock(&data->mtx);
10418
10419 if (ret)
10420 diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
10421 index 23e78dcd12bf..38b64f487315 100755
10422 --- a/scripts/bloat-o-meter
10423 +++ b/scripts/bloat-o-meter
10424 @@ -58,8 +58,8 @@ for name in common:
10425 delta.sort()
10426 delta.reverse()
10427
10428 -print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
10429 - (add, remove, grow, shrink, up, -down, up-down)
10430 -print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
10431 +print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
10432 + (add, remove, grow, shrink, up, -down, up-down))
10433 +print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
10434 for d, n in delta:
10435 - if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
10436 + if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
10437 diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
10438 index b123c42e7dc8..b554d7f9e3be 100644
10439 --- a/sound/core/compress_offload.c
10440 +++ b/sound/core/compress_offload.c
10441 @@ -44,6 +44,13 @@
10442 #include <sound/compress_offload.h>
10443 #include <sound/compress_driver.h>
10444
10445 +/* struct snd_compr_codec_caps overflows the ioctl bit size for some
10446 + * architectures, so we need to disable the relevant ioctls.
10447 + */
10448 +#if _IOC_SIZEBITS < 14
10449 +#define COMPR_CODEC_CAPS_OVERFLOW
10450 +#endif
10451 +
10452 /* TODO:
10453 * - add substream support for multiple devices in case of
10454 * SND_DYNAMIC_MINORS is not used
10455 @@ -438,6 +445,7 @@ out:
10456 return retval;
10457 }
10458
10459 +#ifndef COMPR_CODEC_CAPS_OVERFLOW
10460 static int
10461 snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
10462 {
10463 @@ -461,6 +469,7 @@ out:
10464 kfree(caps);
10465 return retval;
10466 }
10467 +#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
10468
10469 /* revisit this with snd_pcm_preallocate_xxx */
10470 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
10471 @@ -799,9 +808,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
10472 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
10473 retval = snd_compr_get_caps(stream, arg);
10474 break;
10475 +#ifndef COMPR_CODEC_CAPS_OVERFLOW
10476 case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
10477 retval = snd_compr_get_codec_caps(stream, arg);
10478 break;
10479 +#endif
10480 case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
10481 retval = snd_compr_set_params(stream, arg);
10482 break;
10483 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
10484 index 58550cc93f28..33e72c809e50 100644
10485 --- a/sound/core/oss/pcm_oss.c
10486 +++ b/sound/core/oss/pcm_oss.c
10487 @@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
10488 return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
10489 }
10490
10491 -static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
10492 +static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
10493 + bool trylock)
10494 {
10495 struct snd_pcm_runtime *runtime = substream->runtime;
10496 struct snd_pcm_hw_params *params, *sparams;
10497 @@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
10498 struct snd_mask sformat_mask;
10499 struct snd_mask mask;
10500
10501 - if (mutex_lock_interruptible(&runtime->oss.params_lock))
10502 + if (trylock) {
10503 + if (!(mutex_trylock(&runtime->oss.params_lock)))
10504 + return -EAGAIN;
10505 + } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
10506 return -EINTR;
10507 sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
10508 params = kmalloc(sizeof(*params), GFP_KERNEL);
10509 @@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
10510 if (asubstream == NULL)
10511 asubstream = substream;
10512 if (substream->runtime->oss.params) {
10513 - err = snd_pcm_oss_change_params(substream);
10514 + err = snd_pcm_oss_change_params(substream, false);
10515 if (err < 0)
10516 return err;
10517 }
10518 @@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
10519 return 0;
10520 runtime = substream->runtime;
10521 if (runtime->oss.params) {
10522 - err = snd_pcm_oss_change_params(substream);
10523 + err = snd_pcm_oss_change_params(substream, false);
10524 if (err < 0)
10525 return err;
10526 }
10527 @@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
10528 runtime = substream->runtime;
10529
10530 if (runtime->oss.params &&
10531 - (err = snd_pcm_oss_change_params(substream)) < 0)
10532 + (err = snd_pcm_oss_change_params(substream, false)) < 0)
10533 return err;
10534
10535 info.fragsize = runtime->oss.period_bytes;
10536 @@ -2800,7 +2804,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
10537 return -EIO;
10538
10539 if (runtime->oss.params) {
10540 - if ((err = snd_pcm_oss_change_params(substream)) < 0)
10541 + /* use mutex_trylock() for params_lock to avoid a deadlock
10542 + * between mmap_sem and params_lock taken by
10543 + * copy_from/to_user() in snd_pcm_oss_write/read()
10544 + */
10545 + err = snd_pcm_oss_change_params(substream, true);
10546 + if (err < 0)
10547 return err;
10548 }
10549 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
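
The mmap path above must use mutex_trylock() because the caller already holds mmap_sem, while the read/write paths take params_lock first and may then fault on user memory, taking mmap_sem: the classic ABBA inversion. Breaking the cycle with trylock, in miniature (pthread names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

    static int change_params_from_mmap(void)
    {
        /* lock order elsewhere is params_lock -> mmap_sem, so here,
         * already holding mmap_sem, we must not block on params_lock */
        if (pthread_mutex_trylock(&params_lock) != 0)
            return -EAGAIN;
        /* ... reprogram hw params ... */
        pthread_mutex_unlock(&params_lock);
        return 0;
    }

    int main(void)
    {
        pthread_mutex_lock(&mmap_sem);      /* what the caller holds */
        printf("change_params: %d\n", change_params_from_mmap());
        pthread_mutex_unlock(&mmap_sem);
        return 0;
    }
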
10550 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
10551 index a7759846fbaa..795437b10082 100644
10552 --- a/sound/core/rawmidi.c
10553 +++ b/sound/core/rawmidi.c
10554 @@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
10555 unsigned long flags;
10556 long result = 0, count1;
10557 struct snd_rawmidi_runtime *runtime = substream->runtime;
10558 + unsigned long appl_ptr;
10559
10560 + spin_lock_irqsave(&runtime->lock, flags);
10561 while (count > 0 && runtime->avail) {
10562 count1 = runtime->buffer_size - runtime->appl_ptr;
10563 if (count1 > count)
10564 count1 = count;
10565 - spin_lock_irqsave(&runtime->lock, flags);
10566 if (count1 > (int)runtime->avail)
10567 count1 = runtime->avail;
10568 +
10569 + /* update runtime->appl_ptr before unlocking for userbuf */
10570 + appl_ptr = runtime->appl_ptr;
10571 + runtime->appl_ptr += count1;
10572 + runtime->appl_ptr %= runtime->buffer_size;
10573 + runtime->avail -= count1;
10574 +
10575 if (kernelbuf)
10576 - memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
10577 + memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
10578 if (userbuf) {
10579 spin_unlock_irqrestore(&runtime->lock, flags);
10580 if (copy_to_user(userbuf + result,
10581 - runtime->buffer + runtime->appl_ptr, count1)) {
10582 + runtime->buffer + appl_ptr, count1)) {
10583 return result > 0 ? result : -EFAULT;
10584 }
10585 spin_lock_irqsave(&runtime->lock, flags);
10586 }
10587 - runtime->appl_ptr += count1;
10588 - runtime->appl_ptr %= runtime->buffer_size;
10589 - runtime->avail -= count1;
10590 - spin_unlock_irqrestore(&runtime->lock, flags);
10591 result += count1;
10592 count -= count1;
10593 }
10594 + spin_unlock_irqrestore(&runtime->lock, flags);
10595 return result;
10596 }
10597
10598 @@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
10599 EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
10600
10601 /**
10602 - * snd_rawmidi_transmit_peek - copy data from the internal buffer
10603 + * __snd_rawmidi_transmit_peek - copy data from the internal buffer
10604 * @substream: the rawmidi substream
10605 * @buffer: the buffer pointer
10606 * @count: data size to transfer
10607 *
10608 - * Copies data from the internal output buffer to the given buffer.
10609 - *
10610 - * Call this in the interrupt handler when the midi output is ready,
10611 - * and call snd_rawmidi_transmit_ack() after the transmission is
10612 - * finished.
10613 - *
10614 - * Return: The size of copied data, or a negative error code on failure.
10615 + * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
10616 */
10617 -int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
10618 +int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
10619 unsigned char *buffer, int count)
10620 {
10621 - unsigned long flags;
10622 int result, count1;
10623 struct snd_rawmidi_runtime *runtime = substream->runtime;
10624
10625 @@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
10626 return -EINVAL;
10627 }
10628 result = 0;
10629 - spin_lock_irqsave(&runtime->lock, flags);
10630 if (runtime->avail >= runtime->buffer_size) {
10631 /* warning: lowlevel layer MUST trigger down the hardware */
10632 goto __skip;
10633 @@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
10634 }
10635 }
10636 __skip:
10637 + return result;
10638 +}
10639 +EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
10640 +
10641 +/**
10642 + * snd_rawmidi_transmit_peek - copy data from the internal buffer
10643 + * @substream: the rawmidi substream
10644 + * @buffer: the buffer pointer
10645 + * @count: data size to transfer
10646 + *
10647 + * Copies data from the internal output buffer to the given buffer.
10648 + *
10649 + * Call this in the interrupt handler when the midi output is ready,
10650 + * and call snd_rawmidi_transmit_ack() after the transmission is
10651 + * finished.
10652 + *
10653 + * Return: The size of copied data, or a negative error code on failure.
10654 + */
10655 +int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
10656 + unsigned char *buffer, int count)
10657 +{
10658 + struct snd_rawmidi_runtime *runtime = substream->runtime;
10659 + int result;
10660 + unsigned long flags;
10661 +
10662 + spin_lock_irqsave(&runtime->lock, flags);
10663 + result = __snd_rawmidi_transmit_peek(substream, buffer, count);
10664 spin_unlock_irqrestore(&runtime->lock, flags);
10665 return result;
10666 }
10667 EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
10668
10669 /**
10670 - * snd_rawmidi_transmit_ack - acknowledge the transmission
10671 + * __snd_rawmidi_transmit_ack - acknowledge the transmission
10672 * @substream: the rawmidi substream
10673 * @count: the transferred count
10674 *
10675 - * Advances the hardware pointer for the internal output buffer with
10676 - * the given size and updates the condition.
10677 - * Call after the transmission is finished.
10678 - *
10679 - * Return: The advanced size if successful, or a negative error code on failure.
10680 + * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
10681 */
10682 -int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
10683 +int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
10684 {
10685 - unsigned long flags;
10686 struct snd_rawmidi_runtime *runtime = substream->runtime;
10687
10688 if (runtime->buffer == NULL) {
10689 @@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
10690 "snd_rawmidi_transmit_ack: output is not active!!!\n");
10691 return -EINVAL;
10692 }
10693 - spin_lock_irqsave(&runtime->lock, flags);
10694 snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
10695 runtime->hw_ptr += count;
10696 runtime->hw_ptr %= runtime->buffer_size;
10697 @@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
10698 if (runtime->drain || snd_rawmidi_ready(substream))
10699 wake_up(&runtime->sleep);
10700 }
10701 - spin_unlock_irqrestore(&runtime->lock, flags);
10702 return count;
10703 }
10704 +EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
10705 +
10706 +/**
10707 + * snd_rawmidi_transmit_ack - acknowledge the transmission
10708 + * @substream: the rawmidi substream
10709 + * @count: the transferred count
10710 + *
10711 + * Advances the hardware pointer for the internal output buffer with
10712 + * the given size and updates the condition.
10713 + * Call after the transmission is finished.
10714 + *
10715 + * Return: The advanced size if successful, or a negative error code on failure.
10716 + */
10717 +int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
10718 +{
10719 + struct snd_rawmidi_runtime *runtime = substream->runtime;
10720 + int result;
10721 + unsigned long flags;
10722 +
10723 + spin_lock_irqsave(&runtime->lock, flags);
10724 + result = __snd_rawmidi_transmit_ack(substream, count);
10725 + spin_unlock_irqrestore(&runtime->lock, flags);
10726 + return result;
10727 +}
10728 EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
10729
10730 /**
10731 @@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
10732 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
10733 unsigned char *buffer, int count)
10734 {
10735 + struct snd_rawmidi_runtime *runtime = substream->runtime;
10736 + int result;
10737 + unsigned long flags;
10738 +
10739 + spin_lock_irqsave(&runtime->lock, flags);
10740 if (!substream->opened)
10741 - return -EBADFD;
10742 - count = snd_rawmidi_transmit_peek(substream, buffer, count);
10743 - if (count < 0)
10744 - return count;
10745 - return snd_rawmidi_transmit_ack(substream, count);
10746 + result = -EBADFD;
10747 + else {
10748 + count = __snd_rawmidi_transmit_peek(substream, buffer, count);
10749 + if (count <= 0)
10750 + result = count;
10751 + else
10752 + result = __snd_rawmidi_transmit_ack(substream, count);
10753 + }
10754 + spin_unlock_irqrestore(&runtime->lock, flags);
10755 + return result;
10756 }
10757 EXPORT_SYMBOL(snd_rawmidi_transmit);
10758
10759 @@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
10760 unsigned long flags;
10761 long count1, result;
10762 struct snd_rawmidi_runtime *runtime = substream->runtime;
10763 + unsigned long appl_ptr;
10764
10765 - if (snd_BUG_ON(!kernelbuf && !userbuf))
10766 + if (!kernelbuf && !userbuf)
10767 return -EINVAL;
10768 if (snd_BUG_ON(!runtime->buffer))
10769 return -EINVAL;
10770 @@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
10771 count1 = count;
10772 if (count1 > (long)runtime->avail)
10773 count1 = runtime->avail;
10774 +
10775 + /* update runtime->appl_ptr before unlocking for userbuf */
10776 + appl_ptr = runtime->appl_ptr;
10777 + runtime->appl_ptr += count1;
10778 + runtime->appl_ptr %= runtime->buffer_size;
10779 + runtime->avail -= count1;
10780 +
10781 if (kernelbuf)
10782 - memcpy(runtime->buffer + runtime->appl_ptr,
10783 + memcpy(runtime->buffer + appl_ptr,
10784 kernelbuf + result, count1);
10785 else if (userbuf) {
10786 spin_unlock_irqrestore(&runtime->lock, flags);
10787 - if (copy_from_user(runtime->buffer + runtime->appl_ptr,
10788 + if (copy_from_user(runtime->buffer + appl_ptr,
10789 userbuf + result, count1)) {
10790 spin_lock_irqsave(&runtime->lock, flags);
10791 result = result > 0 ? result : -EFAULT;
10792 @@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
10793 }
10794 spin_lock_irqsave(&runtime->lock, flags);
10795 }
10796 - runtime->appl_ptr += count1;
10797 - runtime->appl_ptr %= runtime->buffer_size;
10798 - runtime->avail -= count1;
10799 result += count1;
10800 count -= count1;
10801 }
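
The rawmidi rework follows a standard kernel pattern: the bodies move into __-prefixed helpers that assume the lock is held, the public functions become thin locking wrappers, and compound operations such as snd_rawmidi_transmit() run peek and ack inside one critical section so no other writer can slip between them. The pattern in miniature:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int avail = 3;

    static int __peek(void) { return avail; }          /* lock held */
    static int __ack(int n) { avail -= n; return n; }  /* lock held */

    static int transmit(int n)
    {
        int ret;

        pthread_mutex_lock(&lock);
        ret = __peek();
        if (ret >= n)
            ret = __ack(n);         /* atomic with the peek */
        else
            ret = -1;
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        printf("transmitted %d, avail now %d\n", transmit(2), avail);
        return 0;
    }
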
10802 diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
10803 index 2de3feff70d0..dad5b1123e46 100644
10804 --- a/sound/core/seq/oss/seq_oss_init.c
10805 +++ b/sound/core/seq/oss/seq_oss_init.c
10806 @@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
10807
10808 dp->index = i;
10809 if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
10810 - pr_err("ALSA: seq_oss: too many applications\n");
10811 + pr_debug("ALSA: seq_oss: too many applications\n");
10812 rc = -ENOMEM;
10813 goto _error;
10814 }
10815 diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
10816 index 48e4fe1b68ab..f38cf91b4faf 100644
10817 --- a/sound/core/seq/oss/seq_oss_synth.c
10818 +++ b/sound/core/seq/oss/seq_oss_synth.c
10819 @@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
10820 struct seq_oss_synth *rec;
10821 struct seq_oss_synthinfo *info;
10822
10823 - if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
10824 + if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
10825 return;
10826 for (i = 0; i < dp->max_synthdev; i++) {
10827 info = &dp->synths[i];
10828 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
10829 index bd4741442909..ce6703ecfcef 100644
10830 --- a/sound/core/seq/seq_clientmgr.c
10831 +++ b/sound/core/seq/seq_clientmgr.c
10832 @@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
10833 else
10834 down_read(&grp->list_mutex);
10835 list_for_each_entry(subs, &grp->list_head, src_list) {
10836 + /* both ports ready? */
10837 + if (atomic_read(&subs->ref_count) != 2)
10838 + continue;
10839 event->dest = subs->info.dest;
10840 if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
10841 /* convert time according to flag with subscription */
10842 diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
10843 index 55170a20ae72..921fb2bd8fad 100644
10844 --- a/sound/core/seq/seq_ports.c
10845 +++ b/sound/core/seq/seq_ports.c
10846 @@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
10847 }
10848
10849 /* */
10850 -enum group_type {
10851 - SRC_LIST, DEST_LIST
10852 -};
10853 -
10854 static int subscribe_port(struct snd_seq_client *client,
10855 struct snd_seq_client_port *port,
10856 struct snd_seq_port_subs_info *grp,
10857 @@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
10858 return NULL;
10859 }
10860
10861 +static void delete_and_unsubscribe_port(struct snd_seq_client *client,
10862 + struct snd_seq_client_port *port,
10863 + struct snd_seq_subscribers *subs,
10864 + bool is_src, bool ack);
10865 +
10866 +static inline struct snd_seq_subscribers *
10867 +get_subscriber(struct list_head *p, bool is_src)
10868 +{
10869 + if (is_src)
10870 + return list_entry(p, struct snd_seq_subscribers, src_list);
10871 + else
10872 + return list_entry(p, struct snd_seq_subscribers, dest_list);
10873 +}
10874 +
10875 /*
10876 * remove all subscribers on the list
10877 * this is called from port_delete, for each src and dest list.
10878 @@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
10879 static void clear_subscriber_list(struct snd_seq_client *client,
10880 struct snd_seq_client_port *port,
10881 struct snd_seq_port_subs_info *grp,
10882 - int grptype)
10883 + int is_src)
10884 {
10885 struct list_head *p, *n;
10886
10887 @@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
10888 struct snd_seq_client *c;
10889 struct snd_seq_client_port *aport;
10890
10891 - if (grptype == SRC_LIST) {
10892 - subs = list_entry(p, struct snd_seq_subscribers, src_list);
10893 + subs = get_subscriber(p, is_src);
10894 + if (is_src)
10895 aport = get_client_port(&subs->info.dest, &c);
10896 - } else {
10897 - subs = list_entry(p, struct snd_seq_subscribers, dest_list);
10898 + else
10899 aport = get_client_port(&subs->info.sender, &c);
10900 - }
10901 - list_del(p);
10902 - unsubscribe_port(client, port, grp, &subs->info, 0);
10903 + delete_and_unsubscribe_port(client, port, subs, is_src, false);
10904 +
10905 if (!aport) {
10906 /* looks like the connected port is being deleted.
10907 * we decrease the counter, and when both ports are deleted
10908 @@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
10909 */
10910 if (atomic_dec_and_test(&subs->ref_count))
10911 kfree(subs);
10912 - } else {
10913 - /* ok we got the connected port */
10914 - struct snd_seq_port_subs_info *agrp;
10915 - agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
10916 - down_write(&agrp->list_mutex);
10917 - if (grptype == SRC_LIST)
10918 - list_del(&subs->dest_list);
10919 - else
10920 - list_del(&subs->src_list);
10921 - up_write(&agrp->list_mutex);
10922 - unsubscribe_port(c, aport, agrp, &subs->info, 1);
10923 - kfree(subs);
10924 - snd_seq_port_unlock(aport);
10925 - snd_seq_client_unlock(c);
10926 + continue;
10927 }
10928 +
10929 + /* ok we got the connected port */
10930 + delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
10931 + kfree(subs);
10932 + snd_seq_port_unlock(aport);
10933 + snd_seq_client_unlock(c);
10934 }
10935 }
10936
10937 @@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
10938 snd_use_lock_sync(&port->use_lock);
10939
10940 /* clear subscribers info */
10941 - clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
10942 - clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
10943 + clear_subscriber_list(client, port, &port->c_src, true);
10944 + clear_subscriber_list(client, port, &port->c_dest, false);
10945
10946 if (port->private_free)
10947 port->private_free(port->private_data);
10948 @@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
10949 return 0;
10950 }
10951
10952 -
10953 -/* connect two ports */
10954 -int snd_seq_port_connect(struct snd_seq_client *connector,
10955 - struct snd_seq_client *src_client,
10956 - struct snd_seq_client_port *src_port,
10957 - struct snd_seq_client *dest_client,
10958 - struct snd_seq_client_port *dest_port,
10959 - struct snd_seq_port_subscribe *info)
10960 +static int check_and_subscribe_port(struct snd_seq_client *client,
10961 + struct snd_seq_client_port *port,
10962 + struct snd_seq_subscribers *subs,
10963 + bool is_src, bool exclusive, bool ack)
10964 {
10965 - struct snd_seq_port_subs_info *src = &src_port->c_src;
10966 - struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
10967 - struct snd_seq_subscribers *subs, *s;
10968 - int err, src_called = 0;
10969 - unsigned long flags;
10970 - int exclusive;
10971 + struct snd_seq_port_subs_info *grp;
10972 + struct list_head *p;
10973 + struct snd_seq_subscribers *s;
10974 + int err;
10975
10976 - subs = kzalloc(sizeof(*subs), GFP_KERNEL);
10977 - if (! subs)
10978 - return -ENOMEM;
10979 -
10980 - subs->info = *info;
10981 - atomic_set(&subs->ref_count, 2);
10982 -
10983 - down_write(&src->list_mutex);
10984 - down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
10985 -
10986 - exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
10987 + grp = is_src ? &port->c_src : &port->c_dest;
10988 err = -EBUSY;
10989 + down_write(&grp->list_mutex);
10990 if (exclusive) {
10991 - if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
10992 + if (!list_empty(&grp->list_head))
10993 goto __error;
10994 } else {
10995 - if (src->exclusive || dest->exclusive)
10996 + if (grp->exclusive)
10997 goto __error;
10998 /* check whether already exists */
10999 - list_for_each_entry(s, &src->list_head, src_list) {
11000 - if (match_subs_info(info, &s->info))
11001 - goto __error;
11002 - }
11003 - list_for_each_entry(s, &dest->list_head, dest_list) {
11004 - if (match_subs_info(info, &s->info))
11005 + list_for_each(p, &grp->list_head) {
11006 + s = get_subscriber(p, is_src);
11007 + if (match_subs_info(&subs->info, &s->info))
11008 goto __error;
11009 }
11010 }
11011
11012 - if ((err = subscribe_port(src_client, src_port, src, info,
11013 - connector->number != src_client->number)) < 0)
11014 - goto __error;
11015 - src_called = 1;
11016 -
11017 - if ((err = subscribe_port(dest_client, dest_port, dest, info,
11018 - connector->number != dest_client->number)) < 0)
11019 + err = subscribe_port(client, port, grp, &subs->info, ack);
11020 + if (err < 0) {
11021 + grp->exclusive = 0;
11022 goto __error;
11023 + }
11024
11025 /* add to list */
11026 - write_lock_irqsave(&src->list_lock, flags);
11027 - // write_lock(&dest->list_lock); // no other lock yet
11028 - list_add_tail(&subs->src_list, &src->list_head);
11029 - list_add_tail(&subs->dest_list, &dest->list_head);
11030 - // write_unlock(&dest->list_lock); // no other lock yet
11031 - write_unlock_irqrestore(&src->list_lock, flags);
11032 + write_lock_irq(&grp->list_lock);
11033 + if (is_src)
11034 + list_add_tail(&subs->src_list, &grp->list_head);
11035 + else
11036 + list_add_tail(&subs->dest_list, &grp->list_head);
11037 + grp->exclusive = exclusive;
11038 + atomic_inc(&subs->ref_count);
11039 + write_unlock_irq(&grp->list_lock);
11040 + err = 0;
11041 +
11042 + __error:
11043 + up_write(&grp->list_mutex);
11044 + return err;
11045 +}
11046
11047 - src->exclusive = dest->exclusive = exclusive;
11048 +static void delete_and_unsubscribe_port(struct snd_seq_client *client,
11049 + struct snd_seq_client_port *port,
11050 + struct snd_seq_subscribers *subs,
11051 + bool is_src, bool ack)
11052 +{
11053 + struct snd_seq_port_subs_info *grp;
11054 +
11055 + grp = is_src ? &port->c_src : &port->c_dest;
11056 + down_write(&grp->list_mutex);
11057 + write_lock_irq(&grp->list_lock);
11058 + if (is_src)
11059 + list_del(&subs->src_list);
11060 + else
11061 + list_del(&subs->dest_list);
11062 + grp->exclusive = 0;
11063 + write_unlock_irq(&grp->list_lock);
11064 + up_write(&grp->list_mutex);
11065 +
11066 + unsubscribe_port(client, port, grp, &subs->info, ack);
11067 +}
11068 +
11069 +/* connect two ports */
11070 +int snd_seq_port_connect(struct snd_seq_client *connector,
11071 + struct snd_seq_client *src_client,
11072 + struct snd_seq_client_port *src_port,
11073 + struct snd_seq_client *dest_client,
11074 + struct snd_seq_client_port *dest_port,
11075 + struct snd_seq_port_subscribe *info)
11076 +{
11077 + struct snd_seq_subscribers *subs;
11078 + bool exclusive;
11079 + int err;
11080 +
11081 + subs = kzalloc(sizeof(*subs), GFP_KERNEL);
11082 + if (!subs)
11083 + return -ENOMEM;
11084 +
11085 + subs->info = *info;
11086 + atomic_set(&subs->ref_count, 0);
11087 + INIT_LIST_HEAD(&subs->src_list);
11088 + INIT_LIST_HEAD(&subs->dest_list);
11089 +
11090 + exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
11091 +
11092 + err = check_and_subscribe_port(src_client, src_port, subs, true,
11093 + exclusive,
11094 + connector->number != src_client->number);
11095 + if (err < 0)
11096 + goto error;
11097 + err = check_and_subscribe_port(dest_client, dest_port, subs, false,
11098 + exclusive,
11099 + connector->number != dest_client->number);
11100 + if (err < 0)
11101 + goto error_dest;
11102
11103 - up_write(&dest->list_mutex);
11104 - up_write(&src->list_mutex);
11105 return 0;
11106
11107 - __error:
11108 - if (src_called)
11109 - unsubscribe_port(src_client, src_port, src, info,
11110 - connector->number != src_client->number);
11111 + error_dest:
11112 + delete_and_unsubscribe_port(src_client, src_port, subs, true,
11113 + connector->number != src_client->number);
11114 + error:
11115 kfree(subs);
11116 - up_write(&dest->list_mutex);
11117 - up_write(&src->list_mutex);
11118 return err;
11119 }
11120
11121 -
11122 /* remove the connection */
11123 int snd_seq_port_disconnect(struct snd_seq_client *connector,
11124 struct snd_seq_client *src_client,
11125 @@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
11126 struct snd_seq_port_subscribe *info)
11127 {
11128 struct snd_seq_port_subs_info *src = &src_port->c_src;
11129 - struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
11130 struct snd_seq_subscribers *subs;
11131 int err = -ENOENT;
11132 - unsigned long flags;
11133
11134 down_write(&src->list_mutex);
11135 - down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
11136 -
11137 /* look for the connection */
11138 list_for_each_entry(subs, &src->list_head, src_list) {
11139 if (match_subs_info(info, &subs->info)) {
11140 - write_lock_irqsave(&src->list_lock, flags);
11141 - // write_lock(&dest->list_lock); // no lock yet
11142 - list_del(&subs->src_list);
11143 - list_del(&subs->dest_list);
11144 - // write_unlock(&dest->list_lock);
11145 - write_unlock_irqrestore(&src->list_lock, flags);
11146 - src->exclusive = dest->exclusive = 0;
11147 - unsubscribe_port(src_client, src_port, src, info,
11148 - connector->number != src_client->number);
11149 - unsubscribe_port(dest_client, dest_port, dest, info,
11150 - connector->number != dest_client->number);
11151 - kfree(subs);
11152 + atomic_dec(&subs->ref_count); /* mark as not ready */
11153 err = 0;
11154 break;
11155 }
11156 }
11157 -
11158 - up_write(&dest->list_mutex);
11159 up_write(&src->list_mutex);
11160 - return err;
11161 + if (err < 0)
11162 + return err;
11163 +
11164 + delete_and_unsubscribe_port(src_client, src_port, subs, true,
11165 + connector->number != src_client->number);
11166 + delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
11167 + connector->number != dest_client->number);
11168 + kfree(subs);
11169 + return 0;
11170 }
11171
11172
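The seq_ports.c hunks above replace the old nested src/dest mutex ordering with a helper that unlinks a subscriber under its own group's locks and only issues the unsubscribe callback after both locks are dropped; the disconnect path likewise just marks the entry (the atomic_dec of ref_count) while scanning under the mutex and does the real unlink and kfree afterwards. A minimal sketch of that teardown pattern, with stand-in types (struct group and the callback position are illustrative, not the kernel's):

    #include <linux/list.h>
    #include <linux/rwsem.h>
    #include <linux/spinlock.h>

    struct group {
            struct rw_semaphore list_mutex;   /* sleeping readers/writers */
            rwlock_t list_lock;               /* IRQ-context list walkers */
            int exclusive;
    };

    /* Unlink under the group's own locks; notify only after dropping them. */
    static void delete_and_notify(struct group *grp, struct list_head *link)
    {
            down_write(&grp->list_mutex);
            write_lock_irq(&grp->list_lock);
            list_del(link);
            grp->exclusive = 0;
            write_unlock_irq(&grp->list_lock);
            up_write(&grp->list_mutex);

            /* the unsubscribe callback may sleep safely here */
    }

Keeping the callback outside the critical section is what lets the patch drop down_write_nested() and the SINGLE_DEPTH_NESTING annotation entirely.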
11173 diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
11174 index 186f1611103c..a2468f1101d1 100644
11175 --- a/sound/core/seq/seq_timer.c
11176 +++ b/sound/core/seq/seq_timer.c
11177 @@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
11178
11179 void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
11180 {
11181 + unsigned long flags;
11182 +
11183 + spin_lock_irqsave(&tmr->lock, flags);
11184 /* setup defaults */
11185 tmr->ppq = 96; /* 96 PPQ */
11186 tmr->tempo = 500000; /* 120 BPM */
11187 @@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
11188 tmr->preferred_resolution = seq_default_timer_resolution;
11189
11190 tmr->skew = tmr->skew_base = SKEW_BASE;
11191 + spin_unlock_irqrestore(&tmr->lock, flags);
11192 }
11193
11194 -void snd_seq_timer_reset(struct snd_seq_timer * tmr)
11195 +static void seq_timer_reset(struct snd_seq_timer *tmr)
11196 {
11197 - unsigned long flags;
11198 -
11199 - spin_lock_irqsave(&tmr->lock, flags);
11200 -
11201 /* reset time & songposition */
11202 tmr->cur_time.tv_sec = 0;
11203 tmr->cur_time.tv_nsec = 0;
11204
11205 tmr->tick.cur_tick = 0;
11206 tmr->tick.fraction = 0;
11207 +}
11208 +
11209 +void snd_seq_timer_reset(struct snd_seq_timer *tmr)
11210 +{
11211 + unsigned long flags;
11212
11213 + spin_lock_irqsave(&tmr->lock, flags);
11214 + seq_timer_reset(tmr);
11215 spin_unlock_irqrestore(&tmr->lock, flags);
11216 }
11217
11218 @@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
11219 tmr = q->timer;
11220 if (tmr == NULL)
11221 return;
11222 - if (!tmr->running)
11223 + spin_lock_irqsave(&tmr->lock, flags);
11224 + if (!tmr->running) {
11225 + spin_unlock_irqrestore(&tmr->lock, flags);
11226 return;
11227 + }
11228
11229 resolution *= ticks;
11230 if (tmr->skew != tmr->skew_base) {
11231 @@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
11232 (((resolution & 0xffff) * tmr->skew) >> 16);
11233 }
11234
11235 - spin_lock_irqsave(&tmr->lock, flags);
11236 -
11237 /* update timer */
11238 snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
11239
11240 @@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
11241 t->callback = snd_seq_timer_interrupt;
11242 t->callback_data = q;
11243 t->flags |= SNDRV_TIMER_IFLG_AUTO;
11244 + spin_lock_irq(&tmr->lock);
11245 tmr->timeri = t;
11246 + spin_unlock_irq(&tmr->lock);
11247 return 0;
11248 }
11249
11250 int snd_seq_timer_close(struct snd_seq_queue *q)
11251 {
11252 struct snd_seq_timer *tmr;
11253 + struct snd_timer_instance *t;
11254
11255 tmr = q->timer;
11256 if (snd_BUG_ON(!tmr))
11257 return -EINVAL;
11258 - if (tmr->timeri) {
11259 - snd_timer_stop(tmr->timeri);
11260 - snd_timer_close(tmr->timeri);
11261 - tmr->timeri = NULL;
11262 - }
11263 + spin_lock_irq(&tmr->lock);
11264 + t = tmr->timeri;
11265 + tmr->timeri = NULL;
11266 + spin_unlock_irq(&tmr->lock);
11267 + if (t)
11268 + snd_timer_close(t);
11269 return 0;
11270 }
11271
11272 -int snd_seq_timer_stop(struct snd_seq_timer * tmr)
11273 +static int seq_timer_stop(struct snd_seq_timer *tmr)
11274 {
11275 if (! tmr->timeri)
11276 return -EINVAL;
11277 @@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
11278 return 0;
11279 }
11280
11281 +int snd_seq_timer_stop(struct snd_seq_timer *tmr)
11282 +{
11283 + unsigned long flags;
11284 + int err;
11285 +
11286 + spin_lock_irqsave(&tmr->lock, flags);
11287 + err = seq_timer_stop(tmr);
11288 + spin_unlock_irqrestore(&tmr->lock, flags);
11289 + return err;
11290 +}
11291 +
11292 static int initialize_timer(struct snd_seq_timer *tmr)
11293 {
11294 struct snd_timer *t;
11295 @@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
11296 return 0;
11297 }
11298
11299 -int snd_seq_timer_start(struct snd_seq_timer * tmr)
11300 +static int seq_timer_start(struct snd_seq_timer *tmr)
11301 {
11302 if (! tmr->timeri)
11303 return -EINVAL;
11304 if (tmr->running)
11305 - snd_seq_timer_stop(tmr);
11306 - snd_seq_timer_reset(tmr);
11307 + seq_timer_stop(tmr);
11308 + seq_timer_reset(tmr);
11309 if (initialize_timer(tmr) < 0)
11310 return -EINVAL;
11311 snd_timer_start(tmr->timeri, tmr->ticks);
11312 @@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
11313 return 0;
11314 }
11315
11316 -int snd_seq_timer_continue(struct snd_seq_timer * tmr)
11317 +int snd_seq_timer_start(struct snd_seq_timer *tmr)
11318 +{
11319 + unsigned long flags;
11320 + int err;
11321 +
11322 + spin_lock_irqsave(&tmr->lock, flags);
11323 + err = seq_timer_start(tmr);
11324 + spin_unlock_irqrestore(&tmr->lock, flags);
11325 + return err;
11326 +}
11327 +
11328 +static int seq_timer_continue(struct snd_seq_timer *tmr)
11329 {
11330 if (! tmr->timeri)
11331 return -EINVAL;
11332 if (tmr->running)
11333 return -EBUSY;
11334 if (! tmr->initialized) {
11335 - snd_seq_timer_reset(tmr);
11336 + seq_timer_reset(tmr);
11337 if (initialize_timer(tmr) < 0)
11338 return -EINVAL;
11339 }
11340 @@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
11341 return 0;
11342 }
11343
11344 +int snd_seq_timer_continue(struct snd_seq_timer *tmr)
11345 +{
11346 + unsigned long flags;
11347 + int err;
11348 +
11349 + spin_lock_irqsave(&tmr->lock, flags);
11350 + err = seq_timer_continue(tmr);
11351 + spin_unlock_irqrestore(&tmr->lock, flags);
11352 + return err;
11353 +}
11354 +
11355 /* return current 'real' time. use timeofday() to get better granularity. */
11356 snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
11357 {
11358 snd_seq_real_time_t cur_time;
11359 + unsigned long flags;
11360
11361 + spin_lock_irqsave(&tmr->lock, flags);
11362 cur_time = tmr->cur_time;
11363 if (tmr->running) {
11364 struct timeval tm;
11365 @@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
11366 }
11367 snd_seq_sanity_real_time(&cur_time);
11368 }
11369 -
11370 + spin_unlock_irqrestore(&tmr->lock, flags);
11371 return cur_time;
11372 }
11373
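Every seq_timer.c change follows the same refactoring recipe: keep a static, lock-free core (seq_timer_stop(), seq_timer_start(), seq_timer_continue(), seq_timer_reset()) for internal callers that already hold tmr->lock, and make the exported entry point a thin wrapper that takes the spinlock around it. A generic sketch of the split, assuming a simplified my_timer type:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    struct my_timer {
            spinlock_t lock;
            int running;
    };

    /* Core logic; the caller must already hold tmr->lock. */
    static int my_timer_stop_core(struct my_timer *tmr)
    {
            if (!tmr->running)
                    return -EINVAL;
            tmr->running = 0;
            return 0;
    }

    /* Exported wrapper: lock, delegate, unlock. */
    int my_timer_stop(struct my_timer *tmr)
    {
            unsigned long flags;
            int err;

            spin_lock_irqsave(&tmr->lock, flags);
            err = my_timer_stop_core(tmr);
            spin_unlock_irqrestore(&tmr->lock, flags);
            return err;
    }

The interrupt handler gets the mirror-image fix: it now takes tmr->lock before testing tmr->running, so a concurrent stop can no longer race past the check.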
11374 diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
11375 index 56e0f4cd3f82..81134e067184 100644
11376 --- a/sound/core/seq/seq_virmidi.c
11377 +++ b/sound/core/seq/seq_virmidi.c
11378 @@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
11379 struct snd_virmidi *vmidi = substream->runtime->private_data;
11380 int count, res;
11381 unsigned char buf[32], *pbuf;
11382 + unsigned long flags;
11383
11384 if (up) {
11385 vmidi->trigger = 1;
11386 if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
11387 !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
11388 - snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
11389 - return; /* ignored */
11390 + while (snd_rawmidi_transmit(substream, buf,
11391 + sizeof(buf)) > 0) {
11392 + /* ignored */
11393 + }
11394 + return;
11395 }
11396 if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
11397 if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
11398 return;
11399 vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
11400 }
11401 + spin_lock_irqsave(&substream->runtime->lock, flags);
11402 while (1) {
11403 - count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
11404 + count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
11405 if (count <= 0)
11406 break;
11407 pbuf = buf;
11408 @@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
11409 snd_midi_event_reset_encode(vmidi->parser);
11410 continue;
11411 }
11412 - snd_rawmidi_transmit_ack(substream, res);
11413 + __snd_rawmidi_transmit_ack(substream, res);
11414 pbuf += res;
11415 count -= res;
11416 if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
11417 if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
11418 - return;
11419 + goto out;
11420 vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
11421 }
11422 }
11423 }
11424 + out:
11425 + spin_unlock_irqrestore(&substream->runtime->lock, flags);
11426 } else {
11427 vmidi->trigger = 0;
11428 }
11429 @@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
11430 */
11431 static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
11432 {
11433 + struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
11434 struct snd_virmidi *vmidi = substream->runtime->private_data;
11435 - snd_midi_event_free(vmidi->parser);
11436 +
11437 + write_lock_irq(&rdev->filelist_lock);
11438 list_del(&vmidi->list);
11439 + write_unlock_irq(&rdev->filelist_lock);
11440 + snd_midi_event_free(vmidi->parser);
11441 substream->runtime->private_data = NULL;
11442 kfree(vmidi);
11443 return 0;
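Two idioms carry the seq_virmidi.c fix: draining a rawmidi buffer by looping snd_rawmidi_transmit() and discarding the data, and holding substream->runtime->lock across the peek/ack pair by switching to the double-underscore variants that expect the lock to already be held. A sketch of the locked peek/ack loop; consume() is a hypothetical sink that reports how many bytes it accepted:

    #include <linux/spinlock.h>
    #include <sound/rawmidi.h>

    int consume(const unsigned char *buf, int count);   /* hypothetical */

    static void drain_with_peek_ack(struct snd_rawmidi_substream *substream)
    {
            unsigned char buf[32];
            unsigned long flags;
            int count, used;

            spin_lock_irqsave(&substream->runtime->lock, flags);
            while ((count = __snd_rawmidi_transmit_peek(substream, buf,
                                                        sizeof(buf))) > 0) {
                    used = consume(buf, count);   /* may accept fewer bytes */
                    if (used <= 0)
                            break;                /* nothing taken: stop */
                    __snd_rawmidi_transmit_ack(substream, used);
            }
            spin_unlock_irqrestore(&substream->runtime->lock, flags);
    }

The input-close hunk applies the same discipline to the device's file list: the entry is removed under filelist_lock before the parser is freed, so a concurrent reader can never walk into freed memory.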
11444 diff --git a/sound/core/timer.c b/sound/core/timer.c
11445 index a419878901c4..00e8c5f4de17 100644
11446 --- a/sound/core/timer.c
11447 +++ b/sound/core/timer.c
11448 @@ -305,8 +305,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
11449 return 0;
11450 }
11451
11452 -static int _snd_timer_stop(struct snd_timer_instance *timeri,
11453 - int keep_flag, int event);
11454 +static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
11455
11456 /*
11457 * close a timer instance
11458 @@ -348,7 +347,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
11459 spin_unlock_irq(&timer->lock);
11460 mutex_lock(&register_mutex);
11461 list_del(&timeri->open_list);
11462 - if (timer && list_empty(&timer->open_list_head) &&
11463 + if (list_empty(&timer->open_list_head) &&
11464 timer->hw.close)
11465 timer->hw.close(timer);
11466 /* remove slave links */
11467 @@ -452,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
11468 unsigned long flags;
11469
11470 spin_lock_irqsave(&slave_active_lock, flags);
11471 + if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
11472 + spin_unlock_irqrestore(&slave_active_lock, flags);
11473 + return -EBUSY;
11474 + }
11475 timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
11476 if (timeri->master && timeri->timer) {
11477 spin_lock(&timeri->timer->lock);
11478 @@ -476,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
11479 return -EINVAL;
11480 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
11481 result = snd_timer_start_slave(timeri);
11482 - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
11483 + if (result >= 0)
11484 + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
11485 return result;
11486 }
11487 timer = timeri->timer;
11488 @@ -485,16 +489,22 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
11489 if (timer->card && timer->card->shutdown)
11490 return -ENODEV;
11491 spin_lock_irqsave(&timer->lock, flags);
11492 + if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
11493 + SNDRV_TIMER_IFLG_START)) {
11494 + result = -EBUSY;
11495 + goto unlock;
11496 + }
11497 timeri->ticks = timeri->cticks = ticks;
11498 timeri->pticks = 0;
11499 result = snd_timer_start1(timer, timeri, ticks);
11500 + unlock:
11501 spin_unlock_irqrestore(&timer->lock, flags);
11502 - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
11503 + if (result >= 0)
11504 + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
11505 return result;
11506 }
11507
11508 -static int _snd_timer_stop(struct snd_timer_instance * timeri,
11509 - int keep_flag, int event)
11510 +static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
11511 {
11512 struct snd_timer *timer;
11513 unsigned long flags;
11514 @@ -503,19 +513,26 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
11515 return -ENXIO;
11516
11517 if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
11518 - if (!keep_flag) {
11519 - spin_lock_irqsave(&slave_active_lock, flags);
11520 - timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
11521 - list_del_init(&timeri->ack_list);
11522 - list_del_init(&timeri->active_list);
11523 + spin_lock_irqsave(&slave_active_lock, flags);
11524 + if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
11525 spin_unlock_irqrestore(&slave_active_lock, flags);
11526 + return -EBUSY;
11527 }
11528 + timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
11529 + list_del_init(&timeri->ack_list);
11530 + list_del_init(&timeri->active_list);
11531 + spin_unlock_irqrestore(&slave_active_lock, flags);
11532 goto __end;
11533 }
11534 timer = timeri->timer;
11535 if (!timer)
11536 return -EINVAL;
11537 spin_lock_irqsave(&timer->lock, flags);
11538 + if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
11539 + SNDRV_TIMER_IFLG_START))) {
11540 + spin_unlock_irqrestore(&timer->lock, flags);
11541 + return -EBUSY;
11542 + }
11543 list_del_init(&timeri->ack_list);
11544 list_del_init(&timeri->active_list);
11545 if (timer->card && timer->card->shutdown) {
11546 @@ -534,9 +551,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
11547 }
11548 }
11549 }
11550 - if (!keep_flag)
11551 - timeri->flags &=
11552 - ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
11553 + timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
11554 spin_unlock_irqrestore(&timer->lock, flags);
11555 __end:
11556 if (event != SNDRV_TIMER_EVENT_RESOLUTION)
11557 @@ -555,7 +570,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
11558 unsigned long flags;
11559 int err;
11560
11561 - err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
11562 + err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
11563 if (err < 0)
11564 return err;
11565 timer = timeri->timer;
11566 @@ -587,10 +602,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
11567 if (timer->card && timer->card->shutdown)
11568 return -ENODEV;
11569 spin_lock_irqsave(&timer->lock, flags);
11570 + if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
11571 + result = -EBUSY;
11572 + goto unlock;
11573 + }
11574 if (!timeri->cticks)
11575 timeri->cticks = 1;
11576 timeri->pticks = 0;
11577 result = snd_timer_start1(timer, timeri, timer->sticks);
11578 + unlock:
11579 spin_unlock_irqrestore(&timer->lock, flags);
11580 snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
11581 return result;
11582 @@ -601,7 +621,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
11583 */
11584 int snd_timer_pause(struct snd_timer_instance * timeri)
11585 {
11586 - return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
11587 + return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
11588 }
11589
11590 /*
11591 @@ -724,8 +744,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
11592 ti->cticks = ti->ticks;
11593 } else {
11594 ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
11595 - if (--timer->running)
11596 - list_del_init(&ti->active_list);
11597 + --timer->running;
11598 + list_del_init(&ti->active_list);
11599 }
11600 if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
11601 (ti->flags & SNDRV_TIMER_IFLG_FAST))
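The sound/core/timer.c fixes all reduce to one rule: test and flip the RUNNING/START flags only while holding the relevant lock, and fail with -EBUSY instead of silently double-starting or double-stopping an instance. A condensed sketch of the start-once idiom, with simplified names standing in for the SNDRV_TIMER_IFLG_* flags:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    #define FLAG_RUNNING 0x1   /* stand-in for IFLG_RUNNING/IFLG_START */

    struct inst {
            spinlock_t lock;
            unsigned int flags;
    };

    /* Succeeds at most once until a matching stop clears the flag. */
    static int start_once(struct inst *i)
    {
            unsigned long flags;
            int ret = 0;

            spin_lock_irqsave(&i->lock, flags);
            if (i->flags & FLAG_RUNNING)
                    ret = -EBUSY;   /* already armed: reject, don't re-arm */
            else
                    i->flags |= FLAG_RUNNING;
            spin_unlock_irqrestore(&i->lock, flags);
            return ret;
    }

Note that snd_timer_notify1() now fires only when result >= 0, so listeners never observe a START event for a rejected call.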
11602 diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
11603 index d11baaf0f0b4..96592d5ba7bf 100644
11604 --- a/sound/drivers/dummy.c
11605 +++ b/sound/drivers/dummy.c
11606 @@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
11607 module_param(fake_buffer, bool, 0444);
11608 MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
11609 #ifdef CONFIG_HIGH_RES_TIMERS
11610 -module_param(hrtimer, bool, 0644);
11611 +module_param(hrtimer, bool, 0444);
11612 MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
11613 #endif
11614
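The dummy-driver tweak simply drops write permission on the hrtimer module parameter (0644 to 0444), so the timer source can only be chosen at load time and can no longer be flipped underneath a running stream. For reference, the third module_param() argument is an ordinary sysfs mode; a minimal standalone use:

    #include <linux/module.h>

    static bool hrtimer = true;
    /* 0444 = world-readable, nobody-writable: fixed once the module loads. */
    module_param(hrtimer, bool, 0444);
    MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");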
11615 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
11616 index 98e4fc8121a1..5e547cb199f0 100644
11617 --- a/sound/firewire/bebob/bebob_stream.c
11618 +++ b/sound/firewire/bebob/bebob_stream.c
11619 @@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
11620 [6] = 0x07,
11621 };
11622
11623 -static unsigned int
11624 -get_formation_index(unsigned int rate)
11625 +static int
11626 +get_formation_index(unsigned int rate, unsigned int *index)
11627 {
11628 unsigned int i;
11629
11630 for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
11631 - if (snd_bebob_rate_table[i] == rate)
11632 - return i;
11633 + if (snd_bebob_rate_table[i] == rate) {
11634 + *index = i;
11635 + return 0;
11636 + }
11637 }
11638 return -EINVAL;
11639 }
11640 @@ -367,7 +369,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
11641 goto end;
11642
11643 /* confirm params for both streams */
11644 - index = get_formation_index(rate);
11645 + err = get_formation_index(rate, &index);
11646 + if (err < 0)
11647 + goto end;
11648 pcm_channels = bebob->tx_stream_formations[index].pcm;
11649 midi_channels = bebob->tx_stream_formations[index].midi;
11650 amdtp_stream_set_parameters(&bebob->tx_stream,
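The bebob change converts a lookup that could previously return an out-of-range value into the usual kernel convention: 0 or -EINVAL as the return, with the index delivered through an out parameter, so a failed lookup can never be used to subscript the formation tables. A standalone restatement of the pattern (find_rate_index is a made-up name):

    #include <errno.h>
    #include <stddef.h>

    /* Returns 0 and fills *index on success, -EINVAL when rate is absent. */
    static int find_rate_index(const unsigned int *table, size_t n,
                               unsigned int rate, unsigned int *index)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (table[i] == rate) {
                            *index = (unsigned int)i;
                            return 0;
                    }
            }
            return -EINVAL;
    }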
11651 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
11652 index b791529bf31c..8f50a257a80d 100644
11653 --- a/sound/pci/hda/patch_cirrus.c
11654 +++ b/sound/pci/hda/patch_cirrus.c
11655 @@ -614,6 +614,7 @@ enum {
11656 CS4208_MAC_AUTO,
11657 CS4208_MBA6,
11658 CS4208_MBP11,
11659 + CS4208_MACMINI,
11660 CS4208_GPIO0,
11661 };
11662
11663 @@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
11664 { .id = CS4208_GPIO0, .name = "gpio0" },
11665 { .id = CS4208_MBA6, .name = "mba6" },
11666 { .id = CS4208_MBP11, .name = "mbp11" },
11667 + { .id = CS4208_MACMINI, .name = "macmini" },
11668 {}
11669 };
11670
11671 @@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
11672 /* codec SSID matching */
11673 static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
11674 SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
11675 + SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
11676 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
11677 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
11678 SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
11679 @@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
11680 snd_hda_apply_fixup(codec, action);
11681 }
11682
11683 +/* MacMini 7,1 has the inverted jack detection */
11684 +static void cs4208_fixup_macmini(struct hda_codec *codec,
11685 + const struct hda_fixup *fix, int action)
11686 +{
11687 + static const struct hda_pintbl pincfgs[] = {
11688 + { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
11689 + { 0x21, 0x004be140 }, /* SPDIF: disable detect */
11690 + { }
11691 + };
11692 +
11693 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
11694 + /* HP pin (0x10) has an inverted detection */
11695 + codec->inv_jack_detect = 1;
11696 + /* disable the bogus Mic and SPDIF jack detections */
11697 + snd_hda_apply_pincfgs(codec, pincfgs);
11698 + }
11699 +}
11700 +
11701 static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
11702 struct snd_ctl_elem_value *ucontrol)
11703 {
11704 @@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
11705 .chained = true,
11706 .chain_id = CS4208_GPIO0,
11707 },
11708 + [CS4208_MACMINI] = {
11709 + .type = HDA_FIXUP_FUNC,
11710 + .v.func = cs4208_fixup_macmini,
11711 + .chained = true,
11712 + .chain_id = CS4208_GPIO0,
11713 + },
11714 [CS4208_GPIO0] = {
11715 .type = HDA_FIXUP_FUNC,
11716 .v.func = cs4208_fixup_gpio0,
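Registering the MacMini 7,1 quirk touches four places, as the hunks show: a new enum id, a model-name mapping for the model= option, an SSID entry in the codec quirk table, and the fixup function itself. When a pin's jack detection is bogus rather than merely inverted, overriding its default pin configuration at PRE_PROBE time is the standard escape hatch; a trimmed sketch of that shape (the NID/config pair below is illustrative only):

    #include "hda_local.h"   /* snd_hda_apply_pincfgs(), struct hda_pintbl */

    /* Hypothetical fixup: force a pin's default config before probing. */
    static void fixup_example(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
    {
            static const struct hda_pintbl pincfgs[] = {
                    { 0x18, 0x00ab9150 },   /* illustrative NID/config pair */
                    { }                     /* terminator */
            };

            if (action == HDA_FIXUP_ACT_PRE_PROBE)
                    snd_hda_apply_pincfgs(codec, pincfgs);
    }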
11717 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
11718 index fb9a8a5787a6..37d8ababfc04 100644
11719 --- a/sound/usb/quirks.c
11720 +++ b/sound/usb/quirks.c
11721 @@ -1118,6 +1118,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
11722 switch (chip->usb_id) {
11723 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
11724 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
11725 + case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
11726 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
11727 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
11728 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
11729 @@ -1202,8 +1203,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
11730 * "Playback Design" products need a 50ms delay after setting the
11731 * USB interface.
11732 */
11733 - if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
11734 + switch (le16_to_cpu(dev->descriptor.idVendor)) {
11735 + case 0x23ba: /* Playback Design */
11736 + case 0x0644: /* TEAC Corp. */
11737 mdelay(50);
11738 + break;
11739 + }
11740 }
11741
11742 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
11743 @@ -1218,6 +1223,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
11744 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
11745 mdelay(20);
11746
11747 + /*
11748 + * "TEAC Corp." products need a 20ms delay after each
11749 + * class compliant request
11750 + */
11751 + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
11752 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
11753 + mdelay(20);
11754 +
11755 /* Marantz/Denon devices with USB DAC functionality need a delay
11756 * after each class compliant request
11757 */
11758 @@ -1266,7 +1279,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
11759 case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
11760 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
11761 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
11762 - case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
11763 + case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
11764 if (fp->altsetting == 2)
11765 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
11766 break;
11767 @@ -1275,6 +1288,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
11768 case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
11769 case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
11770 case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
11771 + case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
11772 if (fp->altsetting == 3)
11773 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
11774 break;
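All of the quirks.c additions are table-driven: a new USB_ID in the sample-rate quirk list, vendor-keyed settle delays after interface and class requests, an altsetting-gated DSD format grant, and the corrected OPPO HA-1 vendor id. A sketch of the vendor-delay idiom as a standalone helper (the function name is hypothetical; the id and delay match what the patch uses for TEAC):

    #include <linux/delay.h>
    #include <linux/usb.h>

    /* Hypothetical helper: settle delay after class-compliant requests. */
    static void ctl_msg_settle_delay(struct usb_device *dev, __u8 requesttype)
    {
            if ((requesttype & USB_TYPE_MASK) != USB_TYPE_CLASS)
                    return;

            switch (le16_to_cpu(dev->descriptor.idVendor)) {
            case 0x0644:            /* TEAC Corp. */
                    mdelay(20);
                    break;
            }
    }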
11775 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
11776 index ed5461f065bd..f64a2d54d467 100644
11777 --- a/tools/lib/traceevent/event-parse.c
11778 +++ b/tools/lib/traceevent/event-parse.c
11779 @@ -4841,13 +4841,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
11780 sizeof(long) != 8) {
11781 char *p;
11782
11783 - ls = 2;
11784 /* make %l into %ll */
11785 - p = strchr(format, 'l');
11786 - if (p)
11787 + if (ls == 1 && (p = strchr(format, 'l')))
11788 memmove(p+1, p, strlen(p)+1);
11789 else if (strcmp(format, "%p") == 0)
11790 strcpy(format, "0x%llx");
11791 + ls = 2;
11792 }
11793 switch (ls) {
11794 case -2:
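The traceevent fix widens "%l" to "%ll" only when the argument was actually parsed as a single long (ls == 1), instead of unconditionally doubling the first 'l' it finds, which could previously mangle formats that were already correct on 32-bit builds. The in-place widening itself is a small memmove trick, shown here standalone:

    #include <string.h>

    /*
     * Widen the first "%l" conversion in fmt to "%ll" in place.
     * fmt must have at least one byte of spare capacity.
     */
    static void widen_long_format(char *fmt)
    {
            char *p = strchr(fmt, 'l');

            if (p)
                    memmove(p + 1, p, strlen(p) + 1);   /* duplicate the 'l' */
    }

Applied to "%lu" this yields "%llu"; the terminating NUL moves with the rest of the string because the copy length is strlen(p) + 1.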
11795 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
11796 index 0c74012575ac..83054ef6c1a1 100644
11797 --- a/tools/perf/util/session.c
11798 +++ b/tools/perf/util/session.c
11799 @@ -816,7 +816,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
11800
11801 machine = machines__find(machines, pid);
11802 if (!machine)
11803 - machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
11804 + machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
11805 return machine;
11806 }
11807
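Finally, the perf fix swaps machines__find() for machines__findnew() on the fallback path: the find-or-create variant allocates and registers the default guest machine on first use, so a guest sample with an unknown pid can no longer come back with a NULL machine. Restated for clarity (same logic as the patched helper; the wrapper name is mine):

    #include "machine.h"   /* perf's machines__find()/machines__findnew() */

    static struct machine *guest_machine_for(struct machines *machines,
                                             pid_t pid)
    {
            struct machine *machine = machines__find(machines, pid);

            if (!machine)
                    machine = machines__findnew(machines,
                                                DEFAULT_GUEST_KERNEL_ID);
            return machine;
    }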