Magellan Linux

Contents of /trunk/kernel-alx/patches-3.18/0103-3.18.4-all-fixes.patch



Revision 2553
Wed Mar 18 09:24:38 2015 UTC by niro
File size: 246965 bytes
-import from kernel-magellan
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 479f33204a37..f4c71d4a9ba3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1270,6 +1270,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8042.notimeout [HW] Ignore timeout condition signalled by controller
i8042.reset [HW] Reset the controller during init and cleanup
i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port

i810= [HW,DRM]

diff --git a/Makefile b/Makefile
index 91cfe8d5ee06..4e9328491c1e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 3
+SUBLEVEL = 4
EXTRAVERSION =
NAME = Diseased Newt

diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index cfaedd9c61c9..1c169dc74ad1 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,7 +20,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
};

aliases {
@@ -41,9 +41,9 @@
#interrupt-cells = <1>;
};

- uart0: serial@c0000000 {
+ uart0: serial@f0000000 {
compatible = "ns8250";
- reg = <0xc0000000 0x2000>;
+ reg = <0xf0000000 0x2000>;
interrupts = <11>;
clock-frequency = <3686400>;
baud = <115200>;
@@ -52,21 +52,21 @@
no-loopback-test = <1>;
};

- pgu0: pgu@c9000000 {
+ pgu0: pgu@f9000000 {
compatible = "snps,arcpgufb";
- reg = <0xc9000000 0x400>;
+ reg = <0xf9000000 0x400>;
};

- ps2: ps2@c9001000 {
+ ps2: ps2@f9001000 {
compatible = "snps,arc_ps2";
- reg = <0xc9000400 0x14>;
+ reg = <0xf9000400 0x14>;
interrupts = <13>;
interrupt-names = "arc_ps2_irq";
};

- eth0: ethernet@c0003000 {
+ eth0: ethernet@f0003000 {
compatible = "snps,oscilan";
- reg = <0xc0003000 0x44>;
+ reg = <0xf0003000 0x44>;
interrupts = <7>, <8>;
interrupt-names = "rx", "tx";
};
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index ea1f99b8eed6..45ac1d04cf42 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -30,6 +30,8 @@
};

&sdhci2 {
+ broken-cd;
+ bus-width = <8>;
non-removable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 891d56b03922..b805e19ed390 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -83,7 +83,8 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&chip CLKID_SDIO1XIN>;
+ clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+ clock-names = "io", "core";
status = "disabled";
};

@@ -314,36 +315,6 @@
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
};
-
- gpio4: gpio@5000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x5000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- porte: gpio-port@4 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
-
- gpio5: gpio@c000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0xc000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- portf: gpio-port@5 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
};

chip: chip-control@ea0000 {
@@ -372,6 +343,21 @@
ranges = <0 0xfc0000 0x10000>;
interrupt-parent = <&sic>;

+ sm_gpio1: gpio@5000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x5000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ portf: gpio-port@5 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
i2c2: i2c@7000 {
compatible = "snps,designware-i2c";
#address-cells = <1>;
@@ -422,6 +408,21 @@
status = "disabled";
};

+ sm_gpio0: gpio@c000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xc000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ porte: gpio-port@4 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
sysctrl: pin-controller@d000 {
compatible = "marvell,berlin2q-system-ctrl";
reg = <0xd000 0x100>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index c6ce6258434f..1bd6c79f445e 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -399,23 +399,23 @@
};
partition@5 {
label = "QSPI.u-boot-spl-os";
- reg = <0x00140000 0x00010000>;
+ reg = <0x00140000 0x00080000>;
};
partition@6 {
label = "QSPI.u-boot-env";
- reg = <0x00150000 0x00010000>;
+ reg = <0x001c0000 0x00010000>;
};
partition@7 {
label = "QSPI.u-boot-env.backup1";
- reg = <0x00160000 0x0010000>;
+ reg = <0x001d0000 0x0010000>;
};
partition@8 {
label = "QSPI.kernel";
- reg = <0x00170000 0x0800000>;
+ reg = <0x001e0000 0x0800000>;
};
partition@9 {
label = "QSPI.file-system";
- reg = <0x00970000 0x01690000>;
+ reg = <0x009e0000 0x01620000>;
};
};
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 58d3c3cf2923..d238676a9107 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -162,7 +162,7 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>, <&clks 62>;
+ clocks = <&clks 78>, <&clks 78>;
clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 56569cecaa78..649befeb2cf9 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -127,24 +127,12 @@
#address-cells = <1>;
#size-cells = <0>;

- reg_usbh1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1reg>;
- reg = <0>;
- regulator-name = "usbh1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usbotg_vbus: regulator@1 {
+ reg_hub_reset: regulator@0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotgreg>;
- reg = <1>;
- regulator-name = "usbotg_vbus";
+ reg = <0>;
+ regulator-name = "hub_reset";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
@@ -176,6 +164,7 @@
reg = <0>;
clocks = <&clks IMX5_CLK_DUMMY>;
clock-names = "main_clk";
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
};
};
};
@@ -419,7 +408,7 @@
&usbh1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
- vbus-supply = <&reg_usbh1_vbus>;
+ vbus-supply = <&reg_hub_reset>;
fsl,usbphy = <&usbh1phy>;
phy_type = "ulpi";
status = "okay";
@@ -429,7 +418,6 @@
dr_mode = "otg";
disable-over-current;
phy_type = "utmi_wide";
- vbus-supply = <&reg_usbotg_vbus>;
status = "okay";
};

diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b3f86670d2eb..a0e51bb68b2d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_BINFMT_MISC=y
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4e79da7c5e30..2daef619d053 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -144,8 +144,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
post_div_table[1].div = 1;
post_div_table[2].div = 1;
video_div_table[1].div = 1;
- video_div_table[2].div = 1;
- };
+ video_div_table[3].div = 1;
+ }

clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
clk[IMX6QDL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
index 17354a11356f..5a3e5a159e70 100644
--- a/arch/arm/mach-imx/clk-imx6sx.c
+++ b/arch/arm/mach-imx/clk-imx6sx.c
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);

+ clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 377eea849e7b..db57741c9c8a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -249,6 +249,7 @@ extern void omap4_cpu_die(unsigned int cpu);
extern struct smp_operations omap4_smp_ops;

extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PM)
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4993d4bfe9b2..6d1dffca6c7b 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -22,6 +22,7 @@

/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
+#define API_HYP_ENTRY 0x102

/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
b secondary_startup
ENDPROC(omap5_secondary_startup)
/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first. This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
+ ldr r0, [r2]
+ mov r0, r0, lsr #5
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
+ bne wait_2
+ ldr r12, =API_HYP_ENTRY
+ adr r0, hyp_boot
+ smc #0
+hyp_boot:
+ b secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
+/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 256e84ef0f67..5305ec7341ec 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -22,6 +22,7 @@
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_scu.h>
+#include <asm/virt.h>

#include "omap-secure.h"
#include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (omap_secure_apis_support())
omap_auxcoreboot_addr(virt_to_phys(startup_addr));
else
- writel_relaxed(virt_to_phys(omap5_secondary_startup),
- base + OMAP_AUX_CORE_BOOT_1);
+ /*
+ * If the boot CPU is in HYP mode then start secondary
+ * CPU in HYP mode as well.
+ */
+ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+ writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
+ else
+ writel_relaxed(virt_to_phys(omap5_secondary_startup),
+ base + OMAP_AUX_CORE_BOOT_1);

}

diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 4f61148ec168..fb0cb2b817a9 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -513,11 +513,11 @@ static void __init realtime_counter_init(void)
rate = clk_get_rate(sys_clk);
/* Numerator/denumerator values refer TRM Realtime Counter section */
switch (rate) {
- case 1200000:
+ case 12000000:
num = 64;
den = 125;
break;
- case 1300000:
+ case 13000000:
num = 768;
den = 1625;
break;
@@ -529,11 +529,11 @@ static void __init realtime_counter_init(void)
num = 192;
den = 625;
break;
- case 2600000:
+ case 26000000:
num = 384;
den = 1625;
break;
- case 2700000:
+ case 27000000:
num = 256;
den = 1125;
break;
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 328657d011d5..1ba4f6357843 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -598,6 +598,7 @@ static struct platform_device ipmmu_device = {

static struct renesas_intc_irqpin_config irqpin0_platform_data = {
.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+ .control_parent = true,
};

static struct resource irqpin0_resources[] = {
@@ -659,6 +660,7 @@ static struct platform_device irqpin1_device = {

static struct renesas_intc_irqpin_config irqpin2_platform_data = {
.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+ .control_parent = true,
};

static struct resource irqpin2_resources[] = {
@@ -689,6 +691,7 @@ static struct platform_device irqpin2_device = {

static struct renesas_intc_irqpin_config irqpin3_platform_data = {
.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+ .control_parent = true,
};

static struct resource irqpin3_resources[] = {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 494297c698ca..fff81f02251c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -333,14 +333,8 @@ static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
+ if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- }
}

static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@

#endif /*!CONFIG_PA20*/

-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+ fails to find a register in class R1_REGS when "a" needs to be
+ reloaded when generating 64-bit PIC code. Instead, we clobber
+ memory to indicate to the compiler that the assembly code reads
+ or writes to items other than those listed in the input and output
+ operands. This may pessimize the code somewhat but __ldcw is
+ usually used within code blocks surrounded by memory barriors. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})

diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index feb549aa3eea..b67ea67eb71b 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc86821bc9..d195a87ca542 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@ config UML
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr)
endif
+clean-files += cpustr.h

# ---------------------------------------------------------------------------

diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
endif
+clean-files += capflags.c
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 67e6d19ef1be..93d2c04c6f8f 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1018,6 +1018,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
+
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer to get messed up.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1046,24 +1055,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->ip - 1);
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ void *saved_sp = kcb->jprobe_saved_sp;

if ((addr > (u8 *) jprobe_return) &&
(addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+ if (stack_addr(regs) != saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk(KERN_ERR
"current sp %p does not match saved sp %p\n",
- stack_addr(regs), kcb->jprobe_saved_sp);
+ stack_addr(regs), saved_sp);
printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
show_regs(saved_regs);
printk(KERN_ERR "Current registers\n");
show_regs(regs);
BUG();
}
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
- kcb->jprobes_stack,
- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3e556c68351b..ed7039465f16 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2377,12 +2377,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
nested_vmx_secondary_ctls_low = 0;
nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_WBINVD_EXITING;

if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d4269e2e3..bd16d6c370ec 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);

extern asmlinkage void sys_ni_syscall(void);

-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index f2f0723070ca..95783087f0d3 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);

extern void sys_ni_syscall(void);

-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 531ae591783b..17d86595951c 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -222,10 +222,14 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
}

/* Error found so break the for loop */
- break;
+ return IRQ_HANDLED;
}
}
- return IRQ_HANDLED;
+
+ dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
+ inttype ? "debug" : "application");
+
+ return IRQ_NONE;
}

static const struct of_device_id l3_noc_match[] = {
@@ -296,11 +300,66 @@ static int omap_l3_probe(struct platform_device *pdev)
return ret;
}

+#ifdef CONFIG_PM
+
+/**
+ * l3_resume_noirq() - resume function for l3_noc
+ * @dev: pointer to l3_noc device structure
+ *
+ * We only have the resume handler only since we
+ * have already maintained the delta register
+ * configuration as part of configuring the system
+ */
+static int l3_resume_noirq(struct device *dev)
+{
+ struct omap_l3 *l3 = dev_get_drvdata(dev);
+ int i;
+ struct l3_flagmux_data *flag_mux;
+ void __iomem *base, *mask_regx = NULL;
+ u32 mask_val;
+
+ for (i = 0; i < l3->num_modules; i++) {
+ base = l3->l3_base[i];
+ flag_mux = l3->l3_flagmux[i];
+ if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
+ continue;
+
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_APPLICATION_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_app_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
+ (L3_DEBUG_ERROR << 3);
+ mask_val = readl_relaxed(mask_regx);
+ mask_val &= ~(flag_mux->mask_dbg_bits);
+
+ writel_relaxed(mask_val, mask_regx);
+ }
+
+ /* Dummy read to force OCP barrier */
+ if (mask_regx)
+ (void)readl(mask_regx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops l3_dev_pm_ops = {
+ .resume_noirq = l3_resume_noirq,
+};
+
+#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+#else
+#define L3_DEV_PM_OPS NULL
+#endif
+
static struct platform_driver omap_l3_driver = {
.probe = omap_l3_probe,
.driver = {
.name = "omap_l3_noc",
.owner = THIS_MODULE,
+ .pm = L3_DEV_PM_OPS,
.of_match_table = of_match_ptr(l3_noc_match),
},
};
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 32f7c1b36204..2f13bd5246b5 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {

#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)

+static struct clk *slow_clk;

static int clk_slow_osc_prepare(struct clk_hw *hw)
{
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;

return clk;
}
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;

return clk;
}
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,

of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+/*
+ * FIXME: All slow clk users are not properly claiming it (get + prepare +
+ * enable) before using it.
+ * If all users properly claiming this clock decide that they don't need it
+ * anymore (or are removed), it is disabled while faulty users are still
+ * requiring it, and the system hangs.
+ * Prevent this clock from being disabled until all users are properly
+ * requesting it.
+ * Once this is done we should remove this function and the slow_clk variable.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+ if (!slow_clk)
+ return 0;
+
+ __clk_get(slow_clk);
+ clk_prepare_enable(slow_clk);
+
+ return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 21784e4eb3f0..440ef81ab15c 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
{ "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
{ "sdio", "perif", 16, CLK_IGNORE_UNUSED },
{ "nfc", "perif", 18 },
- { "smemc", "perif", 19 },
{ "pcie", "perif", 22 },
};

diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index 8e58edfeeb37..8b284be4efa4 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
{}
};

-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
.driver = {
.name = "ppc_corenet_clock",
.owner = THIS_MODULE,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4896ae9e23da..26bed0889e97 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
.release = single_release,
};

-/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
struct dentry *d;
@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
else
clk->rate = 0;

- clk_debug_register(clk);
/*
* walk the list of orphan clocks and reparent any that are children of
* this clock
@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
out:
clk_prepare_unlock();

+ if (!ret)
+ clk_debug_register(clk);
+
return ret;
}

@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)

void __clk_put(struct clk *clk)
{
+ struct module *owner;
+
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;

clk_prepare_lock();
+ owner = clk->owner;
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();

- module_put(clk->owner);
+ module_put(owner);
}

/*** clk rate change notifiers ***/
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index beed49c79126..8088b384ce6e 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
PNAME(mux_mac_p) = { "gpll", "dpll" };
PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };

+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+ RK2928_MODE_CON, 0, 5, rk3188_pll_rates),
+ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+ RK2928_MODE_CON, 4, 4, NULL),
+ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+ RK2928_MODE_CON, 8, 6, rk3188_pll_rates),
+ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+ RK2928_MODE_CON, 12, 7, rk3188_pll_rates),
+};
+
static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
@@ -742,8 +753,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
static void __init rk3066a_clk_init(struct device_node *np)
{
rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3188_pll_clks,
- ARRAY_SIZE(rk3188_pll_clks),
+ rockchip_clk_register_plls(rk3066_pll_clks,
+ ARRAY_SIZE(rk3066_pll_clks),
RK3066_GRF_SOC_STATUS);
rockchip_clk_register_branches(rk3066a_clk_branches,
ARRAY_SIZE(rk3066a_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 23278291da44..6dbc64759a58 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -142,20 +142,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
}

static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
- RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+ RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
};

static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 13eae14c2cc2..b50469faf70c 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -210,6 +210,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
{
int i;

+#ifdef CONFIG_PM_SLEEP
+ unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
+#endif
+
of_clk_del_provider(pdev->dev.of_node);

for (i = 0; i < clk_data.clk_num; i++) {
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index bbfe7f508502..a7cf0c193ba8 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
virq = irq_find_mapping(cg->chip.irqdomain, gpio);
- generic_handle_irq(virq);
+ handle_nested_irq(virq);
}
}

diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..08261f2b3a82 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
return false;

ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0)
- return false;
+ if (ret < 0) {
+ /* We've found the gpio chip, but the translation failed.
+ * Return true to stop looking and return the translation
+ * error via out_gpio
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+ return true;
+ }

gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 5f2150b619a7..0d21396f961d 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}

-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);

static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,18 +353,15 @@ static ssize_t gpio_active_low_store(struct device *dev,
return status ? : size;
}

-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);

-static const struct attribute *gpio_attrs[] = {
+static struct attribute *gpio_attrs[] = {
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
-
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
-};
+ATTRIBUTE_GROUPS(gpio);

/*
* /sys/class/gpio/gpiochipN/
@@ -400,16 +397,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);

-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
&dev_attr_ngpio.attr,
NULL,
};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);

/*
* /sys/class/gpio/export ... write-only
@@ -564,18 +558,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (desc->chip->names && desc->chip->names[offset])
ioname = desc->chip->names[offset];

- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+ MKDEV(0, 0), desc, gpio_groups,
+ ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
}

- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
if (direction_may_change) {
status = device_create_file(dev, &dev_attr_direction);
if (status)
@@ -586,13 +577,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
!test_bit(FLAG_IS_OUT, &desc->flags))) {
status = device_create_file(dev, &dev_attr_edge);
if (status)
- goto fail_unregister_device;
+ goto fail_remove_attr_direction;
}

set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;

+fail_remove_attr_direction:
+ device_remove_file(dev, &dev_attr_direction);
fail_unregister_device:
device_unregister(dev);
fail_unlock:
@@ -726,6 +719,8 @@ void gpiod_unexport(struct gpio_desc *desc)
mutex_unlock(&sysfs_lock);

if (dev) {
+ device_remove_file(dev, &dev_attr_edge);
+ device_remove_file(dev, &dev_attr_direction);
device_unregister(dev);
put_device(dev);
}
@@ -750,13 +745,13 @@ int gpiochip_export(struct gpio_chip *chip)

/* use chip->base for the ID; it's already known to be unique */
mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+ chip, gpiochip_groups,
+ "gpiochip%d", chip->base);
+ if (IS_ERR(dev))
status = PTR_ERR(dev);
+ else
+ status = 0;
chip->exported = (status == 0);
mutex_unlock(&sysfs_lock);

diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e8e98ca25ec7..c81bda0ec2cf 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -268,6 +268,9 @@ int gpiochip_add(struct gpio_chip *chip)

spin_unlock_irqrestore(&gpio_lock, flags);

+ if (status)
+ goto fail;
+
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&chip->pin_ranges);
#endif
@@ -275,12 +278,12 @@ int gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add(chip);
acpi_gpiochip_add(chip);

- if (status)
- goto fail;
-
status = gpiochip_export(chip);
- if (status)
+ if (status) {
+ acpi_gpiochip_remove(chip);
+ of_gpiochip_remove(chip);
goto fail;
+ }

pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -313,14 +316,13 @@ void gpiochip_remove(struct gpio_chip *chip)
unsigned long flags;
unsigned id;

- acpi_gpiochip_remove(chip);
-
- spin_lock_irqsave(&gpio_lock, flags);
-
gpiochip_irqchip_remove(chip);
+
+ acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);

+ spin_lock_irqsave(&gpio_lock, flags);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08e33b8b13a4..9f59c9027ebe 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -378,10 +378,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,

/*
* The specification doesn't give any recommendation on how often to
- * retry native transactions, so retry 7 times like for I2C-over-AUX
- * transactions.
+ * retry native transactions. We used to retry 7 times like for
+ * aux i2c transactions but real world devices this wasn't
+ * sufficient, bump to 32 which makes Dell 4k monitors happier.
*/
- for (retry = 0; retry < 7; retry++) {
+ for (retry = 0; retry < 32; retry++) {

mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 070f913d2dba..f50d884b81cf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
+ struct drm_dp_mst_branch *mstb;
+
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
- drm_dp_put_mst_branch_device(port->mstb);
+ mstb = port->mstb;
port->mstb = NULL;
+ drm_dp_put_mst_branch_device(mstb);
break;
}
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0c0c39bac23d..ef757f712a3d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
+ bool do_delayed = false;
+
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
+
+ do_delayed = fb_helper->delayed_hotplug;
+ if (do_delayed)
+ fb_helper->delayed_hotplug = false;
drm_modeset_unlock_all(dev);
+
+ if (do_delayed)
+ drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)

drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);

- if (fb_helper->delayed_hotplug) {
- fb_helper->delayed_hotplug = false;
- drm_fb_helper_hotplug_event(fb_helper);
- }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 5ef03c216a27..c4edea907f8f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];

- BUG_ON(atomic_read(&vblank->refcount) == 0);
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ return;

if (WARN_ON(crtc >= dev->num_crtcs))
return;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2318b4c7a8f8..925697320949 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -692,11 +692,12 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

- intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);

+ intel_dp_mst_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d187a1..346aee828dc3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -877,6 +877,7 @@ struct i915_suspend_saved_registers {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
+ u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
@@ -1665,8 +1666,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;

- uint32_t bios_vgacntr;
-
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604d..2de5f5f4ba45 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4193,7 +4193,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;

- if (INTEL_INFO(dev)->gen >= 6)
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;

ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4249,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret;

+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8f1580..c12f087d7a14 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -468,7 +468,12 @@ mi_set_context(struct intel_engine_cs *ring,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
- int ret;
+ const int num_rings =
+ /* Use an extended w/a on ivb+ if signalling from other rings */
+ i915_semaphore_is_enabled(ring->dev) ?
+ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ 0;
+ int len, i, ret;

/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -485,15 +490,31 @@ mi_set_context(struct intel_engine_cs *ring,
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

- ret = intel_ring_begin(ring, 6);
+
+ len = 4;
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+ ret = intel_ring_begin(ring, len);
if (ret)
return ret;

/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
+ }

intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -505,10 +526,21 @@ mi_set_context(struct intel_engine_cs *ring,
*/
intel_ring_emit(ring, MI_NOOP);

- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ }

intel_ring_advance(ring);

diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 85fda6b803e4..0ee76b25204c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
"Graphics Stolen Memory");
- if (r == NULL) {
+ /*
+ * GEN3 firmware likes to smash pci bridges into the stolen
+ * range. Apparently this works.
+ */
+ if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b6e287..9ba1177200b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -4022,8 +4022,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;

- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -4033,6 +4031,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;

+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;

@@ -4210,8 +4209,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;

- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -4221,6 +4218,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;

+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430e..3f1e54bfcddb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -74,6 +74,7 @@
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */


@@ -370,6 +371,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1071,6 +1073,7 @@ enum punit_power_well {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1401,6 +1404,7 @@ enum punit_power_well {
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)

#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)

diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c77a1f..e22b0e825de2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -328,6 +328,10 @@ int i915_save_state(struct drm_device *dev)
}
}

+ if (IS_GEN4(dev))
+ pci_read_config_word(dev->pdev, GCDGMBUS,
+ &dev_priv->regfile.saveGCDGMBUS);
+
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -356,6 +360,10 @@ int i915_restore_state(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);

i915_gem_restore_fences(dev);
+
+ if (IS_GEN4(dev))
+ pci_write_config_word(dev->pdev, GCDGMBUS,
+ dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);

if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cb5c95d5898..cadc3bcf1de2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12933,11 +12933,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);

- /*
- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
- * from S3 without preserving (some of?) the other bits.
- */
- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}

@@ -13026,8 +13022,6 @@ void intel_modeset_init(struct drm_device *dev)

intel_shared_dpll_init(dev);

- /* save the BIOS value before clobbering it */
- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ad2fd605f76b..83c7ecf2608a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6520,29 +6520,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
chv_set_pipe_power_well(dev_priv, power_well, false);
}

-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
@@ -6562,8 +6539,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
-
- check_power_well_state(dev_priv, power_well);
}

power_domains->domain_use_count[domain]++;
@@ -6593,8 +6568,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
-
- check_power_well_state(dev_priv, power_well);
}

mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e419b589..ae17e77dc08d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b76163965..b29091b21a76 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,8 +43,8 @@
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
- "Device suspended\n");
+ WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+ "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@

#include "nv04.h"

-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088050, 0xff);
-}
-
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
- .msi_rearm = nv4c_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b25078..dce0d3918fa7 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI) ||
+ if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
- /* KB/KV/ML has PPLL1 and PPLL2 */
+ /* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- /* CI has PPLL0, PPLL1, and PPLL2 */
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -2154,6 +2153,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
1687 + (rdev->family == CHIP_KAVERI) ||
1688 (rdev->family == CHIP_BONAIRE) ||
1689 (rdev->family == CHIP_HAWAII))
1690 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1691 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1692 index 11ba9d21b89b..db42a670f995 100644
1693 --- a/drivers/gpu/drm/radeon/atombios_dp.c
1694 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1695 @@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
1696 struct radeon_connector_atom_dig *dig_connector;
1697 int dp_clock;
1698
1699 + if ((mode->clock > 340000) &&
1700 + (!radeon_connector_is_dp12_capable(connector)))
1701 + return MODE_CLOCK_HIGH;
1702 +
1703 if (!radeon_connector->con_priv)
1704 return MODE_CLOCK_HIGH;
1705 dig_connector = radeon_connector->con_priv;
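The added radeon_dp_mode_valid_helper() check rejects any mode whose pixel clock exceeds 340 MHz when the connector is not DP 1.2 capable, before the existing con_priv validation runs; above that clock only an HBR2-capable link can carry the mode. As a standalone sketch (radeon_connector_is_dp12_capable() and the MODE_* codes belong to the driver; everything else here is illustrative):

enum mode_status { MODE_OK = 0, MODE_CLOCK_HIGH };

/* clock is in kHz, as in struct drm_display_mode. */
static enum mode_status dp_clock_valid(int clock, bool dp12_capable)
{
	if (clock > 340000 && !dp12_capable)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}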
1706 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
1707 index 11a55e9dad7f..c5699b593665 100644
1708 --- a/drivers/gpu/drm/radeon/ci_dpm.c
1709 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
1710 @@ -4729,7 +4729,7 @@ void ci_dpm_disable(struct radeon_device *rdev)
1711 ci_enable_spread_spectrum(rdev, false);
1712 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
1713 ci_stop_dpm(rdev);
1714 - ci_enable_ds_master_switch(rdev, true);
1715 + ci_enable_ds_master_switch(rdev, false);
1716 ci_enable_ulv(rdev, false);
1717 ci_clear_vc(rdev);
1718 ci_reset_to_default(rdev);
1719 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1720 index 89c01fa6dd8e..9328fb3dcfce 100644
1721 --- a/drivers/gpu/drm/radeon/cik.c
1722 +++ b/drivers/gpu/drm/radeon/cik.c
1723 @@ -6314,6 +6314,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
1724 }
1725
1726 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
1727 + data |= 0x00000001;
1728 data &= 0xfffffffd;
1729 if (orig != data)
1730 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
1731 @@ -6345,7 +6346,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
1732 }
1733 } else {
1734 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
1735 - data |= 0x00000002;
1736 + data |= 0x00000003;
1737 if (orig != data)
1738 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
1739
1740 diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1741 index 2fe8cfc966d9..bafdf92a5732 100644
1742 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
1743 +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1744 @@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
1745 }
1746
1747 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
1748 - if (sad_count < 0) {
1749 + if (sad_count <= 0) {
1750 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1751 return;
1752 }
1753 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
1754 index 9b42001295ba..e3e9c10cfba9 100644
1755 --- a/drivers/gpu/drm/radeon/kv_dpm.c
1756 +++ b/drivers/gpu/drm/radeon/kv_dpm.c
1757 @@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
1758 pi->enable_auto_thermal_throttling = true;
1759 pi->disable_nb_ps3_in_battery = false;
1760 if (radeon_bapm == -1) {
1761 - /* There are stability issues reported on with
1762 - * bapm enabled on an asrock system.
1763 - */
1764 - if (rdev->pdev->subsystem_vendor == 0x1849)
1765 - pi->bapm_enable = false;
1766 - else
1767 + /* only enable bapm on KB, ML by default */
1768 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1769 pi->bapm_enable = true;
1770 + else
1771 + pi->bapm_enable = false;
1772 } else if (radeon_bapm == 0) {
1773 pi->bapm_enable = false;
1774 } else {
1775 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1776 index 8624979afb65..d2510cfd3fea 100644
1777 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
1778 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1779 @@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
1780 rbo = container_of(bo, struct radeon_bo, tbo);
1781 switch (bo->mem.mem_type) {
1782 case TTM_PL_VRAM:
1783 - if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
1784 + if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
1785 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
1786 else
1787 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
1788 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1789 index 09874d695188..025c429050c0 100644
1790 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
1791 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
1792 @@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
1793 *
1794 * @pool: to free the pages from
1795 * @free_all: If set to true will free all pages in pool
1796 - * @gfp: GFP flags.
1797 + * @use_static: Safe to use static buffer
1798 **/
1799 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
1800 - gfp_t gfp)
1801 + bool use_static)
1802 {
1803 + static struct page *static_buf[NUM_PAGES_TO_ALLOC];
1804 unsigned long irq_flags;
1805 struct page *p;
1806 struct page **pages_to_free;
1807 @@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
1808 if (NUM_PAGES_TO_ALLOC < nr_free)
1809 npages_to_free = NUM_PAGES_TO_ALLOC;
1810
1811 - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1812 + if (use_static)
1813 + pages_to_free = static_buf;
1814 + else
1815 + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1816 + GFP_KERNEL);
1817 if (!pages_to_free) {
1818 pr_err("Failed to allocate memory for pool free operation\n");
1819 return 0;
1820 @@ -374,7 +379,8 @@ restart:
1821 if (freed_pages)
1822 ttm_pages_put(pages_to_free, freed_pages);
1823 out:
1824 - kfree(pages_to_free);
1825 + if (pages_to_free != static_buf)
1826 + kfree(pages_to_free);
1827 return nr_free;
1828 }
1829
1830 @@ -383,8 +389,6 @@ out:
1831 *
1832 * XXX: (dchinner) Deadlock warning!
1833 *
1834 - * We need to pass sc->gfp_mask to ttm_page_pool_free().
1835 - *
1836 * This code is crying out for a shrinker per pool....
1837 */
1838 static unsigned long
1839 @@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1840 if (shrink_pages == 0)
1841 break;
1842 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
1843 - shrink_pages = ttm_page_pool_free(pool, nr_free,
1844 - sc->gfp_mask);
1845 + /* OK to use static buffer since global mutex is held. */
1846 + shrink_pages = ttm_page_pool_free(pool, nr_free, true);
1847 freed += nr_free - shrink_pages;
1848 }
1849 mutex_unlock(&lock);
1850 @@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
1851 }
1852 spin_unlock_irqrestore(&pool->lock, irq_flags);
1853 if (npages)
1854 - ttm_page_pool_free(pool, npages, GFP_KERNEL);
1855 + ttm_page_pool_free(pool, npages, false);
1856 }
1857
1858 /*
1859 @@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
1860 pr_info("Finalizing pool allocator\n");
1861 ttm_pool_mm_shrink_fini(_manager);
1862
1863 + /* OK to use static buffer since global mutex is no longer used. */
1864 for (i = 0; i < NUM_POOLS; ++i)
1865 - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
1866 - GFP_KERNEL);
1867 + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
1868
1869 kobject_put(&_manager->kobj);
1870 _manager = NULL;
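The ttm_page_alloc.c rework above (and the identical treatment of the DMA pool in the next diff) removes the gfp_t parameter that let the shrinker pass sc->gfp_mask into ttm_page_pool_free(): allocating the temporary page array during reclaim risks recursing back into the shrinker. Callers that already hold the global pool mutex now borrow one static scratch array instead. A condensed sketch of the pattern, with illustrative names:

#include <linux/mm.h>
#include <linux/slab.h>

#define NUM_PAGES_TO_ALLOC 512		/* batch cap, as in the driver */

static int pool_free_batch(bool use_static)
{
	/* One scratch array; safe only while every use_static caller is
	 * serialized by the same global mutex, as the shrinker and
	 * ttm_page_alloc_fini() are in the patch. */
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	struct page **pages_to_free;

	if (use_static)
		pages_to_free = static_buf;	/* no allocation in reclaim */
	else
		pages_to_free = kmalloc_array(NUM_PAGES_TO_ALLOC,
					      sizeof(*pages_to_free),
					      GFP_KERNEL);
	if (!pages_to_free)
		return 0;

	/* ... collect up to NUM_PAGES_TO_ALLOC pages and release them ... */

	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return 0;
}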
1871 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1872 index c96db433f8af..01e1d27eb078 100644
1873 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1874 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
1875 @@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
1876 *
1877 * @pool: to free the pages from
1878 * @nr_free: If set to true will free all pages in pool
1879 - * @gfp: GFP flags.
1880 + * @use_static: Safe to use static buffer
1881 **/
1882 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
1883 - gfp_t gfp)
1884 + bool use_static)
1885 {
1886 + static struct page *static_buf[NUM_PAGES_TO_ALLOC];
1887 unsigned long irq_flags;
1888 struct dma_page *dma_p, *tmp;
1889 struct page **pages_to_free;
1890 @@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
1891 npages_to_free, nr_free);
1892 }
1893 #endif
1894 - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
1895 + if (use_static)
1896 + pages_to_free = static_buf;
1897 + else
1898 + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
1899 + GFP_KERNEL);
1900
1901 if (!pages_to_free) {
1902 pr_err("%s: Failed to allocate memory for pool free operation\n",
1903 @@ -502,7 +507,8 @@ restart:
1904 if (freed_pages)
1905 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
1906 out:
1907 - kfree(pages_to_free);
1908 + if (pages_to_free != static_buf)
1909 + kfree(pages_to_free);
1910 return nr_free;
1911 }
1912
1913 @@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
1914 if (pool->type != type)
1915 continue;
1916 /* Takes a spinlock.. */
1917 - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
1918 + /* OK to use static buffer since global mutex is held. */
1919 + ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
1920 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
1921 /* This code path is called after _all_ references to the
1922 * struct device has been dropped - so nobody should be
1923 @@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
1924
1925 /* shrink pool if necessary (only on !is_cached pools)*/
1926 if (npages)
1927 - ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
1928 + ttm_dma_page_pool_free(pool, npages, false);
1929 ttm->state = tt_unpopulated;
1930 }
1931 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1932 @@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1933 *
1934 * XXX: (dchinner) Deadlock warning!
1935 *
1936 - * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
1937 - *
1938 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1939 * shrinkers
1940 */
1941 @@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1942 if (++idx < pool_offset)
1943 continue;
1944 nr_free = shrink_pages;
1945 - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
1946 - sc->gfp_mask);
1947 + /* OK to use static buffer since global mutex is held. */
1948 + shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1949 freed += nr_free - shrink_pages;
1950
1951 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1952 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1953 index 25f3c250fd98..daeca571b42f 100644
1954 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1955 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1956 @@ -1063,8 +1063,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1957
1958 vmaster = vmw_master_check(dev, file_priv, flags);
1959 if (unlikely(IS_ERR(vmaster))) {
1960 - DRM_INFO("IOCTL ERROR %d\n", nr);
1961 - return PTR_ERR(vmaster);
1962 + ret = PTR_ERR(vmaster);
1963 +
1964 + if (ret != -ERESTARTSYS)
1965 + DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
1966 + nr, ret);
1967 + return ret;
1968 }
1969
1970 ret = ioctl_func(filp, cmd, arg);
1971 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1972 index 197164fd7803..b7594cb758af 100644
1973 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1974 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
1975 @@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
1976
1977 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
1978 {
1979 - struct vmw_fence_manager *fman = fman_from_fence(fence);
1980 -
1981 fence_free(&fence->base);
1982 -
1983 - /*
1984 - * Free kernel space accounting.
1985 - */
1986 - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
1987 - fman->fence_size);
1988 }
1989
1990 int vmw_fence_create(struct vmw_fence_manager *fman,
1991 uint32_t seqno,
1992 struct vmw_fence_obj **p_fence)
1993 {
1994 - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
1995 struct vmw_fence_obj *fence;
1996 int ret;
1997
1998 - ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
1999 - false, false);
2000 - if (unlikely(ret != 0))
2001 - return ret;
2002 -
2003 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
2004 - if (unlikely(fence == NULL)) {
2005 - ret = -ENOMEM;
2006 - goto out_no_object;
2007 - }
2008 + if (unlikely(fence == NULL))
2009 + return -ENOMEM;
2010
2011 ret = vmw_fence_obj_init(fman, fence, seqno,
2012 vmw_fence_destroy);
2013 @@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
2014
2015 out_err_init:
2016 kfree(fence);
2017 -out_no_object:
2018 - ttm_mem_global_free(mem_glob, fman->fence_size);
2019 return ret;
2020 }
2021
2022 @@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
2023 if (ret != 0)
2024 goto out_no_queue;
2025
2026 + return 0;
2027 +
2028 out_no_queue:
2029 event->base.destroy(&event->base);
2030 out_no_event:
2031 @@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
2032
2033 BUG_ON(fence == NULL);
2034
2035 - if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
2036 - ret = vmw_event_fence_action_create(file_priv, fence,
2037 - arg->flags,
2038 - arg->user_data,
2039 - true);
2040 - else
2041 - ret = vmw_event_fence_action_create(file_priv, fence,
2042 - arg->flags,
2043 - arg->user_data,
2044 - true);
2045 -
2046 + ret = vmw_event_fence_action_create(file_priv, fence,
2047 + arg->flags,
2048 + arg->user_data,
2049 + true);
2050 if (unlikely(ret != 0)) {
2051 if (ret != -ERESTARTSYS)
2052 DRM_ERROR("Failed to attach event to fence.\n");
2053 diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
2054 index e37412da15f5..b99de00e57b8 100644
2055 --- a/drivers/iio/adc/ad799x.c
2056 +++ b/drivers/iio/adc/ad799x.c
2057 @@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
2058 case ad7998:
2059 return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
2060 val);
2061 - default:
2062 + case ad7992:
2063 + case ad7993:
2064 + case ad7994:
2065 return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
2066 val);
2067 + default:
2068 + /* Will be written when doing a conversion */
2069 + st->config = val;
2070 + return 0;
2071 }
2072 }
2073
2074 @@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
2075 case ad7997:
2076 case ad7998:
2077 return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
2078 - default:
2079 + case ad7992:
2080 + case ad7993:
2081 + case ad7994:
2082 return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
2083 + default:
2084 + /* No readback support */
2085 + return st->config;
2086 }
2087 }
2088
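Instead of funneling every non-ad7997/ad7998 part into an SMBus byte transfer, the ad799x switches above name the variants that actually expose an I2C config register (ad7992/ad7993/ad7994) and give the remaining parts a soft fallback: writes are cached in st->config and applied at conversion time, and reads return the cache since there is no readback. A reduced sketch of that cached-register idiom, with hypothetical bus helpers:

#include <stdint.h>

struct adc_state {
	uint16_t config;	/* cached copy for parts with no readback */
	int has_config_reg;
};

static int bus_write(struct adc_state *st, uint16_t val) { (void)st; (void)val; return 0; }
static uint16_t bus_read(struct adc_state *st) { (void)st; return 0; }

static int write_config(struct adc_state *st, uint16_t val)
{
	if (st->has_config_reg)
		return bus_write(st, val);
	st->config = val;	/* applied later, when a conversion runs */
	return 0;
}

static uint16_t read_config(struct adc_state *st)
{
	return st->has_config_reg ? bus_read(st) : st->config;
}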
2089 diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
2090 index cd4174ca9a76..f14c3849e568 100644
2091 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h
2092 +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
2093 @@ -432,6 +432,7 @@ struct fast_reg_descriptor {
2094 * @cma_id: rdma_cm connection manager handle

2095 * @qp: Connection Queue-pair
2096 * @post_recv_buf_count: post receive counter
2097 + * @sig_count: send work request signal count
2098 * @rx_wr: receive work request for batch posts
2099 * @device: reference to iser device
2100 * @comp: iser completion context
2101 @@ -452,6 +453,7 @@ struct ib_conn {
2102 struct rdma_cm_id *cma_id;
2103 struct ib_qp *qp;
2104 int post_recv_buf_count;
2105 + u8 sig_count;
2106 struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
2107 struct iser_device *device;
2108 struct iser_comp *comp;
2109 diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
2110 index 5a489ea63732..3821633f1065 100644
2111 --- a/drivers/infiniband/ulp/iser/iser_initiator.c
2112 +++ b/drivers/infiniband/ulp/iser/iser_initiator.c
2113 @@ -369,7 +369,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
2114 return 0;
2115 }
2116
2117 -static inline bool iser_signal_comp(int sig_count)
2118 +static inline bool iser_signal_comp(u8 sig_count)
2119 {
2120 return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
2121 }
2122 @@ -388,7 +388,7 @@ int iser_send_command(struct iscsi_conn *conn,
2123 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2124 struct scsi_cmnd *sc = task->sc;
2125 struct iser_tx_desc *tx_desc = &iser_task->desc;
2126 - static unsigned sig_count;
2127 + u8 sig_count = ++iser_conn->ib_conn.sig_count;
2128
2129 edtl = ntohl(hdr->data_length);
2130
2131 @@ -435,7 +435,7 @@ int iser_send_command(struct iscsi_conn *conn,
2132 iser_task->status = ISER_TASK_STATUS_STARTED;
2133
2134 err = iser_post_send(&iser_conn->ib_conn, tx_desc,
2135 - iser_signal_comp(++sig_count));
2136 + iser_signal_comp(sig_count));
2137 if (!err)
2138 return 0;
2139
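The iser change above moves the completion-signaling counter from a function-local static in iser_send_command() (shared, unsynchronized, across every connection) into the per-connection ib_conn, shrunk to u8. Only the value modulo the signaling interval matters, so the u8 wraparound is harmless provided the interval divides 256, which appears to be the intent here. Sketched in isolation; the interval value is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define SIGNAL_CMD_COUNT 32	/* illustrative; should divide 256 for a clean u8 wrap */

struct conn {
	uint8_t sig_count;	/* per-connection, replacing the shared static */
};

/* Request a signaled completion every SIGNAL_CMD_COUNT sends. */
static bool signal_comp(struct conn *c)
{
	return (++c->sig_count % SIGNAL_CMD_COUNT) == 0;
}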
2140 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
2141 index 10641b7816f4..a6daabc70425 100644
2142 --- a/drivers/infiniband/ulp/isert/ib_isert.c
2143 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
2144 @@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
2145 static LIST_HEAD(device_list);
2146 static struct workqueue_struct *isert_rx_wq;
2147 static struct workqueue_struct *isert_comp_wq;
2148 +static struct workqueue_struct *isert_release_wq;
2149
2150 static void
2151 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
2152 @@ -54,6 +55,19 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2153 struct isert_rdma_wr *wr);
2154 static int
2155 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
2156 +static int
2157 +isert_rdma_post_recvl(struct isert_conn *isert_conn);
2158 +static int
2159 +isert_rdma_accept(struct isert_conn *isert_conn);
2160 +struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
2161 +
2162 +static inline bool
2163 +isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
2164 +{
2165 + return (conn->pi_support &&
2166 + cmd->prot_op != TARGET_PROT_NORMAL);
2167 +}
2168 +
2169
2170 static void
2171 isert_qp_event_callback(struct ib_event *e, void *context)
2172 @@ -90,8 +104,7 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
2173 }
2174
2175 static int
2176 -isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2177 - u8 protection)
2178 +isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
2179 {
2180 struct isert_device *device = isert_conn->conn_device;
2181 struct ib_qp_init_attr attr;
2182 @@ -126,7 +139,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2183 attr.cap.max_recv_sge = 1;
2184 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
2185 attr.qp_type = IB_QPT_RC;
2186 - if (protection)
2187 + if (device->pi_capable)
2188 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
2189
2190 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
2191 @@ -137,12 +150,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
2192 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
2193 if (ret) {
2194 pr_err("rdma_create_qp failed for cma_id %d\n", ret);
2195 - return ret;
2196 + goto err;
2197 }
2198 isert_conn->conn_qp = cma_id->qp;
2199 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
2200
2201 return 0;
2202 +err:
2203 + mutex_lock(&device_list_mutex);
2204 + device->cq_active_qps[min_index]--;
2205 + mutex_unlock(&device_list_mutex);
2206 +
2207 + return ret;
2208 }
2209
2210 static void
2211 @@ -430,8 +449,68 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
2212 }
2213
2214 static int
2215 +isert_create_pi_ctx(struct fast_reg_descriptor *desc,
2216 + struct ib_device *device,
2217 + struct ib_pd *pd)
2218 +{
2219 + struct ib_mr_init_attr mr_init_attr;
2220 + struct pi_context *pi_ctx;
2221 + int ret;
2222 +
2223 + pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
2224 + if (!pi_ctx) {
2225 + pr_err("Failed to allocate pi context\n");
2226 + return -ENOMEM;
2227 + }
2228 +
2229 + pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
2230 + ISCSI_ISER_SG_TABLESIZE);
2231 + if (IS_ERR(pi_ctx->prot_frpl)) {
2232 + pr_err("Failed to allocate prot frpl err=%ld\n",
2233 + PTR_ERR(pi_ctx->prot_frpl));
2234 + ret = PTR_ERR(pi_ctx->prot_frpl);
2235 + goto err_pi_ctx;
2236 + }
2237 +
2238 + pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
2239 + if (IS_ERR(pi_ctx->prot_mr)) {
2240 + pr_err("Failed to allocate prot frmr err=%ld\n",
2241 + PTR_ERR(pi_ctx->prot_mr));
2242 + ret = PTR_ERR(pi_ctx->prot_mr);
2243 + goto err_prot_frpl;
2244 + }
2245 + desc->ind |= ISERT_PROT_KEY_VALID;
2246 +
2247 + memset(&mr_init_attr, 0, sizeof(mr_init_attr));
2248 + mr_init_attr.max_reg_descriptors = 2;
2249 + mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
2250 + pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
2251 + if (IS_ERR(pi_ctx->sig_mr)) {
2252 + pr_err("Failed to allocate signature enabled mr err=%ld\n",
2253 + PTR_ERR(pi_ctx->sig_mr));
2254 + ret = PTR_ERR(pi_ctx->sig_mr);
2255 + goto err_prot_mr;
2256 + }
2257 +
2258 + desc->pi_ctx = pi_ctx;
2259 + desc->ind |= ISERT_SIG_KEY_VALID;
2260 + desc->ind &= ~ISERT_PROTECTED;
2261 +
2262 + return 0;
2263 +
2264 +err_prot_mr:
2265 + ib_dereg_mr(desc->pi_ctx->prot_mr);
2266 +err_prot_frpl:
2267 + ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
2268 +err_pi_ctx:
2269 + kfree(desc->pi_ctx);
2270 +
2271 + return ret;
2272 +}
2273 +
2274 +static int
2275 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
2276 - struct fast_reg_descriptor *fr_desc, u8 protection)
2277 + struct fast_reg_descriptor *fr_desc)
2278 {
2279 int ret;
2280
2281 @@ -450,62 +529,12 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
2282 ret = PTR_ERR(fr_desc->data_mr);
2283 goto err_data_frpl;
2284 }
2285 - pr_debug("Create fr_desc %p page_list %p\n",
2286 - fr_desc, fr_desc->data_frpl->page_list);
2287 fr_desc->ind |= ISERT_DATA_KEY_VALID;
2288
2289 - if (protection) {
2290 - struct ib_mr_init_attr mr_init_attr = {0};
2291 - struct pi_context *pi_ctx;
2292 -
2293 - fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
2294 - if (!fr_desc->pi_ctx) {
2295 - pr_err("Failed to allocate pi context\n");
2296 - ret = -ENOMEM;
2297 - goto err_data_mr;
2298 - }
2299 - pi_ctx = fr_desc->pi_ctx;
2300 -
2301 - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
2302 - ISCSI_ISER_SG_TABLESIZE);
2303 - if (IS_ERR(pi_ctx->prot_frpl)) {
2304 - pr_err("Failed to allocate prot frpl err=%ld\n",
2305 - PTR_ERR(pi_ctx->prot_frpl));
2306 - ret = PTR_ERR(pi_ctx->prot_frpl);
2307 - goto err_pi_ctx;
2308 - }
2309 -
2310 - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
2311 - if (IS_ERR(pi_ctx->prot_mr)) {
2312 - pr_err("Failed to allocate prot frmr err=%ld\n",
2313 - PTR_ERR(pi_ctx->prot_mr));
2314 - ret = PTR_ERR(pi_ctx->prot_mr);
2315 - goto err_prot_frpl;
2316 - }
2317 - fr_desc->ind |= ISERT_PROT_KEY_VALID;
2318 -
2319 - mr_init_attr.max_reg_descriptors = 2;
2320 - mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
2321 - pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
2322 - if (IS_ERR(pi_ctx->sig_mr)) {
2323 - pr_err("Failed to allocate signature enabled mr err=%ld\n",
2324 - PTR_ERR(pi_ctx->sig_mr));
2325 - ret = PTR_ERR(pi_ctx->sig_mr);
2326 - goto err_prot_mr;
2327 - }
2328 - fr_desc->ind |= ISERT_SIG_KEY_VALID;
2329 - }
2330 - fr_desc->ind &= ~ISERT_PROTECTED;
2331 + pr_debug("Created fr_desc %p\n", fr_desc);
2332
2333 return 0;
2334 -err_prot_mr:
2335 - ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
2336 -err_prot_frpl:
2337 - ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
2338 -err_pi_ctx:
2339 - kfree(fr_desc->pi_ctx);
2340 -err_data_mr:
2341 - ib_dereg_mr(fr_desc->data_mr);
2342 +
2343 err_data_frpl:
2344 ib_free_fast_reg_page_list(fr_desc->data_frpl);
2345
2346 @@ -513,7 +542,7 @@ err_data_frpl:
2347 }
2348
2349 static int
2350 -isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
2351 +isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
2352 {
2353 struct fast_reg_descriptor *fr_desc;
2354 struct isert_device *device = isert_conn->conn_device;
2355 @@ -537,8 +566,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
2356 }
2357
2358 ret = isert_create_fr_desc(device->ib_device,
2359 - isert_conn->conn_pd, fr_desc,
2360 - pi_support);
2361 + isert_conn->conn_pd, fr_desc);
2362 if (ret) {
2363 pr_err("Failed to create fastreg descriptor err=%d\n",
2364 ret);
2365 @@ -563,13 +591,12 @@ err:
2366 static int
2367 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2368 {
2369 - struct iscsi_np *np = cma_id->context;
2370 - struct isert_np *isert_np = np->np_context;
2371 + struct isert_np *isert_np = cma_id->context;
2372 + struct iscsi_np *np = isert_np->np;
2373 struct isert_conn *isert_conn;
2374 struct isert_device *device;
2375 struct ib_device *ib_dev = cma_id->device;
2376 int ret = 0;
2377 - u8 pi_support;
2378
2379 spin_lock_bh(&np->np_thread_lock);
2380 if (!np->enabled) {
2381 @@ -590,6 +617,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2382 isert_conn->state = ISER_CONN_INIT;
2383 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
2384 init_completion(&isert_conn->conn_login_comp);
2385 + init_completion(&isert_conn->login_req_comp);
2386 init_completion(&isert_conn->conn_wait);
2387 init_completion(&isert_conn->conn_wait_comp_err);
2388 kref_init(&isert_conn->conn_kref);
2389 @@ -597,7 +625,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2390 spin_lock_init(&isert_conn->conn_lock);
2391 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
2392
2393 - cma_id->context = isert_conn;
2394 isert_conn->conn_cm_id = cma_id;
2395
2396 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
2397 @@ -669,15 +696,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2398 goto out_mr;
2399 }
2400
2401 - pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
2402 - if (pi_support && !device->pi_capable) {
2403 - pr_err("Protection information requested but not supported, "
2404 - "rejecting connect request\n");
2405 - ret = rdma_reject(cma_id, NULL, 0);
2406 - goto out_mr;
2407 - }
2408 + ret = isert_conn_setup_qp(isert_conn, cma_id);
2409 + if (ret)
2410 + goto out_conn_dev;
2411
2412 - ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
2413 + ret = isert_rdma_post_recvl(isert_conn);
2414 + if (ret)
2415 + goto out_conn_dev;
2416 +
2417 + ret = isert_rdma_accept(isert_conn);
2418 if (ret)
2419 goto out_conn_dev;
2420
2421 @@ -705,6 +732,7 @@ out_login_buf:
2422 kfree(isert_conn->login_buf);
2423 out:
2424 kfree(isert_conn);
2425 + rdma_reject(cma_id, NULL, 0);
2426 return ret;
2427 }
2428
2429 @@ -720,18 +748,20 @@ isert_connect_release(struct isert_conn *isert_conn)
2430 if (device && device->use_fastreg)
2431 isert_conn_free_fastreg_pool(isert_conn);
2432
2433 + isert_free_rx_descriptors(isert_conn);
2434 + rdma_destroy_id(isert_conn->conn_cm_id);
2435 +
2436 if (isert_conn->conn_qp) {
2437 cq_index = ((struct isert_cq_desc *)
2438 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
2439 pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
2440 + mutex_lock(&device_list_mutex);
2441 isert_conn->conn_device->cq_active_qps[cq_index]--;
2442 + mutex_unlock(&device_list_mutex);
2443
2444 - rdma_destroy_qp(isert_conn->conn_cm_id);
2445 + ib_destroy_qp(isert_conn->conn_qp);
2446 }
2447
2448 - isert_free_rx_descriptors(isert_conn);
2449 - rdma_destroy_id(isert_conn->conn_cm_id);
2450 -
2451 ib_dereg_mr(isert_conn->conn_mr);
2452 ib_dealloc_pd(isert_conn->conn_pd);
2453
2454 @@ -754,9 +784,19 @@ isert_connect_release(struct isert_conn *isert_conn)
2455 static void
2456 isert_connected_handler(struct rdma_cm_id *cma_id)
2457 {
2458 - struct isert_conn *isert_conn = cma_id->context;
2459 + struct isert_conn *isert_conn = cma_id->qp->qp_context;
2460 +
2461 + pr_info("conn %p\n", isert_conn);
2462
2463 - kref_get(&isert_conn->conn_kref);
2464 + if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
2465 + pr_warn("conn %p connect_release is running\n", isert_conn);
2466 + return;
2467 + }
2468 +
2469 + mutex_lock(&isert_conn->conn_mutex);
2470 + if (isert_conn->state != ISER_CONN_FULL_FEATURE)
2471 + isert_conn->state = ISER_CONN_UP;
2472 + mutex_unlock(&isert_conn->conn_mutex);
2473 }
2474
2475 static void
2476 @@ -777,65 +817,108 @@ isert_put_conn(struct isert_conn *isert_conn)
2477 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
2478 }
2479
2480 +/**
2481 + * isert_conn_terminate() - Initiate connection termination
2482 + * @isert_conn: isert connection struct
2483 + *
2484 + * Notes:
2485 + * In case the connection state is FULL_FEATURE, move state
2486 + * to TERMINATING and start teardown sequence (rdma_disconnect).
2487 + * In case the connection state is UP, complete flush as well.
2488 + *
2489 + * This routine must be called with conn_mutex held. Thus it is
2490 + * safe to call multiple times.
2491 + */
2492 static void
2493 -isert_disconnect_work(struct work_struct *work)
2494 +isert_conn_terminate(struct isert_conn *isert_conn)
2495 {
2496 - struct isert_conn *isert_conn = container_of(work,
2497 - struct isert_conn, conn_logout_work);
2498 + int err;
2499
2500 - pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
2501 - mutex_lock(&isert_conn->conn_mutex);
2502 - if (isert_conn->state == ISER_CONN_UP)
2503 + switch (isert_conn->state) {
2504 + case ISER_CONN_TERMINATING:
2505 + break;
2506 + case ISER_CONN_UP:
2507 + /*
2508 + * No flush completions will occur as we didn't
2509 + * get to ISER_CONN_FULL_FEATURE yet, complete
2510 + * to allow teardown progress.
2511 + */
2512 + complete(&isert_conn->conn_wait_comp_err);
2513 + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
2514 + pr_info("Terminating conn %p state %d\n",
2515 + isert_conn, isert_conn->state);
2516 isert_conn->state = ISER_CONN_TERMINATING;
2517 -
2518 - if (isert_conn->post_recv_buf_count == 0 &&
2519 - atomic_read(&isert_conn->post_send_buf_count) == 0) {
2520 - mutex_unlock(&isert_conn->conn_mutex);
2521 - goto wake_up;
2522 - }
2523 - if (!isert_conn->conn_cm_id) {
2524 - mutex_unlock(&isert_conn->conn_mutex);
2525 - isert_put_conn(isert_conn);
2526 - return;
2527 + err = rdma_disconnect(isert_conn->conn_cm_id);
2528 + if (err)
2529 + pr_warn("Failed rdma_disconnect isert_conn %p\n",
2530 + isert_conn);
2531 + break;
2532 + default:
2533 + pr_warn("conn %p teminating in state %d\n",
2534 + isert_conn, isert_conn->state);
2535 }
2536 +}
2537
2538 - if (isert_conn->disconnect) {
2539 - /* Send DREQ/DREP towards our initiator */
2540 - rdma_disconnect(isert_conn->conn_cm_id);
2541 - }
2542 +static int
2543 +isert_np_cma_handler(struct isert_np *isert_np,
2544 + enum rdma_cm_event_type event)
2545 +{
2546 + pr_debug("isert np %p, handling event %d\n", isert_np, event);
2547
2548 - mutex_unlock(&isert_conn->conn_mutex);
2549 + switch (event) {
2550 + case RDMA_CM_EVENT_DEVICE_REMOVAL:
2551 + isert_np->np_cm_id = NULL;
2552 + break;
2553 + case RDMA_CM_EVENT_ADDR_CHANGE:
2554 + isert_np->np_cm_id = isert_setup_id(isert_np);
2555 + if (IS_ERR(isert_np->np_cm_id)) {
2556 + pr_err("isert np %p setup id failed: %ld\n",
2557 + isert_np, PTR_ERR(isert_np->np_cm_id));
2558 + isert_np->np_cm_id = NULL;
2559 + }
2560 + break;
2561 + default:
2562 + pr_err("isert np %p Unexpected event %d\n",
2563 + isert_np, event);
2564 + }
2565
2566 -wake_up:
2567 - complete(&isert_conn->conn_wait);
2568 + return -1;
2569 }
2570
2571 static int
2572 -isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
2573 +isert_disconnected_handler(struct rdma_cm_id *cma_id,
2574 + enum rdma_cm_event_type event)
2575 {
2576 + struct isert_np *isert_np = cma_id->context;
2577 struct isert_conn *isert_conn;
2578
2579 - if (!cma_id->qp) {
2580 - struct isert_np *isert_np = cma_id->context;
2581 + if (isert_np->np_cm_id == cma_id)
2582 + return isert_np_cma_handler(cma_id->context, event);
2583
2584 - isert_np->np_cm_id = NULL;
2585 - return -1;
2586 - }
2587 + isert_conn = cma_id->qp->qp_context;
2588
2589 - isert_conn = (struct isert_conn *)cma_id->context;
2590 + mutex_lock(&isert_conn->conn_mutex);
2591 + isert_conn_terminate(isert_conn);
2592 + mutex_unlock(&isert_conn->conn_mutex);
2593
2594 - isert_conn->disconnect = disconnect;
2595 - INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
2596 - schedule_work(&isert_conn->conn_logout_work);
2597 + pr_info("conn %p completing conn_wait\n", isert_conn);
2598 + complete(&isert_conn->conn_wait);
2599
2600 return 0;
2601 }
2602
2603 +static void
2604 +isert_connect_error(struct rdma_cm_id *cma_id)
2605 +{
2606 + struct isert_conn *isert_conn = cma_id->qp->qp_context;
2607 +
2608 + isert_put_conn(isert_conn);
2609 +}
2610 +
2611 static int
2612 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2613 {
2614 int ret = 0;
2615 - bool disconnect = false;
2616
2617 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
2618 event->event, event->status, cma_id->context, cma_id);
2619 @@ -853,11 +936,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2620 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
2621 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
2622 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
2623 - disconnect = true;
2624 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
2625 - ret = isert_disconnected_handler(cma_id, disconnect);
2626 + ret = isert_disconnected_handler(cma_id, event->event);
2627 break;
2628 + case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
2629 + case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
2630 case RDMA_CM_EVENT_CONNECT_ERROR:
2631 + isert_connect_error(cma_id);
2632 + break;
2633 default:
2634 pr_err("Unhandled RDMA CMA event: %d\n", event->event);
2635 break;
2636 @@ -991,7 +1077,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2637 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
2638 */
2639 mutex_lock(&isert_conn->conn_mutex);
2640 - if (coalesce && isert_conn->state == ISER_CONN_UP &&
2641 + if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE &&
2642 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
2643 tx_desc->llnode_active = true;
2644 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
2645 @@ -1072,11 +1158,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2646 if (login->login_complete) {
2647 if (!conn->sess->sess_ops->SessionType &&
2648 isert_conn->conn_device->use_fastreg) {
2649 - /* Normal Session and fastreg is used */
2650 - u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
2651 -
2652 - ret = isert_conn_create_fastreg_pool(isert_conn,
2653 - pi_support);
2654 + ret = isert_conn_create_fastreg_pool(isert_conn);
2655 if (ret) {
2656 pr_err("Conn: %p failed to create"
2657 " fastreg pool\n", isert_conn);
2658 @@ -1092,7 +1174,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2659 if (ret)
2660 return ret;
2661
2662 - isert_conn->state = ISER_CONN_UP;
2663 + /* Now we are in FULL_FEATURE phase */
2664 + mutex_lock(&isert_conn->conn_mutex);
2665 + isert_conn->state = ISER_CONN_FULL_FEATURE;
2666 + mutex_unlock(&isert_conn->conn_mutex);
2667 goto post_send;
2668 }
2669
2670 @@ -1109,18 +1194,17 @@ post_send:
2671 }
2672
2673 static void
2674 -isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
2675 - struct isert_conn *isert_conn)
2676 +isert_rx_login_req(struct isert_conn *isert_conn)
2677 {
2678 + struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
2679 + int rx_buflen = isert_conn->login_req_len;
2680 struct iscsi_conn *conn = isert_conn->conn;
2681 struct iscsi_login *login = conn->conn_login;
2682 int size;
2683
2684 - if (!login) {
2685 - pr_err("conn->conn_login is NULL\n");
2686 - dump_stack();
2687 - return;
2688 - }
2689 + pr_info("conn %p\n", isert_conn);
2690 +
2691 + WARN_ON_ONCE(!login);
2692
2693 if (login->first_request) {
2694 struct iscsi_login_req *login_req =
2695 @@ -1483,11 +1567,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
2696 hdr->opcode, hdr->itt, hdr->flags,
2697 (int)(xfer_len - ISER_HEADERS_LEN));
2698
2699 - if ((char *)desc == isert_conn->login_req_buf)
2700 - isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
2701 - isert_conn);
2702 - else
2703 + if ((char *)desc == isert_conn->login_req_buf) {
2704 + isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
2705 + if (isert_conn->conn) {
2706 + struct iscsi_login *login = isert_conn->conn->conn_login;
2707 +
2708 + if (login && !login->first_request)
2709 + isert_rx_login_req(isert_conn);
2710 + }
2711 + mutex_lock(&isert_conn->conn_mutex);
2712 + complete(&isert_conn->login_req_comp);
2713 + mutex_unlock(&isert_conn->conn_mutex);
2714 + } else {
2715 isert_rx_do_work(desc, isert_conn);
2716 + }
2717
2718 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
2719 DMA_FROM_DEVICE);
2720 @@ -2046,7 +2139,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2721 msleep(3000);
2722
2723 mutex_lock(&isert_conn->conn_mutex);
2724 - isert_conn->state = ISER_CONN_DOWN;
2725 + isert_conn_terminate(isert_conn);
2726 mutex_unlock(&isert_conn->conn_mutex);
2727
2728 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2729 @@ -2231,8 +2324,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
2730 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2731 struct isert_device *device = isert_conn->conn_device;
2732
2733 - if (device->pi_capable)
2734 - return TARGET_PROT_ALL;
2735 + if (conn->tpg->tpg_attrib.t10_pi) {
2736 + if (device->pi_capable) {
2737 + pr_info("conn %p PI offload enabled\n", isert_conn);
2738 + isert_conn->pi_support = true;
2739 + return TARGET_PROT_ALL;
2740 + }
2741 + }
2742 +
2743 + pr_info("conn %p PI offload disabled\n", isert_conn);
2744 + isert_conn->pi_support = false;
2745
2746 return TARGET_PROT_NORMAL;
2747 }
2748 @@ -2681,10 +2782,10 @@ isert_set_prot_checks(u8 prot_checks)
2749 }
2750
2751 static int
2752 -isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2753 - struct fast_reg_descriptor *fr_desc,
2754 - struct ib_sge *data_sge, struct ib_sge *prot_sge,
2755 - struct ib_sge *sig_sge)
2756 +isert_reg_sig_mr(struct isert_conn *isert_conn,
2757 + struct se_cmd *se_cmd,
2758 + struct isert_rdma_wr *rdma_wr,
2759 + struct fast_reg_descriptor *fr_desc)
2760 {
2761 struct ib_send_wr sig_wr, inv_wr;
2762 struct ib_send_wr *bad_wr, *wr = NULL;
2763 @@ -2714,13 +2815,13 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2764 memset(&sig_wr, 0, sizeof(sig_wr));
2765 sig_wr.opcode = IB_WR_REG_SIG_MR;
2766 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2767 - sig_wr.sg_list = data_sge;
2768 + sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
2769 sig_wr.num_sge = 1;
2770 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2771 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2772 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2773 if (se_cmd->t_prot_sg)
2774 - sig_wr.wr.sig_handover.prot = prot_sge;
2775 + sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
2776
2777 if (!wr)
2778 wr = &sig_wr;
2779 @@ -2734,34 +2835,93 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2780 }
2781 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2782
2783 - sig_sge->lkey = pi_ctx->sig_mr->lkey;
2784 - sig_sge->addr = 0;
2785 - sig_sge->length = se_cmd->data_length;
2786 + rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2787 + rdma_wr->ib_sg[SIG].addr = 0;
2788 + rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2789 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2790 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2791 /*
2792 * We have protection guards on the wire
2793 * so we need to set a larger transfer
2794 */
2795 - sig_sge->length += se_cmd->prot_length;
2796 + rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2797
2798 pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2799 - sig_sge->addr, sig_sge->length,
2800 - sig_sge->lkey);
2801 + rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2802 + rdma_wr->ib_sg[SIG].lkey);
2803 err:
2804 return ret;
2805 }
2806
2807 static int
2808 +isert_handle_prot_cmd(struct isert_conn *isert_conn,
2809 + struct isert_cmd *isert_cmd,
2810 + struct isert_rdma_wr *wr)
2811 +{
2812 + struct isert_device *device = isert_conn->conn_device;
2813 + struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2814 + int ret;
2815 +
2816 + if (!wr->fr_desc->pi_ctx) {
2817 + ret = isert_create_pi_ctx(wr->fr_desc,
2818 + device->ib_device,
2819 + isert_conn->conn_pd);
2820 + if (ret) {
2821 + pr_err("conn %p failed to allocate pi_ctx\n",
2822 + isert_conn);
2823 + return ret;
2824 + }
2825 + }
2826 +
2827 + if (se_cmd->t_prot_sg) {
2828 + ret = isert_map_data_buf(isert_conn, isert_cmd,
2829 + se_cmd->t_prot_sg,
2830 + se_cmd->t_prot_nents,
2831 + se_cmd->prot_length,
2832 + 0, wr->iser_ib_op, &wr->prot);
2833 + if (ret) {
2834 + pr_err("conn %p failed to map protection buffer\n",
2835 + isert_conn);
2836 + return ret;
2837 + }
2838 +
2839 + memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2840 + ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2841 + ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2842 + if (ret) {
2843 + pr_err("conn %p failed to fast reg mr\n",
2844 + isert_conn);
2845 + goto unmap_prot_cmd;
2846 + }
2847 + }
2848 +
2849 + ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2850 + if (ret) {
2851 + pr_err("conn %p failed to fast reg mr\n",
2852 + isert_conn);
2853 + goto unmap_prot_cmd;
2854 + }
2855 + wr->fr_desc->ind |= ISERT_PROTECTED;
2856 +
2857 + return 0;
2858 +
2859 +unmap_prot_cmd:
2860 + if (se_cmd->t_prot_sg)
2861 + isert_unmap_data_buf(isert_conn, &wr->prot);
2862 +
2863 + return ret;
2864 +}
2865 +
2866 +static int
2867 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2868 struct isert_rdma_wr *wr)
2869 {
2870 struct se_cmd *se_cmd = &cmd->se_cmd;
2871 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2872 struct isert_conn *isert_conn = conn->context;
2873 - struct ib_sge data_sge;
2874 - struct ib_send_wr *send_wr;
2875 struct fast_reg_descriptor *fr_desc = NULL;
2876 + struct ib_send_wr *send_wr;
2877 + struct ib_sge *ib_sg;
2878 u32 offset;
2879 int ret = 0;
2880 unsigned long flags;
2881 @@ -2775,8 +2935,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2882 if (ret)
2883 return ret;
2884
2885 - if (wr->data.dma_nents != 1 ||
2886 - se_cmd->prot_op != TARGET_PROT_NORMAL) {
2887 + if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2888 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2889 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2890 struct fast_reg_descriptor, list);
2891 @@ -2786,38 +2945,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2892 }
2893
2894 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2895 - ISERT_DATA_KEY_VALID, &data_sge);
2896 + ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
2897 if (ret)
2898 goto unmap_cmd;
2899
2900 - if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
2901 - struct ib_sge prot_sge, sig_sge;
2902 -
2903 - if (se_cmd->t_prot_sg) {
2904 - ret = isert_map_data_buf(isert_conn, isert_cmd,
2905 - se_cmd->t_prot_sg,
2906 - se_cmd->t_prot_nents,
2907 - se_cmd->prot_length,
2908 - 0, wr->iser_ib_op, &wr->prot);
2909 - if (ret)
2910 - goto unmap_cmd;
2911 -
2912 - ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
2913 - ISERT_PROT_KEY_VALID, &prot_sge);
2914 - if (ret)
2915 - goto unmap_prot_cmd;
2916 - }
2917 -
2918 - ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
2919 - &data_sge, &prot_sge, &sig_sge);
2920 + if (isert_prot_cmd(isert_conn, se_cmd)) {
2921 + ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
2922 if (ret)
2923 - goto unmap_prot_cmd;
2924 + goto unmap_cmd;
2925
2926 - fr_desc->ind |= ISERT_PROTECTED;
2927 - memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
2928 - } else
2929 - memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
2930 + ib_sg = &wr->ib_sg[SIG];
2931 + } else {
2932 + ib_sg = &wr->ib_sg[DATA];
2933 + }
2934
2935 + memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2936 wr->ib_sge = &wr->s_ib_sge;
2937 wr->send_wr_num = 1;
2938 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2939 @@ -2832,7 +2974,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2940 send_wr->opcode = IB_WR_RDMA_WRITE;
2941 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2942 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2943 - send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
2944 + send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2945 0 : IB_SEND_SIGNALED;
2946 } else {
2947 send_wr->opcode = IB_WR_RDMA_READ;
2948 @@ -2842,9 +2984,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2949 }
2950
2951 return 0;
2952 -unmap_prot_cmd:
2953 - if (se_cmd->t_prot_sg)
2954 - isert_unmap_data_buf(isert_conn, &wr->prot);
2955 +
2956 unmap_cmd:
2957 if (fr_desc) {
2958 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2959 @@ -2876,7 +3016,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2960 return rc;
2961 }
2962
2963 - if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
2964 + if (!isert_prot_cmd(isert_conn, se_cmd)) {
2965 /*
2966 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2967 */
2968 @@ -2899,7 +3039,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2969 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2970 }
2971
2972 - if (se_cmd->prot_op == TARGET_PROT_NORMAL)
2973 + if (!isert_prot_cmd(isert_conn, se_cmd))
2974 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2975 "READ\n", isert_cmd);
2976 else
2977 @@ -3001,13 +3141,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2978 return ret;
2979 }
2980
2981 +struct rdma_cm_id *
2982 +isert_setup_id(struct isert_np *isert_np)
2983 +{
2984 + struct iscsi_np *np = isert_np->np;
2985 + struct rdma_cm_id *id;
2986 + struct sockaddr *sa;
2987 + int ret;
2988 +
2989 + sa = (struct sockaddr *)&np->np_sockaddr;
2990 + pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2991 +
2992 + id = rdma_create_id(isert_cma_handler, isert_np,
2993 + RDMA_PS_TCP, IB_QPT_RC);
2994 + if (IS_ERR(id)) {
2995 + pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2996 + ret = PTR_ERR(id);
2997 + goto out;
2998 + }
2999 + pr_debug("id %p context %p\n", id, id->context);
3000 +
3001 + ret = rdma_bind_addr(id, sa);
3002 + if (ret) {
3003 + pr_err("rdma_bind_addr() failed: %d\n", ret);
3004 + goto out_id;
3005 + }
3006 +
3007 + ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
3008 + if (ret) {
3009 + pr_err("rdma_listen() failed: %d\n", ret);
3010 + goto out_id;
3011 + }
3012 +
3013 + return id;
3014 +out_id:
3015 + rdma_destroy_id(id);
3016 +out:
3017 + return ERR_PTR(ret);
3018 +}
3019 +
3020 static int
3021 isert_setup_np(struct iscsi_np *np,
3022 struct __kernel_sockaddr_storage *ksockaddr)
3023 {
3024 struct isert_np *isert_np;
3025 struct rdma_cm_id *isert_lid;
3026 - struct sockaddr *sa;
3027 int ret;
3028
3029 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3030 @@ -3019,9 +3197,8 @@ isert_setup_np(struct iscsi_np *np,
3031 mutex_init(&isert_np->np_accept_mutex);
3032 INIT_LIST_HEAD(&isert_np->np_accept_list);
3033 init_completion(&isert_np->np_login_comp);
3034 + isert_np->np = np;
3035
3036 - sa = (struct sockaddr *)ksockaddr;
3037 - pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
3038 /*
3039 * Setup the np->np_sockaddr from the passed sockaddr setup
3040 * in iscsi_target_configfs.c code..
3041 @@ -3029,37 +3206,20 @@ isert_setup_np(struct iscsi_np *np,
3042 memcpy(&np->np_sockaddr, ksockaddr,
3043 sizeof(struct __kernel_sockaddr_storage));
3044
3045 - isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
3046 - IB_QPT_RC);
3047 + isert_lid = isert_setup_id(isert_np);
3048 if (IS_ERR(isert_lid)) {
3049 - pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
3050 - PTR_ERR(isert_lid));
3051 ret = PTR_ERR(isert_lid);
3052 goto out;
3053 }
3054
3055 - ret = rdma_bind_addr(isert_lid, sa);
3056 - if (ret) {
3057 - pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
3058 - goto out_lid;
3059 - }
3060 -
3061 - ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
3062 - if (ret) {
3063 - pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
3064 - goto out_lid;
3065 - }
3066 -
3067 isert_np->np_cm_id = isert_lid;
3068 np->np_context = isert_np;
3069 - pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
3070
3071 return 0;
3072
3073 -out_lid:
3074 - rdma_destroy_id(isert_lid);
3075 out:
3076 kfree(isert_np);
3077 +
3078 return ret;
3079 }
3080
3081 @@ -3094,7 +3254,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3082 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3083 int ret;
3084
3085 - pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
3086 + pr_info("before login_req comp conn: %p\n", isert_conn);
3087 + ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3088 + if (ret) {
3089 + pr_err("isert_conn %p interrupted before got login req\n",
3090 + isert_conn);
3091 + return ret;
3092 + }
3093 + reinit_completion(&isert_conn->login_req_comp);
3094 +
3095 /*
3096 * For login requests after the first PDU, isert_rx_login_req() will
3097 * kick schedule_delayed_work(&conn->login_work) as the packet is
3098 @@ -3104,11 +3272,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3099 if (!login->first_request)
3100 return 0;
3101
3102 + isert_rx_login_req(isert_conn);
3103 +
3104 + pr_info("before conn_login_comp conn: %p\n", conn);
3105 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3106 if (ret)
3107 return ret;
3108
3109 - pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
3110 + pr_info("processing login->req: %p\n", login->req);
3111 +
3112 return 0;
3113 }
3114
3115 @@ -3186,17 +3358,10 @@ accept_wait:
3116 isert_conn->conn = conn;
3117 max_accept = 0;
3118
3119 - ret = isert_rdma_post_recvl(isert_conn);
3120 - if (ret)
3121 - return ret;
3122 -
3123 - ret = isert_rdma_accept(isert_conn);
3124 - if (ret)
3125 - return ret;
3126 -
3127 isert_set_conn_info(np, conn, isert_conn);
3128
3129 - pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
3130 + pr_debug("Processing isert_conn: %p\n", isert_conn);
3131 +
3132 return 0;
3133 }
3134
3135 @@ -3212,6 +3377,24 @@ isert_free_np(struct iscsi_np *np)
3136 kfree(isert_np);
3137 }
3138
3139 +static void isert_release_work(struct work_struct *work)
3140 +{
3141 + struct isert_conn *isert_conn = container_of(work,
3142 + struct isert_conn,
3143 + release_work);
3144 +
3145 + pr_info("Starting release conn %p\n", isert_conn);
3146 +
3147 + wait_for_completion(&isert_conn->conn_wait);
3148 +
3149 + mutex_lock(&isert_conn->conn_mutex);
3150 + isert_conn->state = ISER_CONN_DOWN;
3151 + mutex_unlock(&isert_conn->conn_mutex);
3152 +
3153 + pr_info("Destroying conn %p\n", isert_conn);
3154 + isert_put_conn(isert_conn);
3155 +}
3156 +
3157 static void isert_wait_conn(struct iscsi_conn *conn)
3158 {
3159 struct isert_conn *isert_conn = conn->context;
3160 @@ -3219,10 +3402,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3161 pr_debug("isert_wait_conn: Starting \n");
3162
3163 mutex_lock(&isert_conn->conn_mutex);
3164 - if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
3165 - pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
3166 - rdma_disconnect(isert_conn->conn_cm_id);
3167 - }
3168 /*
3169 * Only wait for conn_wait_comp_err if the isert_conn made it
3170 * into full feature phase..
3171 @@ -3231,14 +3410,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3172 mutex_unlock(&isert_conn->conn_mutex);
3173 return;
3174 }
3175 - if (isert_conn->state == ISER_CONN_UP)
3176 - isert_conn->state = ISER_CONN_TERMINATING;
3177 + isert_conn_terminate(isert_conn);
3178 mutex_unlock(&isert_conn->conn_mutex);
3179
3180 wait_for_completion(&isert_conn->conn_wait_comp_err);
3181
3182 - wait_for_completion(&isert_conn->conn_wait);
3183 - isert_put_conn(isert_conn);
3184 + INIT_WORK(&isert_conn->release_work, isert_release_work);
3185 + queue_work(isert_release_wq, &isert_conn->release_work);
3186 }
3187
3188 static void isert_free_conn(struct iscsi_conn *conn)
3189 @@ -3286,10 +3464,21 @@ static int __init isert_init(void)
3190 goto destroy_rx_wq;
3191 }
3192
3193 + isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3194 + WQ_UNBOUND_MAX_ACTIVE);
3195 + if (!isert_release_wq) {
3196 + pr_err("Unable to allocate isert_release_wq\n");
3197 + ret = -ENOMEM;
3198 + goto destroy_comp_wq;
3199 + }
3200 +
3201 iscsit_register_transport(&iser_target_transport);
3202 - pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
3203 + pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3204 +
3205 return 0;
3206
3207 +destroy_comp_wq:
3208 + destroy_workqueue(isert_comp_wq);
3209 destroy_rx_wq:
3210 destroy_workqueue(isert_rx_wq);
3211 return ret;
3212 @@ -3298,6 +3487,7 @@ destroy_rx_wq:
3213 static void __exit isert_exit(void)
3214 {
3215 flush_scheduled_work();
3216 + destroy_workqueue(isert_release_wq);
3217 destroy_workqueue(isert_comp_wq);
3218 destroy_workqueue(isert_rx_wq);
3219 iscsit_unregister_transport(&iser_target_transport);
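
The teardown rework above moves the final wait-and-free out of isert_wait_conn() and onto a dedicated unbound workqueue, so a slow or unresponsive peer can no longer block the caller. A minimal sketch of that deferred-release pattern, using hypothetical demo_* names rather than the driver's own, could look like this:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct demo_conn {
        struct completion done;
        struct work_struct release_work;
};

static struct workqueue_struct *demo_release_wq;

static void demo_release_work(struct work_struct *work)
{
        struct demo_conn *conn = container_of(work, struct demo_conn,
                                              release_work);

        /* The potentially long sleep now happens in worker context. */
        wait_for_completion(&conn->done);
        kfree(conn);
}

static void demo_schedule_release(struct demo_conn *conn)
{
        INIT_WORK(&conn->release_work, demo_release_work);
        queue_work(demo_release_wq, &conn->release_work);
}

static int __init demo_init(void)
{
        /* WQ_UNBOUND because work items may sleep for a long time. */
        demo_release_wq = alloc_workqueue("demo_release_wq", WQ_UNBOUND,
                                          WQ_UNBOUND_MAX_ACTIVE);
        return demo_release_wq ? 0 : -ENOMEM;
}
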
3220 diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
3221 index 04f51f7bf614..141905f446dd 100644
3222 --- a/drivers/infiniband/ulp/isert/ib_isert.h
3223 +++ b/drivers/infiniband/ulp/isert/ib_isert.h
3224 @@ -23,6 +23,7 @@ enum iser_ib_op_code {
3225 enum iser_conn_state {
3226 ISER_CONN_INIT,
3227 ISER_CONN_UP,
3228 + ISER_CONN_FULL_FEATURE,
3229 ISER_CONN_TERMINATING,
3230 ISER_CONN_DOWN,
3231 };
3232 @@ -81,6 +82,12 @@ struct isert_data_buf {
3233 enum dma_data_direction dma_dir;
3234 };
3235
3236 +enum {
3237 + DATA = 0,
3238 + PROT = 1,
3239 + SIG = 2,
3240 +};
3241 +
3242 struct isert_rdma_wr {
3243 struct list_head wr_list;
3244 struct isert_cmd *isert_cmd;
3245 @@ -90,6 +97,7 @@ struct isert_rdma_wr {
3246 int send_wr_num;
3247 struct ib_send_wr *send_wr;
3248 struct ib_send_wr s_send_wr;
3249 + struct ib_sge ib_sg[3];
3250 struct isert_data_buf data;
3251 struct isert_data_buf prot;
3252 struct fast_reg_descriptor *fr_desc;
3253 @@ -120,11 +128,13 @@ struct isert_conn {
3254 atomic_t post_send_buf_count;
3255 u32 responder_resources;
3256 u32 initiator_depth;
3257 + bool pi_support;
3258 u32 max_sge;
3259 char *login_buf;
3260 char *login_req_buf;
3261 char *login_rsp_buf;
3262 u64 login_req_dma;
3263 + int login_req_len;
3264 u64 login_rsp_dma;
3265 unsigned int conn_rx_desc_head;
3266 struct iser_rx_desc *conn_rx_descs;
3267 @@ -132,13 +142,13 @@ struct isert_conn {
3268 struct iscsi_conn *conn;
3269 struct list_head conn_accept_node;
3270 struct completion conn_login_comp;
3271 + struct completion login_req_comp;
3272 struct iser_tx_desc conn_login_tx_desc;
3273 struct rdma_cm_id *conn_cm_id;
3274 struct ib_pd *conn_pd;
3275 struct ib_mr *conn_mr;
3276 struct ib_qp *conn_qp;
3277 struct isert_device *conn_device;
3278 - struct work_struct conn_logout_work;
3279 struct mutex conn_mutex;
3280 struct completion conn_wait;
3281 struct completion conn_wait_comp_err;
3282 @@ -147,10 +157,10 @@ struct isert_conn {
3283 int conn_fr_pool_size;
3284 /* lock to protect fastreg pool */
3285 spinlock_t conn_lock;
3286 + struct work_struct release_work;
3287 #define ISERT_COMP_BATCH_COUNT 8
3288 int conn_comp_batch;
3289 struct llist_head conn_comp_llist;
3290 - bool disconnect;
3291 };
3292
3293 #define ISERT_MAX_CQ 64
3294 @@ -182,6 +192,7 @@ struct isert_device {
3295 };
3296
3297 struct isert_np {
3298 + struct iscsi_np *np;
3299 struct semaphore np_sem;
3300 struct rdma_cm_id *np_cm_id;
3301 struct mutex np_accept_mutex;
3302 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
3303 index f2b978026407..77ecf6d32237 100644
3304 --- a/drivers/input/mouse/elantech.c
3305 +++ b/drivers/input/mouse/elantech.c
3306 @@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd)
3307 case 7:
3308 case 8:
3309 case 9:
3310 + case 10:
3311 + case 13:
3312 etd->hw_version = 4;
3313 break;
3314 default:
3315 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3316 index faeeb1372462..1a858c86a72b 100644
3317 --- a/drivers/input/serio/i8042-x86ia64io.h
3318 +++ b/drivers/input/serio/i8042-x86ia64io.h
3319 @@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
3320 },
3321 },
3322 {
3323 + /* Acer Aspire 7738 */
3324 + .matches = {
3325 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3326 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
3327 + },
3328 + },
3329 + {
3330 /* Gericom Bellagio */
3331 .matches = {
3332 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
3333 @@ -735,6 +742,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
3334 { }
3335 };
3336
3337 +/*
3338 + * Some laptops need a keyboard reset before probing for the trackpad to
3339 + * get it detected, initialised and working.
3340 + */
3341 +static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
3342 + {
3343 + /* Gigabyte P35 v2 - Elantech touchpad */
3344 + .matches = {
3345 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3346 + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
3347 + },
3348 + },
3349 + {
3350 + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
3351 + .matches = {
3352 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3353 + DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
3354 + },
3355 + },
3356 + {
3357 + /* Gigabyte P34 - Elantech touchpad */
3358 + .matches = {
3359 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3360 + DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
3361 + },
3362 + },
3363 + { }
3364 +};
3365 +
3366 #endif /* CONFIG_X86 */
3367
3368 #ifdef CONFIG_PNP
3369 @@ -1030,6 +1066,9 @@ static int __init i8042_platform_init(void)
3370 if (dmi_check_system(i8042_dmi_dritek_table))
3371 i8042_dritek = true;
3372
3373 + if (dmi_check_system(i8042_dmi_kbdreset_table))
3374 + i8042_kbdreset = true;
3375 +
3376 /*
3377 * A20 was already enabled during early kernel init. But some buggy
3378 * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
3379 diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
3380 index f5a98af3b325..804d2e02010a 100644
3381 --- a/drivers/input/serio/i8042.c
3382 +++ b/drivers/input/serio/i8042.c
3383 @@ -67,6 +67,10 @@ static bool i8042_notimeout;
3384 module_param_named(notimeout, i8042_notimeout, bool, 0);
3385 MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
3386
3387 +static bool i8042_kbdreset;
3388 +module_param_named(kbdreset, i8042_kbdreset, bool, 0);
3389 +MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
3390 +
3391 #ifdef CONFIG_X86
3392 static bool i8042_dritek;
3393 module_param_named(dritek, i8042_dritek, bool, 0);
3394 @@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
3395 return -1;
3396
3397 /*
3398 + * Reset keyboard (needed on some laptops to successfully detect
3399 + * touchpad, e.g., some Gigabyte laptop models with Elantech
3400 + * touchpads).
3401 + */
3402 + if (i8042_kbdreset) {
3403 + pr_warn("Attempting to reset device connected to KBD port\n");
3404 + i8042_kbd_write(NULL, (unsigned char) 0xff);
3405 + }
3406 +
3407 +/*
3408 * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
3409 * used it for a PCI card or something else.
3410 */
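
The two i8042 hunks combine a module parameter with a DMI quirk table, so the keyboard reset can be forced from the command line or enabled automatically on known-affected machines. A compressed sketch of that pattern, with hypothetical names:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>

static bool demo_quirk;
module_param_named(quirk, demo_quirk, bool, 0);
MODULE_PARM_DESC(quirk, "Force the quirk regardless of DMI match");

static const struct dmi_system_id __initconst demo_quirk_table[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
                },
        },
        { }     /* terminating entry */
};

static int __init demo_platform_init(void)
{
        /* DMI match and user override both funnel into one flag. */
        if (dmi_check_system(demo_quirk_table))
                demo_quirk = true;
        return 0;
}
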
3411 diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
3412 index 64fde485dcaa..4c794f15a57f 100644
3413 --- a/drivers/leds/leds-netxbig.c
3414 +++ b/drivers/leds/leds-netxbig.c
3415 @@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
3416 led_dat->sata = 0;
3417 led_dat->cdev.brightness = LED_OFF;
3418 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
3419 - /*
3420 - * If available, expose the SATA activity blink capability through
3421 - * a "sata" sysfs attribute.
3422 - */
3423 - if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
3424 - led_dat->cdev.groups = netxbig_led_groups;
3425 led_dat->mode_addr = template->mode_addr;
3426 led_dat->mode_val = template->mode_val;
3427 led_dat->bright_addr = template->bright_addr;
3428 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
3429 led_dat->timer = pdata->timer;
3430 led_dat->num_timer = pdata->num_timer;
3431 + /*
3432 + * If available, expose the SATA activity blink capability through
3433 + * a "sata" sysfs attribute.
3434 + */
3435 + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
3436 + led_dat->cdev.groups = netxbig_led_groups;
3437
3438 return led_classdev_register(&pdev->dev, &led_dat->cdev);
3439 }
3440 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3441 index 58f3927fd7cc..62c51364cf9e 100644
3442 --- a/drivers/md/dm.c
3443 +++ b/drivers/md/dm.c
3444 @@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
3445
3446 static void clone_endio(struct bio *bio, int error)
3447 {
3448 - int r = 0;
3449 + int r = error;
3450 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
3451 struct dm_io *io = tio->io;
3452 struct mapped_device *md = tio->io->md;
3453 diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
3454 index 2335529b195c..ab5d9a3adebf 100644
3455 --- a/drivers/media/i2c/smiapp-pll.c
3456 +++ b/drivers/media/i2c/smiapp-pll.c
3457 @@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
3458 {
3459 dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
3460 dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
3461 - if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
3462 + if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
3463 dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
3464 dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
3465 }
3466 @@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
3467 dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
3468 dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
3469 dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
3470 - if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
3471 + if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
3472 dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
3473 pll->op_sys_clk_freq_hz);
3474 dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
3475 diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
3476 index b10aaeda2bb4..b49254e4ea0a 100644
3477 --- a/drivers/media/i2c/smiapp/smiapp-core.c
3478 +++ b/drivers/media/i2c/smiapp/smiapp-core.c
3479 @@ -2677,7 +2677,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
3480 pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
3481 pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
3482
3483 + mutex_lock(&sensor->mutex);
3484 rval = smiapp_update_mode(sensor);
3485 + mutex_unlock(&sensor->mutex);
3486 if (rval) {
3487 dev_err(&client->dev, "update mode failed\n");
3488 goto out_nvm_release;
3489 diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
3490 index 69c2dbd2d165..501d5ef682e5 100644
3491 --- a/drivers/media/platform/vivid/vivid-vid-out.c
3492 +++ b/drivers/media/platform/vivid/vivid-vid-out.c
3493 @@ -612,7 +612,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
3494 sel->r = dev->fmt_out_rect;
3495 break;
3496 case V4L2_SEL_TGT_CROP_BOUNDS:
3497 - if (!dev->has_compose_out)
3498 + if (!dev->has_crop_out)
3499 return -EINVAL;
3500 sel->r = vivid_max_rect;
3501 break;
3502 diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
3503 index ec49f94425fc..2fd47c9bf5d8 100644
3504 --- a/drivers/media/rc/img-ir/img-ir-hw.c
3505 +++ b/drivers/media/rc/img-ir/img-ir-hw.c
3506 @@ -530,6 +530,22 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
3507 u32 ir_status, irq_en;
3508 spin_lock_irq(&priv->lock);
3509
3510 + /*
3511 + * First record that the protocol is being stopped so that the end timer
3512 + * isn't restarted while we're trying to stop it.
3513 + */
3514 + hw->stopping = true;
3515 +
3516 + /*
3517 + * Release the lock to stop the end timer, since the end timer handler
3518 + * acquires the lock and we don't want to deadlock waiting for it.
3519 + */
3520 + spin_unlock_irq(&priv->lock);
3521 + del_timer_sync(&hw->end_timer);
3522 + spin_lock_irq(&priv->lock);
3523 +
3524 + hw->stopping = false;
3525 +
3526 /* switch off and disable interrupts */
3527 img_ir_write(priv, IMG_IR_CONTROL, 0);
3528 irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
3529 @@ -541,12 +557,13 @@ static void img_ir_set_decoder(struct img_ir_priv *priv,
3530 if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
3531 ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
3532 img_ir_write(priv, IMG_IR_STATUS, ir_status);
3533 - img_ir_read(priv, IMG_IR_DATA_LW);
3534 - img_ir_read(priv, IMG_IR_DATA_UP);
3535 }
3536
3537 - /* stop the end timer and switch back to normal mode */
3538 - del_timer_sync(&hw->end_timer);
3539 + /* always read data to clear buffer if IR wakes the device */
3540 + img_ir_read(priv, IMG_IR_DATA_LW);
3541 + img_ir_read(priv, IMG_IR_DATA_UP);
3542 +
3543 + /* switch back to normal mode */
3544 hw->mode = IMG_IR_M_NORMAL;
3545
3546 /* clear the wakeup scancode filter */
3547 @@ -817,7 +834,8 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
3548 }
3549
3550
3551 - if (dec->repeat) {
3552 + /* we mustn't update the end timer while trying to stop it */
3553 + if (dec->repeat && !hw->stopping) {
3554 unsigned long interval;
3555
3556 img_ir_begin_repeat(priv);
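
The img-ir change is a standard deadlock-avoidance dance: del_timer_sync() waits for a running timer handler to finish, and that handler takes the same spinlock, so the lock must be dropped around the del_timer_sync() call while a 'stopping' flag keeps the handler from re-arming the timer in the window. Roughly, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo_hw {
        spinlock_t lock;
        struct timer_list end_timer;
        bool stopping;
};

static void demo_stop_decoder(struct demo_hw *hw)
{
        spin_lock_irq(&hw->lock);
        /* Tell the timer handler not to re-arm itself. */
        hw->stopping = true;
        spin_unlock_irq(&hw->lock);

        /*
         * del_timer_sync() waits for a running handler to finish; the
         * handler takes hw->lock, so holding it here would deadlock.
         */
        del_timer_sync(&hw->end_timer);

        spin_lock_irq(&hw->lock);
        hw->stopping = false;
        /* ... reprogram the hardware under the lock ... */
        spin_unlock_irq(&hw->lock);
}
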
3557 diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
3558 index 8fcc16c32c5b..307ddcd1a99e 100644
3559 --- a/drivers/media/rc/img-ir/img-ir-hw.h
3560 +++ b/drivers/media/rc/img-ir/img-ir-hw.h
3561 @@ -214,6 +214,8 @@ enum img_ir_mode {
3562 * @flags: IMG_IR_F_*.
3563 * @filters: HW filters (derived from scancode filters).
3564 * @mode: Current decode mode.
3565 + * @stopping: Indicates that decoder is being taken down and timers
3566 + * should not be restarted.
3567 * @suspend_irqen: Saved IRQ enable mask over suspend.
3568 */
3569 struct img_ir_priv_hw {
3570 @@ -229,6 +231,7 @@ struct img_ir_priv_hw {
3571 struct img_ir_filter filters[RC_FILTER_MAX];
3572
3573 enum img_ir_mode mode;
3574 + bool stopping;
3575 u32 suspend_irqen;
3576 };
3577
3578 diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
3579 index 9eb77ac2153b..da87f1cc31a9 100644
3580 --- a/drivers/media/usb/au0828/au0828-cards.c
3581 +++ b/drivers/media/usb/au0828/au0828-cards.c
3582 @@ -36,6 +36,11 @@ static void hvr950q_cs5340_audio(void *priv, int enable)
3583 au0828_clear(dev, REG_000, 0x10);
3584 }
3585
3586 +/*
3587 + * WARNING: There's a quirks table at sound/usb/quirks-table.h
3588 + * that should also be updated every time a new device with V4L2 support
3589 + * is added here.
3590 + */
3591 struct au0828_board au0828_boards[] = {
3592 [AU0828_BOARD_UNKNOWN] = {
3593 .name = "Unknown board",
3594 diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
3595 index 3f4361e48a32..efa782ed6e2d 100644
3596 --- a/drivers/media/usb/dvb-usb/af9005.c
3597 +++ b/drivers/media/usb/dvb-usb/af9005.c
3598 @@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
3599 err("usb_register failed. (%d)", result);
3600 return result;
3601 }
3602 +#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
3603 + /* FIXME: convert to today's kernel IR infrastructure */
3604 rc_decode = symbol_request(af9005_rc_decode);
3605 rc_keys = symbol_request(rc_map_af9005_table);
3606 rc_keys_size = symbol_request(rc_map_af9005_table_size);
3607 +#endif
3608 if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
3609 err("af9005_rc_decode function not found, disabling remote");
3610 af9005_properties.rc.legacy.rc_query = NULL;
3611 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
3612 index 7c8322d4fc63..3c07af96b30f 100644
3613 --- a/drivers/media/usb/uvc/uvc_driver.c
3614 +++ b/drivers/media/usb/uvc/uvc_driver.c
3615 @@ -1623,12 +1623,12 @@ static void uvc_delete(struct uvc_device *dev)
3616 {
3617 struct list_head *p, *n;
3618
3619 - usb_put_intf(dev->intf);
3620 - usb_put_dev(dev->udev);
3621 -
3622 uvc_status_cleanup(dev);
3623 uvc_ctrl_cleanup_device(dev);
3624
3625 + usb_put_intf(dev->intf);
3626 + usb_put_dev(dev->udev);
3627 +
3628 if (dev->vdev.dev)
3629 v4l2_device_unregister(&dev->vdev);
3630 #ifdef CONFIG_MEDIA_CONTROLLER
3631 diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
3632 index cca472109135..51fd6b524371 100644
3633 --- a/drivers/misc/cxl/context.c
3634 +++ b/drivers/misc/cxl/context.c
3635 @@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
3636 /*
3637 * Initialises a CXL context.
3638 */
3639 -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3640 +int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3641 + struct address_space *mapping)
3642 {
3643 int i;
3644
3645 @@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3646 ctx->afu = afu;
3647 ctx->master = master;
3648 ctx->pid = NULL; /* Set in start work ioctl */
3649 + mutex_init(&ctx->mapping_lock);
3650 + ctx->mapping = mapping;
3651
3652 /*
3653 * Allocate the segment table before we put it in the IDR so that we
3654 @@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
3655 * Allocating IDR! We better make sure everything's setup that
3656 * dereferences from it.
3657 */
3658 + mutex_lock(&afu->contexts_lock);
3659 idr_preload(GFP_KERNEL);
3660 - spin_lock(&afu->contexts_lock);
3661 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
3662 ctx->afu->num_procs, GFP_NOWAIT);
3663 - spin_unlock(&afu->contexts_lock);
3664 idr_preload_end();
3665 + mutex_unlock(&afu->contexts_lock);
3666 if (i < 0)
3667 return i;
3668
3669 @@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
3670 afu_release_irqs(ctx);
3671 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
3672 wake_up_all(&ctx->wq);
3673 +
3674 + /* Release Problem State Area mapping */
3675 + mutex_lock(&ctx->mapping_lock);
3676 + if (ctx->mapping)
3677 + unmap_mapping_range(ctx->mapping, 0, 0, 1);
3678 + mutex_unlock(&ctx->mapping_lock);
3679 }
3680
3681 /*
3682 @@ -168,21 +177,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
3683 struct cxl_context *ctx;
3684 int tmp;
3685
3686 - rcu_read_lock();
3687 - idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
3688 + mutex_lock(&afu->contexts_lock);
3689 + idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
3690 /*
3691 * Anything done in here needs to be setup before the IDR is
3692 * created and torn down after the IDR removed
3693 */
3694 __detach_context(ctx);
3695 - rcu_read_unlock();
3696 + }
3697 + mutex_unlock(&afu->contexts_lock);
3698 }
3699
3700 void cxl_context_free(struct cxl_context *ctx)
3701 {
3702 - spin_lock(&ctx->afu->contexts_lock);
3703 + mutex_lock(&ctx->afu->contexts_lock);
3704 idr_remove(&ctx->afu->contexts_idr, ctx->pe);
3705 - spin_unlock(&ctx->afu->contexts_lock);
3706 + mutex_unlock(&ctx->afu->contexts_lock);
3707 synchronize_rcu();
3708
3709 free_page((u64)ctx->sstp);
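
The cxl conversion from a spinlock to a mutex keeps the usual idr_preload() recipe: preallocate with GFP_KERNEL while sleeping is still allowed, then do the actual insertion atomically with GFP_NOWAIT. A minimal sketch with hypothetical names:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/mutex.h>

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_lock);

static int demo_assign_id(void *obj, int max_ids)
{
        int id;

        mutex_lock(&demo_lock);
        idr_preload(GFP_KERNEL);        /* preallocate; may sleep */
        id = idr_alloc(&demo_idr, obj, 0, max_ids, GFP_NOWAIT);
        idr_preload_end();
        mutex_unlock(&demo_lock);

        return id;      /* valid id, or negative errno on failure */
}
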
3710 diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
3711 index 3d2b8677ec8a..6ee785da574e 100644
3712 --- a/drivers/misc/cxl/cxl.h
3713 +++ b/drivers/misc/cxl/cxl.h
3714 @@ -349,7 +349,7 @@ struct cxl_afu {
3715 struct device *chardev_s, *chardev_m, *chardev_d;
3716 struct idr contexts_idr;
3717 struct dentry *debugfs;
3718 - spinlock_t contexts_lock;
3719 + struct mutex contexts_lock;
3720 struct mutex spa_mutex;
3721 spinlock_t afu_cntl_lock;
3722
3723 @@ -390,6 +390,10 @@ struct cxl_context {
3724 phys_addr_t psn_phys;
3725 u64 psn_size;
3726
3727 + /* Used to unmap any mmaps when force detaching */
3728 + struct address_space *mapping;
3729 + struct mutex mapping_lock;
3730 +
3731 spinlock_t sste_lock; /* Protects segment table entries */
3732 struct cxl_sste *sstp;
3733 u64 sstp0, sstp1;
3734 @@ -592,7 +596,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
3735 void init_cxl_native(void);
3736
3737 struct cxl_context *cxl_context_alloc(void);
3738 -int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
3739 +int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3740 + struct address_space *mapping);
3741 void cxl_context_free(struct cxl_context *ctx);
3742 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
3743
3744 diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
3745 index 378b099e7c0b..e9f2f10dbb37 100644
3746 --- a/drivers/misc/cxl/file.c
3747 +++ b/drivers/misc/cxl/file.c
3748 @@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
3749 goto err_put_afu;
3750 }
3751
3752 - if ((rc = cxl_context_init(ctx, afu, master)))
3753 + if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
3754 goto err_put_afu;
3755
3756 pr_devel("afu_open pe: %i\n", ctx->pe);
3757 @@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
3758 __func__, ctx->pe);
3759 cxl_context_detach(ctx);
3760
3761 + mutex_lock(&ctx->mapping_lock);
3762 + ctx->mapping = NULL;
3763 + mutex_unlock(&ctx->mapping_lock);
3764 +
3765 put_device(&ctx->afu->dev);
3766
3767 /*
3768 diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
3769 index d47532e8f4f1..1d9717b4d67a 100644
3770 --- a/drivers/misc/cxl/native.c
3771 +++ b/drivers/misc/cxl/native.c
3772 @@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
3773 u64 cmd, u64 pe_state)
3774 {
3775 u64 state;
3776 + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
3777
3778 WARN_ON(!ctx->afu->enabled);
3779
3780 @@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
3781 smp_mb();
3782 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
3783 while (1) {
3784 + if (time_after_eq(jiffies, timeout)) {
3785 + dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
3786 + return -EBUSY;
3787 + }
3788 state = be64_to_cpup(ctx->afu->sw_command_status);
3789 if (state == ~0ULL) {
3790 pr_err("cxl: Error adding process element to AFU\n");
3791 @@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
3792 return 0;
3793 }
3794
3795 -/*
3796 - * TODO: handle case when this is called inside a rcu_read_lock() which may
3797 - * happen when we unbind the driver (ie. cxl_context_detach_all()) . Terminate
3798 - * & remove use a mutex lock and schedule which will not good with lock held.
3799 - * May need to write do_process_element_cmd() that handles outstanding page
3800 - * faults synchronously.
3801 - */
3802 static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
3803 {
3804 if (!ctx->pe_inserted)
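
The do_process_element_cmd() change bounds a previously unbounded busy-wait with a jiffies deadline. The shape of such a loop, sketched here against a hypothetical status register:

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DEMO_TIMEOUT_SECS 5
#define DEMO_READY        0x1

static int demo_wait_ready(void __iomem *status_reg)
{
        unsigned long timeout = jiffies + DEMO_TIMEOUT_SECS * HZ;

        while (!(readl(status_reg) & DEMO_READY)) {
                if (time_after_eq(jiffies, timeout))
                        return -EBUSY;  /* hardware never answered */
                cpu_relax();
        }
        return 0;
}
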
3805 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
3806 index 10c98ab7f46e..0f2cc9f8b4db 100644
3807 --- a/drivers/misc/cxl/pci.c
3808 +++ b/drivers/misc/cxl/pci.c
3809 @@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
3810 afu->dev.release = cxl_release_afu;
3811 afu->slice = slice;
3812 idr_init(&afu->contexts_idr);
3813 - spin_lock_init(&afu->contexts_lock);
3814 + mutex_init(&afu->contexts_lock);
3815 spin_lock_init(&afu->afu_cntl_lock);
3816 mutex_init(&afu->spa_mutex);
3817
3818 diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
3819 index ce7ec06d87d1..461bdbd5d483 100644
3820 --- a/drivers/misc/cxl/sysfs.c
3821 +++ b/drivers/misc/cxl/sysfs.c
3822 @@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
3823 int rc;
3824
3825 /* Not safe to reset if it is currently in use */
3826 - spin_lock(&afu->contexts_lock);
3827 + mutex_lock(&afu->contexts_lock);
3828 if (!idr_is_empty(&afu->contexts_idr)) {
3829 rc = -EBUSY;
3830 goto err;
3831 @@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
3832
3833 rc = count;
3834 err:
3835 - spin_unlock(&afu->contexts_lock);
3836 + mutex_unlock(&afu->contexts_lock);
3837 return rc;
3838 }
3839
3840 @@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3841 int rc = -EBUSY;
3842
3843 /* can't change this if we have a user */
3844 - spin_lock(&afu->contexts_lock);
3845 + mutex_lock(&afu->contexts_lock);
3846 if (!idr_is_empty(&afu->contexts_idr))
3847 goto err;
3848
3849 @@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3850 afu->current_mode = 0;
3851 afu->num_procs = 0;
3852
3853 - spin_unlock(&afu->contexts_lock);
3854 + mutex_unlock(&afu->contexts_lock);
3855
3856 if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
3857 return rc;
3858 @@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
3859
3860 return count;
3861 err:
3862 - spin_unlock(&afu->contexts_lock);
3863 + mutex_unlock(&afu->contexts_lock);
3864 return rc;
3865 }
3866
3867 diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
3868 index 4f2fd6fc1e23..432aec8dd3ce 100644
3869 --- a/drivers/misc/mei/hw-me.c
3870 +++ b/drivers/misc/mei/hw-me.c
3871 @@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
3872 struct mei_me_hw *hw = to_me_hw(dev);
3873 u32 hcsr = mei_hcsr_read(hw);
3874
3875 + /* H_RST may be found lit before reset is started,
3876 + * for example if a preceding reset flow hasn't completed.
3877 + * In that case asserting H_RST will be ignored, therefore
3878 + * we need to clear the H_RST bit to start a successful reset sequence.
3879 + */
3880 + if ((hcsr & H_RST) == H_RST) {
3881 + dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
3882 + hcsr &= ~H_RST;
3883 + mei_me_reg_write(hw, H_CSR, hcsr);
3884 + hcsr = mei_hcsr_read(hw);
3885 + }
3886 +
3887 hcsr |= H_RST | H_IG | H_IS;
3888
3889 if (intr_enable)
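
The mei fix is a defensive read-modify-write: if a stale reset bit is still set from an aborted sequence, asserting it again would be ignored, so it is cleared and the register re-read before the new reset is started. Generically, for a hypothetical control register:

#include <linux/io.h>

#define DEMO_CSR 0x00
#define DEMO_RST (1u << 4)

static void demo_start_reset(void __iomem *base)
{
        u32 csr = readl(base + DEMO_CSR);

        /* Clear a leftover reset bit so the new assertion takes effect. */
        if (csr & DEMO_RST) {
                writel(csr & ~DEMO_RST, base + DEMO_CSR);
                csr = readl(base + DEMO_CSR);
        }

        writel(csr | DEMO_RST, base + DEMO_CSR);
}
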
3890 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3891 index 7625bd791fca..023c2010cd75 100644
3892 --- a/drivers/mmc/host/sdhci.c
3893 +++ b/drivers/mmc/host/sdhci.c
3894 @@ -1239,6 +1239,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
3895 spin_unlock_irq(&host->lock);
3896 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
3897 spin_lock_irq(&host->lock);
3898 +
3899 + if (mode != MMC_POWER_OFF)
3900 + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
3901 + else
3902 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
3903 +
3904 return;
3905 }
3906
3907 diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
3908 index 541fb7a05625..cc7bfc0c0a71 100644
3909 --- a/drivers/net/can/usb/kvaser_usb.c
3910 +++ b/drivers/net/can/usb/kvaser_usb.c
3911 @@ -1246,6 +1246,9 @@ static int kvaser_usb_close(struct net_device *netdev)
3912 if (err)
3913 netdev_warn(netdev, "Cannot stop device, error %d\n", err);
3914
3915 + /* reset tx contexts */
3916 + kvaser_usb_unlink_tx_urbs(priv);
3917 +
3918 priv->can.state = CAN_STATE_STOPPED;
3919 close_candev(priv->netdev);
3920
3921 @@ -1294,12 +1297,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3922 if (!urb) {
3923 netdev_err(netdev, "No memory left for URBs\n");
3924 stats->tx_dropped++;
3925 - goto nourbmem;
3926 + dev_kfree_skb(skb);
3927 + return NETDEV_TX_OK;
3928 }
3929
3930 buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
3931 if (!buf) {
3932 stats->tx_dropped++;
3933 + dev_kfree_skb(skb);
3934 goto nobufmem;
3935 }
3936
3937 @@ -1334,6 +1339,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3938 }
3939 }
3940
3941 + /* This should never happen; it implies a flow control bug */
3942 if (!context) {
3943 netdev_warn(netdev, "cannot find free context\n");
3944 ret = NETDEV_TX_BUSY;
3945 @@ -1364,9 +1370,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
3946 if (unlikely(err)) {
3947 can_free_echo_skb(netdev, context->echo_index);
3948
3949 - skb = NULL; /* set to NULL to avoid double free in
3950 - * dev_kfree_skb(skb) */
3951 -
3952 atomic_dec(&priv->active_tx_urbs);
3953 usb_unanchor_urb(urb);
3954
3955 @@ -1388,8 +1391,6 @@ releasebuf:
3956 kfree(buf);
3957 nobufmem:
3958 usb_free_urb(urb);
3959 -nourbmem:
3960 - dev_kfree_skb(skb);
3961 return ret;
3962 }
3963
3964 @@ -1502,6 +1503,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
3965 struct kvaser_usb_net_priv *priv;
3966 int i, err;
3967
3968 + err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
3969 + if (err)
3970 + return err;
3971 +
3972 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
3973 if (!netdev) {
3974 dev_err(&intf->dev, "Cannot alloc candev\n");
3975 @@ -1606,9 +1611,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
3976
3977 usb_set_intfdata(intf, dev);
3978
3979 - for (i = 0; i < MAX_NET_DEVICES; i++)
3980 - kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
3981 -
3982 err = kvaser_usb_get_software_info(dev);
3983 if (err) {
3984 dev_err(&intf->dev,
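
The kvaser_usb ndo_start_xmit rework follows the standard rule for TX error paths: once the driver has accepted the skb it owns it, so on an unrecoverable allocation failure it must free the skb exactly once and return NETDEV_TX_OK (dropping the packet) rather than funnelling every failure through one shared label, which is what produced the double free here. A sketch, for a hypothetical USB CAN driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
                                   struct net_device *netdev)
{
        struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);

        if (!urb) {
                netdev->stats.tx_dropped++;
                dev_kfree_skb(skb);     /* free exactly once, here */
                return NETDEV_TX_OK;    /* drop; don't make the stack retry */
        }

        /* ... fill and submit the urb; later error paths must not free
         * the skb a second time once it has been handed elsewhere ... */
        return NETDEV_TX_OK;
}
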
3985 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
3986 index e398eda07298..c8af3ce3ea38 100644
3987 --- a/drivers/net/ethernet/atheros/alx/main.c
3988 +++ b/drivers/net/ethernet/atheros/alx/main.c
3989 @@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
3990 schedule_work(&alx->reset_wk);
3991 }
3992
3993 -static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
3994 +static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
3995 {
3996 struct alx_rx_queue *rxq = &alx->rxq;
3997 struct alx_rrd *rrd;
3998 struct alx_buffer *rxb;
3999 struct sk_buff *skb;
4000 u16 length, rfd_cleaned = 0;
4001 + int work = 0;
4002
4003 - while (budget > 0) {
4004 + while (work < budget) {
4005 rrd = &rxq->rrd[rxq->rrd_read_idx];
4006 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
4007 break;
4008 @@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
4009 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
4010 RRD_NOR) != 1) {
4011 alx_schedule_reset(alx);
4012 - return 0;
4013 + return work;
4014 }
4015
4016 rxb = &rxq->bufs[rxq->read_idx];
4017 @@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
4018 }
4019
4020 napi_gro_receive(&alx->napi, skb);
4021 - budget--;
4022 + work++;
4023
4024 next_pkt:
4025 if (++rxq->read_idx == alx->rx_ringsz)
4026 @@ -258,21 +259,22 @@ next_pkt:
4027 if (rfd_cleaned)
4028 alx_refill_rx_ring(alx, GFP_ATOMIC);
4029
4030 - return budget > 0;
4031 + return work;
4032 }
4033
4034 static int alx_poll(struct napi_struct *napi, int budget)
4035 {
4036 struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
4037 struct alx_hw *hw = &alx->hw;
4038 - bool complete = true;
4039 unsigned long flags;
4040 + bool tx_complete;
4041 + int work;
4042
4043 - complete = alx_clean_tx_irq(alx) &&
4044 - alx_clean_rx_irq(alx, budget);
4045 + tx_complete = alx_clean_tx_irq(alx);
4046 + work = alx_clean_rx_irq(alx, budget);
4047
4048 - if (!complete)
4049 - return 1;
4050 + if (!tx_complete || work == budget)
4051 + return budget;
4052
4053 napi_complete(&alx->napi);
4054
4055 @@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
4056
4057 alx_post_write(hw);
4058
4059 - return 0;
4060 + return work;
4061 }
4062
4063 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
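
The alx_poll() rework implements NAPI's required accounting: a poller must count the packets it actually processed and return that number, returning the full budget only while there is known work left (which keeps it on the poll list), and calling napi_complete() before re-enabling interrupts otherwise. The skeleton, with a hypothetical per-packet helper demo_rx_one():

#include <linux/netdevice.h>

static bool demo_rx_one(struct napi_struct *napi);      /* hypothetical */

static int demo_poll(struct napi_struct *napi, int budget)
{
        int work = 0;

        /* Process at most 'budget' rx packets. */
        while (work < budget && demo_rx_one(napi))
                work++;

        /* Budget exhausted: stay on the poll list. */
        if (work == budget)
                return budget;

        napi_complete(napi);
        /* ... re-enable device interrupts here ... */
        return work;
}
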
4064 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
4065 index 77f8f836cbbe..5748542f6717 100644
4066 --- a/drivers/net/ethernet/broadcom/tg3.c
4067 +++ b/drivers/net/ethernet/broadcom/tg3.c
4068 @@ -17789,23 +17789,6 @@ static int tg3_init_one(struct pci_dev *pdev,
4069 goto err_out_apeunmap;
4070 }
4071
4072 - /*
4073 - * Reset chip in case UNDI or EFI driver did not shutdown
4074 - * DMA self test will enable WDMAC and we'll see (spurious)
4075 - * pending DMA on the PCI bus at that point.
4076 - */
4077 - if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
4078 - (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
4079 - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4080 - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4081 - }
4082 -
4083 - err = tg3_test_dma(tp);
4084 - if (err) {
4085 - dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
4086 - goto err_out_apeunmap;
4087 - }
4088 -
4089 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
4090 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
4091 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
4092 @@ -17850,6 +17833,23 @@ static int tg3_init_one(struct pci_dev *pdev,
4093 sndmbx += 0xc;
4094 }
4095
4096 + /*
4097 + * Reset chip in case UNDI or EFI driver did not shutdown
4098 + * DMA self test will enable WDMAC and we'll see (spurious)
4099 + * pending DMA on the PCI bus at that point.
4100 + */
4101 + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
4102 + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
4103 + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4104 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4105 + }
4106 +
4107 + err = tg3_test_dma(tp);
4108 + if (err) {
4109 + dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
4110 + goto err_out_apeunmap;
4111 + }
4112 +
4113 tg3_init_coal(tp);
4114
4115 pci_set_drvdata(pdev, dev);
4116 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
4117 index 73cf1653a4a3..167cd8ede397 100644
4118 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
4119 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
4120 @@ -1059,10 +1059,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
4121 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
4122 }
4123
4124 - if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
4125 - skb->csum = htons(checksum);
4126 - skb->ip_summed = CHECKSUM_COMPLETE;
4127 - }
4128 + /* Hardware does not provide the whole packet checksum. It only
4129 + * provides a pseudo checksum. Since hw validates the packet
4130 + * checksum but does not provide us the checksum value, use
4131 + * CHECKSUM_UNNECESSARY.
4132 + */
4133 + if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
4134 + ipv4_csum_ok)
4135 + skb->ip_summed = CHECKSUM_UNNECESSARY;
4136
4137 if (vlan_stripped)
4138 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
4139 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
4140 index 597c463e384d..d2975fa7e549 100644
4141 --- a/drivers/net/ethernet/emulex/benet/be_main.c
4142 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
4143 @@ -4427,9 +4427,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4144 be16_to_cpu(port));
4145 }
4146
4147 -static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
4148 +static netdev_features_t be_features_check(struct sk_buff *skb,
4149 + struct net_device *dev,
4150 + netdev_features_t features)
4151 {
4152 - return vxlan_gso_check(skb);
4153 + return vxlan_features_check(skb, features);
4154 }
4155 #endif
4156
4157 @@ -4460,7 +4462,7 @@ static const struct net_device_ops be_netdev_ops = {
4158 #ifdef CONFIG_BE2NET_VXLAN
4159 .ndo_add_vxlan_port = be_add_vxlan_port,
4160 .ndo_del_vxlan_port = be_del_vxlan_port,
4161 - .ndo_gso_check = be_gso_check,
4162 + .ndo_features_check = be_features_check,
4163 #endif
4164 };
4165
4166 diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
4167 index 5b8300a32bf5..4d61ef50b465 100644
4168 --- a/drivers/net/ethernet/intel/Kconfig
4169 +++ b/drivers/net/ethernet/intel/Kconfig
4170 @@ -281,6 +281,17 @@ config I40E_DCB
4171
4172 If unsure, say N.
4173
4174 +config I40E_FCOE
4175 + bool "Fibre Channel over Ethernet (FCoE)"
4176 + default n
4177 + depends on I40E && DCB && FCOE
4178 + ---help---
4179 + Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
4180 + in the driver. This will create new netdev for exclusive FCoE
4181 + use with XL710 FCoE offloads enabled.
4182 +
4183 + If unsure, say N.
4184 +
4185 config I40EVF
4186 tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
4187 depends on PCI_MSI
4188 diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
4189 index 4b94ddb29c24..c40581999121 100644
4190 --- a/drivers/net/ethernet/intel/i40e/Makefile
4191 +++ b/drivers/net/ethernet/intel/i40e/Makefile
4192 @@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
4193 i40e_virtchnl_pf.o
4194
4195 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
4196 -i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
4197 +i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
4198 diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4199 index 045b5c4b98b3..ad802dd0f67a 100644
4200 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4201 +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
4202 @@ -78,7 +78,7 @@ do { \
4203 } while (0)
4204
4205 typedef enum i40e_status_code i40e_status;
4206 -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
4207 +#ifdef CONFIG_I40E_FCOE
4208 #define I40E_FCOE
4209 -#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
4210 +#endif
4211 #endif /* _I40E_OSDEP_H_ */
4212 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4213 index 4d69e382b4e5..6bdaa313e7ea 100644
4214 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4215 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4216 @@ -1569,8 +1569,15 @@ int mlx4_en_start_port(struct net_device *dev)
4217 mlx4_en_free_affinity_hint(priv, i);
4218 goto cq_err;
4219 }
4220 - for (j = 0; j < cq->size; j++)
4221 - cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
4222 +
4223 + for (j = 0; j < cq->size; j++) {
4224 + struct mlx4_cqe *cqe = NULL;
4225 +
4226 + cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
4227 + priv->cqe_factor;
4228 + cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
4229 + }
4230 +
4231 err = mlx4_en_set_cq_moder(priv, cq);
4232 if (err) {
4233 en_err(priv, "Failed setting cq moderation parameters\n");
4234 @@ -2356,9 +2363,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
4235 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
4236 }
4237
4238 -static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
4239 +static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
4240 + struct net_device *dev,
4241 + netdev_features_t features)
4242 {
4243 - return vxlan_gso_check(skb);
4244 + return vxlan_features_check(skb, features);
4245 }
4246 #endif
4247
4248 @@ -2391,7 +2400,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
4249 #ifdef CONFIG_MLX4_EN_VXLAN
4250 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
4251 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
4252 - .ndo_gso_check = mlx4_en_gso_check,
4253 + .ndo_features_check = mlx4_en_features_check,
4254 #endif
4255 };
4256
4257 @@ -2425,7 +2434,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
4258 #ifdef CONFIG_MLX4_EN_VXLAN
4259 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
4260 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
4261 - .ndo_gso_check = mlx4_en_gso_check,
4262 + .ndo_features_check = mlx4_en_features_check,
4263 #endif
4264 };
4265
4266 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4267 index 454d9fea640e..11ff28b5fca3 100644
4268 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4269 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4270 @@ -954,7 +954,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
4271 tx_desc->ctrl.owner_opcode = op_own;
4272 if (send_doorbell) {
4273 wmb();
4274 - iowrite32(ring->doorbell_qpn,
4275 + /* Since there is no iowrite*_native() that writes the
4276 + * value as is, without byteswapping - using the one
4277 + * that doesn't do byteswapping in the relevant arch
4278 + * endianness.
4279 + */
4280 +#if defined(__LITTLE_ENDIAN)
4281 + iowrite32(
4282 +#else
4283 + iowrite32be(
4284 +#endif
4285 + ring->doorbell_qpn,
4286 ring->bf.uar->map + MLX4_SEND_DOORBELL);
4287 } else {
4288 ring->xmit_more++;
4289 diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
4290 index 2e88a235e26b..5f1228794328 100644
4291 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c
4292 +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
4293 @@ -1647,8 +1647,8 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
4294 /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
4295 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
4296 if (byte_field) {
4297 - param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
4298 - param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
4299 + param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
4300 + param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
4301 param->cqe_size = 1 << ((byte_field &
4302 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
4303 param->eqe_size = 1 << (((byte_field &
4304 diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
4305 index 193a6adb5d04..b7cdef0aebd6 100644
4306 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c
4307 +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
4308 @@ -590,6 +590,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
4309 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
4310 {
4311 mlx4_mtt_cleanup(dev, &mr->mtt);
4312 + mr->mtt.order = -1;
4313 }
4314 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
4315
4316 @@ -599,14 +600,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
4317 {
4318 int err;
4319
4320 - mpt_entry->start = cpu_to_be64(iova);
4321 - mpt_entry->length = cpu_to_be64(size);
4322 - mpt_entry->entity_size = cpu_to_be32(page_shift);
4323 -
4324 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
4325 if (err)
4326 return err;
4327
4328 + mpt_entry->start = cpu_to_be64(mr->iova);
4329 + mpt_entry->length = cpu_to_be64(mr->size);
4330 + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
4331 +
4332 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
4333 MLX4_MPT_PD_FLAG_EN_INV);
4334 mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
4335 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4336 index a913b3ad2f89..477a5d33d79c 100644
4337 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4338 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4339 @@ -504,9 +504,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
4340 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
4341 }
4342
4343 -static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
4344 +static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
4345 + struct net_device *dev,
4346 + netdev_features_t features)
4347 {
4348 - return vxlan_gso_check(skb);
4349 + return vxlan_features_check(skb, features);
4350 }
4351 #endif
4352
4353 @@ -531,7 +533,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
4354 #ifdef CONFIG_QLCNIC_VXLAN
4355 .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
4356 .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
4357 - .ndo_gso_check = qlcnic_gso_check,
4358 + .ndo_features_check = qlcnic_features_check,
4359 #endif
4360 #ifdef CONFIG_NET_POLL_CONTROLLER
4361 .ndo_poll_controller = qlcnic_poll_controller,
4362 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
4363 index c560f9aeb55d..64d1cef4cda1 100644
4364 --- a/drivers/net/ethernet/ti/cpsw.c
4365 +++ b/drivers/net/ethernet/ti/cpsw.c
4366 @@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
4367
4368 /* Clear all mcast from ALE */
4369 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
4370 - priv->host_port);
4371 + priv->host_port, -1);
4372
4373 /* Flood All Unicast Packets to Host port */
4374 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
4375 @@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
4376 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
4377 {
4378 struct cpsw_priv *priv = netdev_priv(ndev);
4379 + int vid;
4380 +
4381 + if (priv->data.dual_emac)
4382 + vid = priv->slaves[priv->emac_port].port_vlan;
4383 + else
4384 + vid = priv->data.default_vlan;
4385
4386 if (ndev->flags & IFF_PROMISC) {
4387 /* Enable promiscuous mode */
4388 @@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
4389 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
4390
4391 /* Clear all mcast from ALE */
4392 - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
4393 + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
4394 + vid);
4395
4396 if (!netdev_mc_empty(ndev)) {
4397 struct netdev_hw_addr *ha;
4398 @@ -757,6 +764,14 @@ requeue:
4399 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
4400 {
4401 struct cpsw_priv *priv = dev_id;
4402 + int value = irq - priv->irqs_table[0];
4403 +
4404 + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
4405 + * is to make sure we will always write the correct value to the EOI
4406 + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
4407 + * for TX Interrupt and 3 for MISC Interrupt.
4408 + */
4409 + cpdma_ctlr_eoi(priv->dma, value);
4410
4411 cpsw_intr_disable(priv);
4412 if (priv->irq_enabled == true) {
4413 @@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
4414 int num_tx, num_rx;
4415
4416 num_tx = cpdma_chan_process(priv->txch, 128);
4417 - if (num_tx)
4418 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4419
4420 num_rx = cpdma_chan_process(priv->rxch, budget);
4421 if (num_rx < budget) {
4422 @@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
4423
4424 napi_complete(napi);
4425 cpsw_intr_enable(priv);
4426 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4427 prim_cpsw = cpsw_get_slave_priv(priv, 0);
4428 if (prim_cpsw->irq_enabled == false) {
4429 prim_cpsw->irq_enabled = true;
4430 @@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
4431 napi_enable(&priv->napi);
4432 cpdma_ctlr_start(priv->dma);
4433 cpsw_intr_enable(priv);
4434 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4435 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4436
4437 prim_cpsw = cpsw_get_slave_priv(priv, 0);
4438 if (prim_cpsw->irq_enabled == false) {
4439 @@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
4440 cpdma_chan_start(priv->txch);
4441 cpdma_ctlr_int_ctrl(priv->dma, true);
4442 cpsw_intr_enable(priv);
4443 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4444 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4445 -
4446 }
4447
4448 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
4449 @@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
4450 cpsw_interrupt(ndev->irq, priv);
4451 cpdma_ctlr_int_ctrl(priv->dma, true);
4452 cpsw_intr_enable(priv);
4453 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
4454 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
4455 -
4456 }
4457 #endif
4458
4459 diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
4460 index 097ebe7077ac..5246b3a18ff8 100644
4461 --- a/drivers/net/ethernet/ti/cpsw_ale.c
4462 +++ b/drivers/net/ethernet/ti/cpsw_ale.c
4463 @@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
4464 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
4465 }
4466
4467 -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
4468 +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
4469 {
4470 u32 ale_entry[ALE_ENTRY_WORDS];
4471 int ret, idx;
4472 @@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
4473 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
4474 continue;
4475
4476 + /* If the vid passed is -1, remove all multicast entries from
4477 + * the table irrespective of vlan id. If a valid vlan id is
4478 + * passed, remove only multicast entries added to that vlan id;
4479 + * if the vlan id doesn't match, move on to the next entry.
4480 + */
4481 + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
4482 + continue;
4483 +
4484 if (cpsw_ale_get_mcast(ale_entry)) {
4485 u8 addr[6];
4486
4487 diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
4488 index c0d4127aa549..af1e7ecd87c6 100644
4489 --- a/drivers/net/ethernet/ti/cpsw_ale.h
4490 +++ b/drivers/net/ethernet/ti/cpsw_ale.h
4491 @@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
4492
4493 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
4494 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
4495 -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
4496 +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
4497 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
4498 int flags, u16 vid);
4499 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
4500 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
4501 index 2368395d8ae5..9c505c4dbe04 100644
4502 --- a/drivers/net/team/team.c
4503 +++ b/drivers/net/team/team.c
4504 @@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
4505 static void team_notify_peers_work(struct work_struct *work)
4506 {
4507 struct team *team;
4508 + int val;
4509
4510 team = container_of(work, struct team, notify_peers.dw.work);
4511
4512 @@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
4513 schedule_delayed_work(&team->notify_peers.dw, 0);
4514 return;
4515 }
4516 + val = atomic_dec_if_positive(&team->notify_peers.count_pending);
4517 + if (val < 0) {
4518 + rtnl_unlock();
4519 + return;
4520 + }
4521 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
4522 rtnl_unlock();
4523 - if (!atomic_dec_and_test(&team->notify_peers.count_pending))
4524 + if (val)
4525 schedule_delayed_work(&team->notify_peers.dw,
4526 msecs_to_jiffies(team->notify_peers.interval));
4527 }
4528 @@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
4529 static void team_mcast_rejoin_work(struct work_struct *work)
4530 {
4531 struct team *team;
4532 + int val;
4533
4534 team = container_of(work, struct team, mcast_rejoin.dw.work);
4535
4536 @@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
4537 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
4538 return;
4539 }
4540 + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
4541 + if (val < 0) {
4542 + rtnl_unlock();
4543 + return;
4544 + }
4545 call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
4546 rtnl_unlock();
4547 - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
4548 + if (val)
4549 schedule_delayed_work(&team->mcast_rejoin.dw,
4550 msecs_to_jiffies(team->mcast_rejoin.interval));
4551 }
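
Both team work handlers now use atomic_dec_if_positive(), which decrements only while the counter is still positive and returns the new value (negative, with the counter untouched, if the decrement would underflow). That closes the race where a concurrent reset had already driven count_pending to zero and the old atomic_dec_and_test() wrapped it around. In isolation:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t pending = ATOMIC_INIT(3);

/* Returns true if the caller should reschedule itself. */
static bool demo_take_one(void)
{
        int val = atomic_dec_if_positive(&pending);

        if (val < 0)
                return false;   /* already 0: someone cancelled us */

        /* val is the value after decrementing; >0 means more rounds */
        return val > 0;
}
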
4552 diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
4553 index 8021f6eec27f..41ae16435300 100644
4554 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c
4555 +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
4556 @@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
4557 if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
4558 return false;
4559
4560 - if (!mvm->cfg->rx_with_siso_diversity)
4561 + if (mvm->cfg->rx_with_siso_diversity)
4562 return false;
4563
4564 ieee80211_iterate_active_interfaces_atomic(
4565 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
4566 index 846a2e6e34d8..c70efb9a6e78 100644
4567 --- a/drivers/net/wireless/rtlwifi/pci.c
4568 +++ b/drivers/net/wireless/rtlwifi/pci.c
4569 @@ -666,7 +666,8 @@ tx_status_ok:
4570 }
4571
4572 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4573 - u8 *entry, int rxring_idx, int desc_idx)
4574 + struct sk_buff *new_skb, u8 *entry,
4575 + int rxring_idx, int desc_idx)
4576 {
4577 struct rtl_priv *rtlpriv = rtl_priv(hw);
4578 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
4579 @@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4580 u8 tmp_one = 1;
4581 struct sk_buff *skb;
4582
4583 + if (likely(new_skb)) {
4584 + skb = new_skb;
4585 + goto remap;
4586 + }
4587 skb = dev_alloc_skb(rtlpci->rxbuffersize);
4588 if (!skb)
4589 return 0;
4590 - rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
4591
4592 +remap:
4593 /* just set skb->cb to mapping addr for pci_unmap_single use */
4594 *((dma_addr_t *)skb->cb) =
4595 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
4596 @@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
4597 bufferaddress = *((dma_addr_t *)skb->cb);
4598 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
4599 return 0;
4600 + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
4601 if (rtlpriv->use_new_trx_flow) {
4602 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
4603 HW_DESC_RX_PREPARE,
4604 @@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4605 /*rx pkt */
4606 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
4607 rtlpci->rx_ring[rxring_idx].idx];
4608 + struct sk_buff *new_skb;
4609
4610 if (rtlpriv->use_new_trx_flow) {
4611 rx_remained_cnt =
4612 @@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4613 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
4614 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
4615
4616 + /* get a new skb - if allocation fails, the old one will be reused */
4617 + new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
4618 + if (unlikely(!new_skb)) {
4619 + pr_err("Allocation of new skb failed in %s\n",
4620 + __func__);
4621 + goto no_new;
4622 + }
4623 if (rtlpriv->use_new_trx_flow) {
4624 buffer_desc =
4625 &rtlpci->rx_ring[rxring_idx].buffer_desc
4626 @@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4627 schedule_work(&rtlpriv->works.lps_change_work);
4628 }
4629 end:
4630 + skb = new_skb;
4631 +no_new:
4632 if (rtlpriv->use_new_trx_flow) {
4633 - _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
4634 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
4635 rxring_idx,
4636 - rtlpci->rx_ring[rxring_idx].idx);
4637 + rtlpci->rx_ring[rxring_idx].idx);
4638 } else {
4639 - _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
4640 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
4641 + rxring_idx,
4642 rtlpci->rx_ring[rxring_idx].idx);
4643 -
4644 if (rtlpci->rx_ring[rxring_idx].idx ==
4645 rtlpci->rxringcount - 1)
4646 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
4647 @@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
4648 rtlpci->rx_ring[rxring_idx].idx = 0;
4649 for (i = 0; i < rtlpci->rxringcount; i++) {
4650 entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
4651 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
4652 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
4653 rxring_idx, i))
4654 return -ENOMEM;
4655 }
4656 @@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
4657
4658 for (i = 0; i < rtlpci->rxringcount; i++) {
4659 entry = &rtlpci->rx_ring[rxring_idx].desc[i];
4660 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
4661 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
4662 rxring_idx, i))
4663 return -ENOMEM;
4664 }
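
The rtlwifi RX path now allocates the replacement buffer before indicating the old one: if the allocation fails, the old skb is simply remapped back into the ring and the frame dropped, so the ring never develops a hole. The shape of the pattern, with hypothetical demo_* helpers standing in for the driver's indicate/rearm logic:

#include <linux/skbuff.h>

struct demo_ring {
        struct sk_buff **buf;
        unsigned int bufsize;
};

static void demo_indicate(struct sk_buff *skb);                 /* hypothetical */
static void demo_rearm(struct demo_ring *ring, int idx,
                       struct sk_buff *skb);                    /* hypothetical */

static void demo_rx_one_slot(struct demo_ring *ring, int idx)
{
        struct sk_buff *old_skb = ring->buf[idx];
        struct sk_buff *new_skb = dev_alloc_skb(ring->bufsize);

        if (unlikely(!new_skb)) {
                /* No replacement: keep old_skb in the ring, drop the frame. */
                demo_rearm(ring, idx, old_skb);
                return;
        }

        demo_indicate(old_skb);         /* consume the received frame */
        demo_rearm(ring, idx, new_skb); /* refill the slot */
}
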
4665 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4666 index d2ec5160bbf0..5c646d5f7bb8 100644
4667 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4668 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4669 @@ -955,6 +955,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
4670 local_save_flags(flags);
4671 local_irq_enable();
4672
4673 + rtlhal->fw_ready = false;
4674 rtlpriv->intf_ops->disable_aspm(hw);
4675 rtstatus = _rtl92ce_init_mac(hw);
4676 if (!rtstatus) {
4677 @@ -971,6 +972,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
4678 goto exit;
4679 }
4680
4681 + rtlhal->fw_ready = true;
4682 rtlhal->last_hmeboxnum = 0;
4683 rtl92c_phy_mac_config(hw);
4684 /* because last function modify RCR, so we update
4685 diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
4686 index 083ecc93fe5e..5f1fda44882b 100644
4687 --- a/drivers/net/xen-netback/common.h
4688 +++ b/drivers/net/xen-netback/common.h
4689 @@ -230,6 +230,8 @@ struct xenvif {
4690 */
4691 bool disabled;
4692 unsigned long status;
4693 + unsigned long drain_timeout;
4694 + unsigned long stall_timeout;
4695
4696 /* Queues */
4697 struct xenvif_queue *queues;
4698 @@ -328,7 +330,7 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id);
4699 extern bool separate_tx_rx_irq;
4700
4701 extern unsigned int rx_drain_timeout_msecs;
4702 -extern unsigned int rx_drain_timeout_jiffies;
4703 +extern unsigned int rx_stall_timeout_msecs;
4704 extern unsigned int xenvif_max_queues;
4705
4706 #ifdef CONFIG_DEBUG_FS
4707 diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
4708 index 895fe84011e7..d752d1c5c8bd 100644
4709 --- a/drivers/net/xen-netback/interface.c
4710 +++ b/drivers/net/xen-netback/interface.c
4711 @@ -166,7 +166,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
4712 goto drop;
4713
4714 cb = XENVIF_RX_CB(skb);
4715 - cb->expires = jiffies + rx_drain_timeout_jiffies;
4716 + cb->expires = jiffies + vif->drain_timeout;
4717
4718 xenvif_rx_queue_tail(queue, skb);
4719 xenvif_kick_thread(queue);
4720 @@ -414,6 +414,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
4721 vif->ip_csum = 1;
4722 vif->dev = dev;
4723 vif->disabled = false;
4724 + vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
4725 + vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
4726
4727 /* Start out with no queues. */
4728 vif->queues = NULL;
4729 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
4730 index 6563f0713fc0..c39aace4f642 100644
4731 --- a/drivers/net/xen-netback/netback.c
4732 +++ b/drivers/net/xen-netback/netback.c
4733 @@ -60,14 +60,12 @@ module_param(separate_tx_rx_irq, bool, 0644);
4734 */
4735 unsigned int rx_drain_timeout_msecs = 10000;
4736 module_param(rx_drain_timeout_msecs, uint, 0444);
4737 -unsigned int rx_drain_timeout_jiffies;
4738
4739 /* The length of time before the frontend is considered unresponsive
4740 * because it isn't providing Rx slots.
4741 */
4742 -static unsigned int rx_stall_timeout_msecs = 60000;
4743 +unsigned int rx_stall_timeout_msecs = 60000;
4744 module_param(rx_stall_timeout_msecs, uint, 0444);
4745 -static unsigned int rx_stall_timeout_jiffies;
4746
4747 unsigned int xenvif_max_queues;
4748 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
4749 @@ -2022,7 +2020,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
4750 return !queue->stalled
4751 && prod - cons < XEN_NETBK_RX_SLOTS_MAX
4752 && time_after(jiffies,
4753 - queue->last_rx_time + rx_stall_timeout_jiffies);
4754 + queue->last_rx_time + queue->vif->stall_timeout);
4755 }
4756
4757 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
4758 @@ -2040,8 +2038,9 @@ static bool xenvif_have_rx_work(struct xenvif_queue *queue)
4759 {
4760 return (!skb_queue_empty(&queue->rx_queue)
4761 && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
4762 - || xenvif_rx_queue_stalled(queue)
4763 - || xenvif_rx_queue_ready(queue)
4764 + || (queue->vif->stall_timeout &&
4765 + (xenvif_rx_queue_stalled(queue)
4766 + || xenvif_rx_queue_ready(queue)))
4767 || kthread_should_stop()
4768 || queue->vif->disabled;
4769 }
4770 @@ -2094,6 +2093,9 @@ int xenvif_kthread_guest_rx(void *data)
4771 struct xenvif_queue *queue = data;
4772 struct xenvif *vif = queue->vif;
4773
4774 + if (!vif->stall_timeout)
4775 + xenvif_queue_carrier_on(queue);
4776 +
4777 for (;;) {
4778 xenvif_wait_for_rx_work(queue);
4779
4780 @@ -2120,10 +2122,12 @@ int xenvif_kthread_guest_rx(void *data)
4781 * while it's probably not responsive, drop the
4782 * carrier so packets are dropped earlier.
4783 */
4784 - if (xenvif_rx_queue_stalled(queue))
4785 - xenvif_queue_carrier_off(queue);
4786 - else if (xenvif_rx_queue_ready(queue))
4787 - xenvif_queue_carrier_on(queue);
4788 + if (vif->stall_timeout) {
4789 + if (xenvif_rx_queue_stalled(queue))
4790 + xenvif_queue_carrier_off(queue);
4791 + else if (xenvif_rx_queue_ready(queue))
4792 + xenvif_queue_carrier_on(queue);
4793 + }
4794
4795 /* Queued packets may have foreign pages from other
4796 * domains. These cannot be queued indefinitely as
4797 @@ -2194,9 +2198,6 @@ static int __init netback_init(void)
4798 if (rc)
4799 goto failed_init;
4800
4801 - rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
4802 - rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
4803 -
4804 #ifdef CONFIG_DEBUG_FS
4805 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
4806 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
4807 diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
4808 index fab0d4b42f58..c047282c4ee0 100644
4809 --- a/drivers/net/xen-netback/xenbus.c
4810 +++ b/drivers/net/xen-netback/xenbus.c
4811 @@ -736,6 +736,7 @@ static void connect(struct backend_info *be)
4812 }
4813
4814 queue->remaining_credit = credit_bytes;
4815 + queue->credit_usec = credit_usec;
4816
4817 err = connect_rings(be, queue);
4818 if (err) {
4819 @@ -886,9 +887,15 @@ static int read_xenbus_vif_flags(struct backend_info *be)
4820 return -EOPNOTSUPP;
4821
4822 if (xenbus_scanf(XBT_NIL, dev->otherend,
4823 - "feature-rx-notify", "%d", &val) < 0 || val == 0) {
4824 - xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
4825 - return -EINVAL;
4826 + "feature-rx-notify", "%d", &val) < 0)
4827 + val = 0;
4828 + if (!val) {
4829 + /* - Reduce drain timeout to poll more frequently for
4830 + * Rx requests.
4831 + * - Disable Rx stall detection.
4832 + */
4833 + be->vif->drain_timeout = msecs_to_jiffies(30);
4834 + be->vif->stall_timeout = 0;
4835 }
4836
4837 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
4838 diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
4839 index 37040ab42890..fdff39ff5021 100644
4840 --- a/drivers/pinctrl/pinctrl-xway.c
4841 +++ b/drivers/pinctrl/pinctrl-xway.c
4842 @@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
4843
4844 /* load the gpio chip */
4845 xway_chip.dev = &pdev->dev;
4846 - of_gpiochip_add(&xway_chip);
4847 ret = gpiochip_add(&xway_chip);
4848 if (ret) {
4849 - of_gpiochip_remove(&xway_chip);
4850 dev_err(&pdev->dev, "Failed to register gpio chip\n");
4851 return ret;
4852 }
4853 diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
4854 index c1a6cd66af42..abdaed34c728 100644
4855 --- a/drivers/platform/x86/asus-nb-wmi.c
4856 +++ b/drivers/platform/x86/asus-nb-wmi.c
4857 @@ -191,6 +191,15 @@ static const struct dmi_system_id asus_quirks[] = {
4858 },
4859 {
4860 .callback = dmi_matched,
4861 + .ident = "ASUSTeK COMPUTER INC. X551CA",
4862 + .matches = {
4863 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4864 + DMI_MATCH(DMI_PRODUCT_NAME, "X551CA"),
4865 + },
4866 + .driver_data = &quirk_asus_wapf4,
4867 + },
4868 + {
4869 + .callback = dmi_matched,
4870 .ident = "ASUSTeK COMPUTER INC. X55A",
4871 .matches = {
4872 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4873 diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
4874 index 6bec745b6b92..10ce6cba4455 100644
4875 --- a/drivers/platform/x86/hp_accel.c
4876 +++ b/drivers/platform/x86/hp_accel.c
4877 @@ -246,6 +246,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
4878 AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
4879 AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
4880 AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
4881 + AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
4882 { NULL, }
4883 /* Laptop models without axis info (yet):
4884 * "NC6910" "HP Compaq 6910"
4885 diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
4886 index a94e7a7820b4..51272b5d7552 100644
4887 --- a/drivers/reset/reset-sunxi.c
4888 +++ b/drivers/reset/reset-sunxi.c
4889 @@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
4890 goto err_alloc;
4891 }
4892
4893 + spin_lock_init(&data->lock);
4894 +
4895 data->rcdev.owner = THIS_MODULE;
4896 data->rcdev.nr_resets = size * 32;
4897 data->rcdev.ops = &sunxi_reset_ops;
4898 @@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
4899 if (IS_ERR(data->membase))
4900 return PTR_ERR(data->membase);
4901
4902 + spin_lock_init(&data->lock);
4903 +
4904 data->rcdev.owner = THIS_MODULE;
4905 data->rcdev.nr_resets = resource_size(res) * 32;
4906 data->rcdev.ops = &sunxi_reset_ops;
4907 diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
4908 index 0d1d06488a28..e689bf20a3ea 100644
4909 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
4910 +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
4911 @@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
4912 &mpt2sas_phy->remote_identify);
4913 _transport_add_phy_to_an_existing_port(ioc, sas_node,
4914 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
4915 - } else {
4916 + } else
4917 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
4918 sas_identify));
4919 - _transport_del_phy_from_an_existing_port(ioc, sas_node,
4920 - mpt2sas_phy);
4921 - }
4922
4923 if (mpt2sas_phy->phy)
4924 mpt2sas_phy->phy->negotiated_linkrate =
4925 diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
4926 index d4bafaaebea9..3637ae6c0171 100644
4927 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
4928 +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
4929 @@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
4930 &mpt3sas_phy->remote_identify);
4931 _transport_add_phy_to_an_existing_port(ioc, sas_node,
4932 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
4933 - } else {
4934 + } else
4935 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
4936 sas_identify));
4937 - _transport_del_phy_from_an_existing_port(ioc, sas_node,
4938 - mpt3sas_phy);
4939 - }
4940
4941 if (mpt3sas_phy->phy)
4942 mpt3sas_phy->phy->negotiated_linkrate =
4943 diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
4944 index c1d04d4d3c6c..262ab837a704 100644
4945 --- a/drivers/scsi/scsi_devinfo.c
4946 +++ b/drivers/scsi/scsi_devinfo.c
4947 @@ -211,6 +211,7 @@ static struct {
4948 {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
4949 {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
4950 {"MICROP", "4110", NULL, BLIST_NOTQ},
4951 + {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
4952 {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
4953 {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
4954 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
4955 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4956 index bc5ff6ff9c79..01a79473350a 100644
4957 --- a/drivers/scsi/scsi_error.c
4958 +++ b/drivers/scsi/scsi_error.c
4959 @@ -1027,7 +1027,7 @@ retry:
4960 }
4961 /* signal not to enter either branch of the if () below */
4962 timeleft = 0;
4963 - rtn = NEEDS_RETRY;
4964 + rtn = FAILED;
4965 } else {
4966 timeleft = wait_for_completion_timeout(&done, timeout);
4967 rtn = SUCCESS;
4968 @@ -1067,7 +1067,7 @@ retry:
4969 rtn = FAILED;
4970 break;
4971 }
4972 - } else if (!rtn) {
4973 + } else if (rtn != FAILED) {
4974 scsi_abort_eh_cmnd(scmd);
4975 rtn = FAILED;
4976 }
4977 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
4978 index 50a6e1ac8d9c..17fb0518c9c1 100644
4979 --- a/drivers/scsi/scsi_lib.c
4980 +++ b/drivers/scsi/scsi_lib.c
4981 @@ -1829,7 +1829,9 @@ static int scsi_mq_prep_fn(struct request *req)
4982
4983 if (scsi_host_get_prot(shost)) {
4984 cmd->prot_sdb = (void *)sg +
4985 - shost->sg_tablesize * sizeof(struct scatterlist);
4986 + min_t(unsigned int,
4987 + shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
4988 + sizeof(struct scatterlist);
4989 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
4990
4991 cmd->prot_sdb->table.sgl =
4992 diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
4993 index 733e5f759518..90af465359d6 100644
4994 --- a/drivers/scsi/storvsc_drv.c
4995 +++ b/drivers/scsi/storvsc_drv.c
4996 @@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
4997 if (ret == -EAGAIN) {
4998 /* no more space */
4999
5000 - if (cmd_request->bounce_sgl_count) {
5001 + if (cmd_request->bounce_sgl_count)
5002 destroy_bounce_buffer(cmd_request->bounce_sgl,
5003 cmd_request->bounce_sgl_count);
5004
5005 - ret = SCSI_MLQUEUE_DEVICE_BUSY;
5006 - goto queue_error;
5007 - }
5008 + ret = SCSI_MLQUEUE_DEVICE_BUSY;
5009 + goto queue_error;
5010 }
5011
5012 return 0;
5013 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
5014 index 480f2e0ecc11..18e2601527df 100644
5015 --- a/drivers/target/iscsi/iscsi_target_login.c
5016 +++ b/drivers/target/iscsi/iscsi_target_login.c
5017 @@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
5018 {
5019 struct iscsi_session *sess = NULL;
5020 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
5021 - enum target_prot_op sup_pro_ops;
5022 int ret;
5023
5024 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
5025 @@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
5026 kfree(sess);
5027 return -ENOMEM;
5028 }
5029 - sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5030
5031 - sess->se_sess = transport_init_session(sup_pro_ops);
5032 + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
5033 if (IS_ERR(sess->se_sess)) {
5034 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
5035 ISCSI_LOGIN_STATUS_NO_RESOURCES);
5036 @@ -1204,6 +1202,9 @@ old_sess_out:
5037 conn->sock = NULL;
5038 }
5039
5040 + if (conn->conn_transport->iscsit_wait_conn)
5041 + conn->conn_transport->iscsit_wait_conn(conn);
5042 +
5043 if (conn->conn_transport->iscsit_free_conn)
5044 conn->conn_transport->iscsit_free_conn(conn);
5045
5046 @@ -1364,6 +1365,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
5047 }
5048 login->zero_tsih = zero_tsih;
5049
5050 + conn->sess->se_sess->sup_prot_ops =
5051 + conn->conn_transport->iscsit_get_sup_prot_ops(conn);
5052 +
5053 tpg = conn->tpg;
5054 if (!tpg) {
5055 pr_err("Unable to locate struct iscsi_conn->tpg\n");
5056 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
5057 index ce87ce9bdb9c..0b68c2ebce95 100644
5058 --- a/drivers/target/iscsi/iscsi_target_util.c
5059 +++ b/drivers/target/iscsi/iscsi_target_util.c
5060 @@ -1358,15 +1358,15 @@ static int iscsit_do_tx_data(
5061 struct iscsi_conn *conn,
5062 struct iscsi_data_count *count)
5063 {
5064 - int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
5065 + int ret, iov_len;
5066 struct kvec *iov_p;
5067 struct msghdr msg;
5068
5069 if (!conn || !conn->sock || !conn->conn_ops)
5070 return -1;
5071
5072 - if (data <= 0) {
5073 - pr_err("Data length is: %d\n", data);
5074 + if (count->data_length <= 0) {
5075 + pr_err("Data length is: %d\n", count->data_length);
5076 return -1;
5077 }
5078
5079 @@ -1375,20 +1375,16 @@ static int iscsit_do_tx_data(
5080 iov_p = count->iov;
5081 iov_len = count->iov_count;
5082
5083 - while (total_tx < data) {
5084 - tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
5085 - (data - total_tx));
5086 - if (tx_loop <= 0) {
5087 - pr_debug("tx_loop: %d total_tx %d\n",
5088 - tx_loop, total_tx);
5089 - return tx_loop;
5090 - }
5091 - total_tx += tx_loop;
5092 - pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
5093 - tx_loop, total_tx, data);
5094 + ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
5095 + count->data_length);
5096 + if (ret != count->data_length) {
5097 + pr_err("Unexpected ret: %d send data %d\n",
5098 + ret, count->data_length);
5099 + return -EPIPE;
5100 }
5101 + pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
5102
5103 - return total_tx;
5104 + return ret;
5105 }
5106
5107 int rx_data(
5108 diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
5109 index ab3ab27d49b7..0be83e788df2 100644
5110 --- a/drivers/target/loopback/tcm_loop.c
5111 +++ b/drivers/target/loopback/tcm_loop.c
5112 @@ -190,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
5113 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
5114 goto out_done;
5115 }
5116 - tl_nexus = tl_hba->tl_nexus;
5117 + tl_nexus = tl_tpg->tl_nexus;
5118 if (!tl_nexus) {
5119 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
5120 " does not exist\n");
5121 @@ -270,16 +270,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
5122 * to struct scsi_device
5123 */
5124 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
5125 - struct tcm_loop_nexus *tl_nexus,
5126 int lun, int task, enum tcm_tmreq_table tmr)
5127 {
5128 struct se_cmd *se_cmd = NULL;
5129 struct se_session *se_sess;
5130 struct se_portal_group *se_tpg;
5131 + struct tcm_loop_nexus *tl_nexus;
5132 struct tcm_loop_cmd *tl_cmd = NULL;
5133 struct tcm_loop_tmr *tl_tmr = NULL;
5134 int ret = TMR_FUNCTION_FAILED, rc;
5135
5136 + /*
5137 + * Locate the tl_nexus and se_sess pointers
5138 + */
5139 + tl_nexus = tl_tpg->tl_nexus;
5140 + if (!tl_nexus) {
5141 + pr_err("Unable to perform device reset without"
5142 + " active I_T Nexus\n");
5143 + return ret;
5144 + }
5145 +
5146 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
5147 if (!tl_cmd) {
5148 pr_err("Unable to allocate memory for tl_cmd\n");
5149 @@ -295,7 +305,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
5150
5151 se_cmd = &tl_cmd->tl_se_cmd;
5152 se_tpg = &tl_tpg->tl_se_tpg;
5153 - se_sess = tl_nexus->se_sess;
5154 + se_sess = tl_tpg->tl_nexus->se_sess;
5155 /*
5156 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
5157 */
5158 @@ -340,7 +350,6 @@ release:
5159 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5160 {
5161 struct tcm_loop_hba *tl_hba;
5162 - struct tcm_loop_nexus *tl_nexus;
5163 struct tcm_loop_tpg *tl_tpg;
5164 int ret = FAILED;
5165
5166 @@ -348,21 +357,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5167 * Locate the tcm_loop_hba_t pointer
5168 */
5169 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
5170 - /*
5171 - * Locate the tl_nexus and se_sess pointers
5172 - */
5173 - tl_nexus = tl_hba->tl_nexus;
5174 - if (!tl_nexus) {
5175 - pr_err("Unable to perform device reset without"
5176 - " active I_T Nexus\n");
5177 - return FAILED;
5178 - }
5179 -
5180 - /*
5181 - * Locate the tl_tpg pointer from TargetID in sc->device->id
5182 - */
5183 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
5184 - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
5185 + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
5186 sc->request->tag, TMR_ABORT_TASK);
5187 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
5188 }
5189 @@ -374,7 +370,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
5190 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
5191 {
5192 struct tcm_loop_hba *tl_hba;
5193 - struct tcm_loop_nexus *tl_nexus;
5194 struct tcm_loop_tpg *tl_tpg;
5195 int ret = FAILED;
5196
5197 @@ -382,20 +377,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
5198 * Locate the tcm_loop_hba_t pointer
5199 */
5200 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
5201 - /*
5202 - * Locate the tl_nexus and se_sess pointers
5203 - */
5204 - tl_nexus = tl_hba->tl_nexus;
5205 - if (!tl_nexus) {
5206 - pr_err("Unable to perform device reset without"
5207 - " active I_T Nexus\n");
5208 - return FAILED;
5209 - }
5210 - /*
5211 - * Locate the tl_tpg pointer from TargetID in sc->device->id
5212 - */
5213 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
5214 - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
5215 +
5216 + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
5217 0, TMR_LUN_RESET);
5218 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
5219 }
5220 @@ -1005,8 +989,8 @@ static int tcm_loop_make_nexus(
5221 struct tcm_loop_nexus *tl_nexus;
5222 int ret = -ENOMEM;
5223
5224 - if (tl_tpg->tl_hba->tl_nexus) {
5225 - pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
5226 + if (tl_tpg->tl_nexus) {
5227 + pr_debug("tl_tpg->tl_nexus already exists\n");
5228 return -EEXIST;
5229 }
5230 se_tpg = &tl_tpg->tl_se_tpg;
5231 @@ -1041,7 +1025,7 @@ static int tcm_loop_make_nexus(
5232 */
5233 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
5234 tl_nexus->se_sess, tl_nexus);
5235 - tl_tpg->tl_hba->tl_nexus = tl_nexus;
5236 + tl_tpg->tl_nexus = tl_nexus;
5237 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
5238 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
5239 name);
5240 @@ -1057,12 +1041,8 @@ static int tcm_loop_drop_nexus(
5241 {
5242 struct se_session *se_sess;
5243 struct tcm_loop_nexus *tl_nexus;
5244 - struct tcm_loop_hba *tl_hba = tpg->tl_hba;
5245
5246 - if (!tl_hba)
5247 - return -ENODEV;
5248 -
5249 - tl_nexus = tl_hba->tl_nexus;
5250 + tl_nexus = tpg->tl_nexus;
5251 if (!tl_nexus)
5252 return -ENODEV;
5253
5254 @@ -1078,13 +1058,13 @@ static int tcm_loop_drop_nexus(
5255 }
5256
5257 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
5258 - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
5259 + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
5260 tl_nexus->se_sess->se_node_acl->initiatorname);
5261 /*
5262 * Release the SCSI I_T Nexus to the emulated SAS Target Port
5263 */
5264 transport_deregister_session(tl_nexus->se_sess);
5265 - tpg->tl_hba->tl_nexus = NULL;
5266 + tpg->tl_nexus = NULL;
5267 kfree(tl_nexus);
5268 return 0;
5269 }
5270 @@ -1100,7 +1080,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
5271 struct tcm_loop_nexus *tl_nexus;
5272 ssize_t ret;
5273
5274 - tl_nexus = tl_tpg->tl_hba->tl_nexus;
5275 + tl_nexus = tl_tpg->tl_nexus;
5276 if (!tl_nexus)
5277 return -ENODEV;
5278
5279 diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
5280 index 54c59d0b6608..6ae49f272ba6 100644
5281 --- a/drivers/target/loopback/tcm_loop.h
5282 +++ b/drivers/target/loopback/tcm_loop.h
5283 @@ -27,11 +27,6 @@ struct tcm_loop_tmr {
5284 };
5285
5286 struct tcm_loop_nexus {
5287 - int it_nexus_active;
5288 - /*
5289 - * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
5290 - */
5291 - struct scsi_host *sh;
5292 /*
5293 * Pointer to TCM session for I_T Nexus
5294 */
5295 @@ -51,6 +46,7 @@ struct tcm_loop_tpg {
5296 atomic_t tl_tpg_port_count;
5297 struct se_portal_group tl_se_tpg;
5298 struct tcm_loop_hba *tl_hba;
5299 + struct tcm_loop_nexus *tl_nexus;
5300 };
5301
5302 struct tcm_loop_hba {
5303 @@ -59,7 +55,6 @@ struct tcm_loop_hba {
5304 struct se_hba_s *se_hba;
5305 struct se_lun *tl_hba_lun;
5306 struct se_port *tl_hba_lun_sep;
5307 - struct tcm_loop_nexus *tl_nexus;
5308 struct device dev;
5309 struct Scsi_Host *sh;
5310 struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
5311 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
5312 index c45f9e907e44..24fa5d1999af 100644
5313 --- a/drivers/target/target_core_device.c
5314 +++ b/drivers/target/target_core_device.c
5315 @@ -1169,10 +1169,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
5316 " changed for TCM/pSCSI\n", dev);
5317 return -EINVAL;
5318 }
5319 - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
5320 + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
5321 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
5322 - " greater than fabric_max_sectors: %u\n", dev,
5323 - optimal_sectors, dev->dev_attrib.fabric_max_sectors);
5324 + " greater than hw_max_sectors: %u\n", dev,
5325 + optimal_sectors, dev->dev_attrib.hw_max_sectors);
5326 return -EINVAL;
5327 }
5328
5329 @@ -1572,7 +1572,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
5330 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
5331 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
5332 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
5333 - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
5334
5335 xcopy_lun = &dev->xcopy_lun;
5336 xcopy_lun->lun_se_dev = dev;
5337 @@ -1613,6 +1612,7 @@ int target_configure_device(struct se_device *dev)
5338 dev->dev_attrib.hw_max_sectors =
5339 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
5340 dev->dev_attrib.hw_block_size);
5341 + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
5342
5343 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
5344 dev->creation_time = get_jiffies_64();
5345 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
5346 index 72c83d98662b..f018b6a3ffbf 100644
5347 --- a/drivers/target/target_core_file.c
5348 +++ b/drivers/target/target_core_file.c
5349 @@ -620,7 +620,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5350 struct fd_prot fd_prot;
5351 sense_reason_t rc;
5352 int ret = 0;
5353 -
5354 + /*
5355 + * We are currently limited by the number of iovecs (2048) per
5356 + * single vfs_[writev,readv] call.
5357 + */
5358 + if (cmd->data_length > FD_MAX_BYTES) {
5359 + pr_err("FILEIO: Not able to process I/O of %u bytes due to"
5360 + "FD_MAX_BYTES: %u iovec count limitiation\n",
5361 + cmd->data_length, FD_MAX_BYTES);
5362 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5363 + }
5364 /*
5365 * Call vectorized fileio functions to map struct scatterlist
5366 * physical memory addresses to struct iovec virtual memory.
5367 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
5368 index 7e6b857c6b3f..be27773ab829 100644
5369 --- a/drivers/target/target_core_iblock.c
5370 +++ b/drivers/target/target_core_iblock.c
5371 @@ -123,7 +123,7 @@ static int iblock_configure_device(struct se_device *dev)
5372 q = bdev_get_queue(bd);
5373
5374 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
5375 - dev->dev_attrib.hw_max_sectors = UINT_MAX;
5376 + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
5377 dev->dev_attrib.hw_queue_depth = q->nr_requests;
5378
5379 /*
5380 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
5381 index ebe62afb957d..7a88af0e32d6 100644
5382 --- a/drivers/target/target_core_sbc.c
5383 +++ b/drivers/target/target_core_sbc.c
5384 @@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
5385
5386 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
5387 unsigned long long end_lba;
5388 -
5389 - if (sectors > dev->dev_attrib.fabric_max_sectors) {
5390 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
5391 - " big sectors %u exceeds fabric_max_sectors:"
5392 - " %u\n", cdb[0], sectors,
5393 - dev->dev_attrib.fabric_max_sectors);
5394 - return TCM_INVALID_CDB_FIELD;
5395 - }
5396 - if (sectors > dev->dev_attrib.hw_max_sectors) {
5397 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
5398 - " big sectors %u exceeds backend hw_max_sectors:"
5399 - " %u\n", cdb[0], sectors,
5400 - dev->dev_attrib.hw_max_sectors);
5401 - return TCM_INVALID_CDB_FIELD;
5402 - }
5403 check_lba:
5404 end_lba = dev->transport->get_blocks(dev) + 1;
5405 if (cmd->t_task_lba + sectors > end_lba) {
5406 diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
5407 index bc286a67af7c..614005b6b08b 100644
5408 --- a/drivers/target/target_core_spc.c
5409 +++ b/drivers/target/target_core_spc.c
5410 @@ -505,7 +505,6 @@ static sense_reason_t
5411 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
5412 {
5413 struct se_device *dev = cmd->se_dev;
5414 - u32 max_sectors;
5415 int have_tp = 0;
5416 int opt, min;
5417
5418 @@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
5419 /*
5420 * Set MAXIMUM TRANSFER LENGTH
5421 */
5422 - max_sectors = min(dev->dev_attrib.fabric_max_sectors,
5423 - dev->dev_attrib.hw_max_sectors);
5424 - put_unaligned_be32(max_sectors, &buf[8]);
5425 + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
5426
5427 /*
5428 * Set OPTIMAL TRANSFER LENGTH
5429 diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
5430 index 95cb7fc20e17..6cb78497076a 100644
5431 --- a/drivers/thermal/intel_powerclamp.c
5432 +++ b/drivers/thermal/intel_powerclamp.c
5433 @@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
5434 * allowed. thus jiffies are updated properly.
5435 */
5436 preempt_disable();
5437 - tick_nohz_idle_enter();
5438 /* mwait until target jiffies is reached */
5439 while (time_before(jiffies, target_jiffies)) {
5440 unsigned long ecx = 1;
5441 @@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
5442 start_critical_timings();
5443 atomic_inc(&idle_wakeup_counter);
5444 }
5445 - tick_nohz_idle_exit();
5446 preempt_enable();
5447 }
5448 del_timer_sync(&wakeup_timer);
5449 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
5450 index eaeb9a02c7fe..a28dee9d5017 100644
5451 --- a/drivers/tty/serial/serial_core.c
5452 +++ b/drivers/tty/serial/serial_core.c
5453 @@ -2102,7 +2102,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
5454 break;
5455 }
5456
5457 - dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
5458 + printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
5459 + port->dev ? dev_name(port->dev) : "",
5460 + port->dev ? ": " : "",
5461 drv->dev_name,
5462 drv->tty_driver->name_base + port->line,
5463 address, port->irq, port->uartclk / 16, uart_type(port));
5464 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5465 index 546ea5431b8c..272e0928736e 100644
5466 --- a/drivers/usb/dwc3/gadget.c
5467 +++ b/drivers/usb/dwc3/gadget.c
5468 @@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
5469
5470 if (i == (request->num_mapped_sgs - 1) ||
5471 sg_is_last(s)) {
5472 - if (list_is_last(&req->list,
5473 - &dep->request_list))
5474 + if (list_empty(&dep->request_list))
5475 last_one = true;
5476 chain = false;
5477 }
5478 @@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
5479 if (last_one)
5480 break;
5481 }
5482 +
5483 + if (last_one)
5484 + break;
5485 } else {
5486 dma = req->request.dma;
5487 length = req->request.length;
5488 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
5489 index c744e4975d74..08048613eed6 100644
5490 --- a/drivers/usb/gadget/legacy/inode.c
5491 +++ b/drivers/usb/gadget/legacy/inode.c
5492 @@ -449,6 +449,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
5493 data->name, len, (int) value);
5494 free1:
5495 mutex_unlock(&data->lock);
5496 + kfree (kbuf);
5497 return value;
5498 }
5499
5500 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
5501 index 1529926e20a0..840856ca3e66 100644
5502 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
5503 +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
5504 @@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
5505 req->using_dma = 1;
5506 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
5507 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
5508 - | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
5509 + | USBA_DMA_END_BUF_EN;
5510
5511 - if (ep->is_in)
5512 - req->ctrl |= USBA_DMA_END_BUF_EN;
5513 + if (!ep->is_in)
5514 + req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
5515
5516 /*
5517 * Add this request to the queue and submit for DMA if
5518 @@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
5519 {
5520 struct usba_ep *ep = to_usba_ep(_ep);
5521 struct usba_udc *udc = ep->udc;
5522 - struct usba_request *req = to_usba_req(_req);
5523 + struct usba_request *req;
5524 unsigned long flags;
5525 u32 status;
5526
5527 @@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
5528
5529 spin_lock_irqsave(&udc->lock, flags);
5530
5531 + list_for_each_entry(req, &ep->queue, queue) {
5532 + if (&req->req == _req)
5533 + break;
5534 + }
5535 +
5536 + if (&req->req != _req) {
5537 + spin_unlock_irqrestore(&udc->lock, flags);
5538 + return -EINVAL;
5539 + }
5540 +
5541 if (req->using_dma) {
5542 /*
5543 * If this request is currently being transferred,
5544 @@ -1572,7 +1582,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
5545 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
5546 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
5547 receive_data(ep);
5548 - usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
5549 }
5550 }
5551
5552 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
5553 index e113fd73aeae..c399606f154e 100644
5554 --- a/drivers/usb/host/ehci-sched.c
5555 +++ b/drivers/usb/host/ehci-sched.c
5556 @@ -1581,6 +1581,10 @@ iso_stream_schedule (
5557 else
5558 next = (now + 2 + 7) & ~0x07; /* full frame cache */
5559
5560 + /* If needed, initialize last_iso_frame so that this URB will be seen */
5561 + if (ehci->isoc_count == 0)
5562 + ehci->last_iso_frame = now >> 3;
5563 +
5564 /*
5565 * Use ehci->last_iso_frame as the base. There can't be any
5566 * TDs scheduled for earlier than that.
5567 @@ -1671,10 +1675,6 @@ iso_stream_schedule (
5568 urb->start_frame = start & (mod - 1);
5569 if (!stream->highspeed)
5570 urb->start_frame >>= 3;
5571 -
5572 - /* Make sure scan_isoc() sees these */
5573 - if (ehci->isoc_count == 0)
5574 - ehci->last_iso_frame = now >> 3;
5575 return status;
5576
5577 fail:
5578 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
5579 index 2f3acebb577a..f4e6b945136c 100644
5580 --- a/drivers/usb/host/pci-quirks.c
5581 +++ b/drivers/usb/host/pci-quirks.c
5582 @@ -571,7 +571,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5583 {
5584 void __iomem *base;
5585 u32 control;
5586 - u32 fminterval;
5587 + u32 fminterval = 0;
5588 + bool no_fminterval = false;
5589 int cnt;
5590
5591 if (!mmio_resource_enabled(pdev, 0))
5592 @@ -581,6 +582,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5593 if (base == NULL)
5594 return;
5595
5596 + /*
5597 + * ULi M5237 OHCI controller locks the whole system when accessing
5598 + * the OHCI_FMINTERVAL offset.
5599 + */
5600 + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
5601 + no_fminterval = true;
5602 +
5603 control = readl(base + OHCI_CONTROL);
5604
5605 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
5606 @@ -619,7 +627,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5607 }
5608
5609 /* software reset of the controller, preserving HcFmInterval */
5610 - fminterval = readl(base + OHCI_FMINTERVAL);
5611 + if (!no_fminterval)
5612 + fminterval = readl(base + OHCI_FMINTERVAL);
5613 +
5614 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
5615
5616 /* reset requires max 10 us delay */
5617 @@ -628,7 +638,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
5618 break;
5619 udelay(1);
5620 }
5621 - writel(fminterval, base + OHCI_FMINTERVAL);
5622 +
5623 + if (!no_fminterval)
5624 + writel(fminterval, base + OHCI_FMINTERVAL);
5625
5626 /* Now the controller is safely in SUSPEND and nothing can wake it up */
5627 iounmap(base);
5628 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
5629 index 142b601f9563..7f76c8a12f89 100644
5630 --- a/drivers/usb/host/xhci-pci.c
5631 +++ b/drivers/usb/host/xhci-pci.c
5632 @@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
5633 "must be suspended extra slowly",
5634 pdev->revision);
5635 }
5636 + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
5637 + xhci->quirks |= XHCI_BROKEN_STREAMS;
5638 /* Fresco Logic confirms: all revisions of this chip do not
5639 * support MSI, even though some of them claim to in their PCI
5640 * capabilities.
5641 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5642 index 033b46c470bd..3bceabe109f7 100644
5643 --- a/drivers/usb/host/xhci.c
5644 +++ b/drivers/usb/host/xhci.c
5645 @@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
5646 return -EINVAL;
5647 }
5648
5649 + if (setup == SETUP_CONTEXT_ONLY) {
5650 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
5651 + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
5652 + SLOT_STATE_DEFAULT) {
5653 + xhci_dbg(xhci, "Slot already in default state\n");
5654 + return 0;
5655 + }
5656 + }
5657 +
5658 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
5659 if (!command)
5660 return -ENOMEM;
5661 diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
5662 index 855793d701bb..4500610356f2 100644
5663 --- a/drivers/usb/musb/musb_host.c
5664 +++ b/drivers/usb/musb/musb_host.c
5665 @@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
5666 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
5667 return;
5668 usb_remove_hcd(musb->hcd);
5669 - musb->hcd = NULL;
5670 }
5671
5672 void musb_host_free(struct musb *musb)
5673 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
5674 index 8d7fc48b1f30..29fa1c3d0089 100644
5675 --- a/drivers/usb/serial/console.c
5676 +++ b/drivers/usb/serial/console.c
5677 @@ -46,6 +46,8 @@ static struct console usbcons;
5678 * ------------------------------------------------------------
5679 */
5680
5681 +static const struct tty_operations usb_console_fake_tty_ops = {
5682 +};
5683
5684 /*
5685 * The parsing of the command line works exactly like the
5686 @@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
5687 goto reset_open_count;
5688 }
5689 kref_init(&tty->kref);
5690 - tty_port_tty_set(&port->port, tty);
5691 tty->driver = usb_serial_tty_driver;
5692 tty->index = co->index;
5693 + init_ldsem(&tty->ldisc_sem);
5694 + INIT_LIST_HEAD(&tty->tty_files);
5695 + kref_get(&tty->driver->kref);
5696 + tty->ops = &usb_console_fake_tty_ops;
5697 if (tty_init_termios(tty)) {
5698 retval = -ENOMEM;
5699 - goto free_tty;
5700 + goto put_tty;
5701 }
5702 + tty_port_tty_set(&port->port, tty);
5703 }
5704
5705 /* only call the device specific open if this
5706 @@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
5707 serial->type->set_termios(tty, port, &dummy);
5708
5709 tty_port_tty_set(&port->port, NULL);
5710 - kfree(tty);
5711 + tty_kref_put(tty);
5712 }
5713 set_bit(ASYNCB_INITIALIZED, &port->port.flags);
5714 }
5715 @@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
5716
5717 fail:
5718 tty_port_tty_set(&port->port, NULL);
5719 - free_tty:
5720 - kfree(tty);
5721 + put_tty:
5722 + tty_kref_put(tty);
5723 reset_open_count:
5724 port->port.count = 0;
5725 usb_autopm_put_interface(serial->interface);
5726 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
5727 index 6c4eb3cf5efd..f4c56fc1a9f6 100644
5728 --- a/drivers/usb/serial/cp210x.c
5729 +++ b/drivers/usb/serial/cp210x.c
5730 @@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
5731 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
5732 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
5733 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
5734 - { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
5735 + { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
5736 + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
5737 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
5738 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
5739 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
5740 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
5741 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
5742 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
5743 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
5744 diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
5745 index 077c714f1285..e07b15ed5814 100644
5746 --- a/drivers/usb/serial/keyspan.c
5747 +++ b/drivers/usb/serial/keyspan.c
5748 @@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
5749 }
5750 port = serial->port[msg->port];
5751 p_priv = usb_get_serial_port_data(port);
5752 + if (!p_priv)
5753 + goto resubmit;
5754
5755 /* Update handshaking pin state information */
5756 old_dcd_state = p_priv->dcd_state;
5757 @@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
5758
5759 if (old_dcd_state != p_priv->dcd_state)
5760 tty_port_tty_hangup(&port->port, true);
5761 -
5762 +resubmit:
5763 /* Resubmit urb so we continue receiving */
5764 err = usb_submit_urb(urb, GFP_ATOMIC);
5765 if (err != 0)
5766 @@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
5767 }
5768 port = serial->port[msg->port];
5769 p_priv = usb_get_serial_port_data(port);
5770 + if (!p_priv)
5771 + goto resubmit;
5772
5773 /* Update handshaking pin state information */
5774 old_dcd_state = p_priv->dcd_state;
5775 @@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
5776
5777 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5778 tty_port_tty_hangup(&port->port, true);
5779 -
5780 +resubmit:
5781 /* Resubmit urb so we continue receiving */
5782 err = usb_submit_urb(urb, GFP_ATOMIC);
5783 if (err != 0)
5784 @@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
5785 }
5786 port = serial->port[msg->portNumber];
5787 p_priv = usb_get_serial_port_data(port);
5788 + if (!p_priv)
5789 + goto resubmit;
5790
5791 /* Update handshaking pin state information */
5792 old_dcd_state = p_priv->dcd_state;
5793 @@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
5794
5795 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5796 tty_port_tty_hangup(&port->port, true);
5797 -
5798 +resubmit:
5799 /* Resubmit urb so we continue receiving */
5800 err = usb_submit_urb(urb, GFP_ATOMIC);
5801 if (err != 0)
5802 @@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
5803
5804 port = serial->port[0];
5805 p_priv = usb_get_serial_port_data(port);
5806 + if (!p_priv)
5807 + goto resubmit;
5808
5809 /* Update handshaking pin state information */
5810 old_dcd_state = p_priv->dcd_state;
5811 @@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
5812
5813 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5814 tty_port_tty_hangup(&port->port, true);
5815 -
5816 +resubmit:
5817 /* Resubmit urb so we continue receiving */
5818 err = usb_submit_urb(urb, GFP_ATOMIC);
5819 if (err != 0)
5820 @@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
5821
5822 port = serial->port[msg->port];
5823 p_priv = usb_get_serial_port_data(port);
5824 + if (!p_priv)
5825 + goto resubmit;
5826
5827 /* Update handshaking pin state information */
5828 old_dcd_state = p_priv->dcd_state;
5829 @@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
5830
5831 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
5832 tty_port_tty_hangup(&port->port, true);
5833 -
5834 +resubmit:
5835 /* Resubmit urb so we continue receiving */
5836 err = usb_submit_urb(urb, GFP_ATOMIC);
5837 if (err != 0)
5838 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5839 index 7a4c21b4f676..efdcee15b520 100644
5840 --- a/drivers/usb/serial/option.c
5841 +++ b/drivers/usb/serial/option.c
5842 @@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
5843
5844 #define QUALCOMM_VENDOR_ID 0x05C6
5845
5846 +#define SIERRA_VENDOR_ID 0x1199
5847 +
5848 #define CMOTECH_VENDOR_ID 0x16d8
5849 #define CMOTECH_PRODUCT_6001 0x6001
5850 #define CMOTECH_PRODUCT_CMU_300 0x6002
5851 @@ -512,7 +514,7 @@ enum option_blacklist_reason {
5852 OPTION_BLACKLIST_RESERVED_IF = 2
5853 };
5854
5855 -#define MAX_BL_NUM 8
5856 +#define MAX_BL_NUM 11
5857 struct option_blacklist_info {
5858 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
5859 const unsigned long sendsetup;
5860 @@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
5861 .reserved = BIT(1) | BIT(5),
5862 };
5863
5864 +static const struct option_blacklist_info sierra_mc73xx_blacklist = {
5865 + .sendsetup = BIT(0) | BIT(2),
5866 + .reserved = BIT(8) | BIT(10) | BIT(11),
5867 +};
5868 +
5869 static const struct usb_device_id option_ids[] = {
5870 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
5871 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
5872 @@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
5873 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
5874 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
5875 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
5876 + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
5877 + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
5878 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
5879 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
5880 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
5881 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
5882 index cb3e14780a7e..9c63897b3a56 100644
5883 --- a/drivers/usb/serial/qcserial.c
5884 +++ b/drivers/usb/serial/qcserial.c
5885 @@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
5886 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
5887 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
5888 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
5889 - {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
5890 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
5891 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
5892 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
5893 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
5894 index 18a283d6de1c..1f430bb02ca1 100644
5895 --- a/drivers/usb/storage/unusual_uas.h
5896 +++ b/drivers/usb/storage/unusual_uas.h
5897 @@ -68,6 +68,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
5898 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5899 US_FL_NO_ATA_1X),
5900
5901 +/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
5902 +UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
5903 + "Seagate",
5904 + "Backup Plus",
5905 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5906 + US_FL_NO_ATA_1X),
5907 +
5908 +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
5909 +UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
5910 + "Seagate",
5911 + "Backup Plus Desk",
5912 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5913 + US_FL_NO_ATA_1X),
5914 +
5915 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
5916 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
5917 "Seagate",
5918 @@ -82,6 +96,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
5919 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5920 US_FL_NO_ATA_1X),
5921
5922 +/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
5923 +UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
5924 + "Seagate",
5925 + "BUP Fast HDD",
5926 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5927 + US_FL_NO_ATA_1X),
5928 +
5929 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
5930 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
5931 "JMicron",
5932 @@ -104,6 +125,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
5933 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5934 US_FL_NO_ATA_1X),
5935
5936 +/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
5937 +UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
5938 + "JMicron",
5939 + "JMS566",
5940 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
5941 + US_FL_NO_REPORT_OPCODES),
5942 +
5943 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
5944 UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
5945 "Hitachi",
5946 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
5947 index 9558da3f06a0..2f8a0552d020 100644
5948 --- a/drivers/vfio/pci/vfio_pci.c
5949 +++ b/drivers/vfio/pci/vfio_pci.c
5950 @@ -839,13 +839,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
5951
5952 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5953 {
5954 - u8 type;
5955 struct vfio_pci_device *vdev;
5956 struct iommu_group *group;
5957 int ret;
5958
5959 - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
5960 - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
5961 + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
5962 return -EINVAL;
5963
5964 group = iommu_group_get(&pdev->dev);
5965 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
5966 index a17f11850669..cb84f69f76ad 100644
5967 --- a/drivers/vhost/scsi.c
5968 +++ b/drivers/vhost/scsi.c
5969 @@ -909,6 +909,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
5970 return 0;
5971 }
5972
5973 +static int vhost_scsi_to_tcm_attr(int attr)
5974 +{
5975 + switch (attr) {
5976 + case VIRTIO_SCSI_S_SIMPLE:
5977 + return MSG_SIMPLE_TAG;
5978 + case VIRTIO_SCSI_S_ORDERED:
5979 + return MSG_ORDERED_TAG;
5980 + case VIRTIO_SCSI_S_HEAD:
5981 + return MSG_HEAD_TAG;
5982 + case VIRTIO_SCSI_S_ACA:
5983 + return MSG_ACA_TAG;
5984 + default:
5985 + break;
5986 + }
5987 + return MSG_SIMPLE_TAG;
5988 +}
5989 +
5990 static void tcm_vhost_submission_work(struct work_struct *work)
5991 {
5992 struct tcm_vhost_cmd *cmd =
5993 @@ -934,9 +951,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
5994 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
5995 cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
5996 cmd->tvc_lun, cmd->tvc_exp_data_len,
5997 - cmd->tvc_task_attr, cmd->tvc_data_direction,
5998 - TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
5999 - NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
6000 + vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
6001 + cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
6002 + sg_ptr, cmd->tvc_sgl_count, NULL, 0,
6003 + sg_prot_ptr, cmd->tvc_prot_sgl_count);
6004 if (rc < 0) {
6005 transport_send_check_condition_and_sense(se_cmd,
6006 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
6007 diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
6008 index 900aa4ecd617..d6cab1fd9a47 100644
6009 --- a/drivers/video/fbdev/core/fb_defio.c
6010 +++ b/drivers/video/fbdev/core/fb_defio.c
6011 @@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
6012 cancel_delayed_work_sync(&info->deferred_work);
6013
6014 /* Run it immediately */
6015 - err = schedule_delayed_work(&info->deferred_work, 0);
6016 + schedule_delayed_work(&info->deferred_work, 0);
6017 mutex_unlock(&inode->i_mutex);
6018 - return err;
6019 +
6020 + return 0;
6021 }
6022 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
6023
6024 diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
6025 index 940cd196eef5..10fbfd8ab963 100644
6026 --- a/drivers/video/logo/logo.c
6027 +++ b/drivers/video/logo/logo.c
6028 @@ -21,6 +21,21 @@ static bool nologo;
6029 module_param(nologo, bool, 0);
6030 MODULE_PARM_DESC(nologo, "Disables startup logo");
6031
6032 +/*
6033 + * Logos are located in the initdata, and will be freed in kernel_init.
6034 + * Use late_init to mark the logos as freed to prevent any further use.
6035 + */
6036 +
6037 +static bool logos_freed;
6038 +
6039 +static int __init fb_logo_late_init(void)
6040 +{
6041 + logos_freed = true;
6042 + return 0;
6043 +}
6044 +
6045 +late_initcall(fb_logo_late_init);
6046 +
6047 /* logo's are marked __initdata. Use __init_refok to tell
6048 * modpost that it is intended that this function uses data
6049 * marked __initdata.
6050 @@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
6051 {
6052 const struct linux_logo *logo = NULL;
6053
6054 - if (nologo)
6055 + if (nologo || logos_freed)
6056 return NULL;
6057
6058 if (depth >= 1) {
6059 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
6060 index d1bb7ecfd201..61024987f97b 100644
6061 --- a/fs/lockd/svc.c
6062 +++ b/fs/lockd/svc.c
6063 @@ -138,10 +138,6 @@ lockd(void *vrqstp)
6064
6065 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
6066
6067 - if (!nlm_timeout)
6068 - nlm_timeout = LOCKD_DFLT_TIMEO;
6069 - nlmsvc_timeout = nlm_timeout * HZ;
6070 -
6071 /*
6072 * The main request loop. We don't terminate until the last
6073 * NFS mount or NFS daemon has gone away.
6074 @@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
6075 printk(KERN_WARNING
6076 "lockd_up: no pid, %d users??\n", nlmsvc_users);
6077
6078 + if (!nlm_timeout)
6079 + nlm_timeout = LOCKD_DFLT_TIMEO;
6080 + nlmsvc_timeout = nlm_timeout * HZ;
6081 +
6082 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
6083 if (!serv) {
6084 printk(KERN_WARNING "lockd_up: create service failed\n");
6085 diff --git a/fs/locks.c b/fs/locks.c
6086 index 735b8d3fa78c..59e2f905e4ff 100644
6087 --- a/fs/locks.c
6088 +++ b/fs/locks.c
6089 @@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
6090 break;
6091 }
6092 trace_generic_delete_lease(inode, fl);
6093 - if (fl)
6094 + if (fl && IS_LEASE(fl))
6095 error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
6096 spin_unlock(&inode->i_lock);
6097 locks_dispose_list(&dispose);
6098 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
6099 index ffdb28d86cf8..9588873d4c46 100644
6100 --- a/fs/nfs/nfs4client.c
6101 +++ b/fs/nfs/nfs4client.c
6102 @@ -572,20 +572,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
6103 }
6104
6105 /*
6106 - * Returns true if the server owners match
6107 + * Returns true if the server major ids match
6108 */
6109 static bool
6110 -nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
6111 +nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
6112 {
6113 struct nfs41_server_owner *o1 = a->cl_serverowner;
6114 struct nfs41_server_owner *o2 = b->cl_serverowner;
6115
6116 - if (o1->minor_id != o2->minor_id) {
6117 - dprintk("NFS: --> %s server owner minor IDs do not match\n",
6118 - __func__);
6119 - return false;
6120 - }
6121 -
6122 if (o1->major_id_sz != o2->major_id_sz)
6123 goto out_major_mismatch;
6124 if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
6125 @@ -661,7 +655,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
6126 if (!nfs4_match_clientids(pos, new))
6127 continue;
6128
6129 - if (!nfs4_match_serverowners(pos, new))
6130 + /*
6131 + * Note that session trunking is just a special subcase of
6132 + * client id trunking. In either case, we want to fall back
6133 + * to using the existing nfs_client.
6134 + */
6135 + if (!nfs4_check_clientid_trunking(pos, new))
6136 continue;
6137
6138 atomic_inc(&pos->cl_count);
6139 diff --git a/fs/proc/stat.c b/fs/proc/stat.c
6140 index bf2d03f8fd3e..510413eb25b8 100644
6141 --- a/fs/proc/stat.c
6142 +++ b/fs/proc/stat.c
6143 @@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
6144
6145 /* sum again ? it could be updated? */
6146 for_each_irq_nr(j)
6147 - seq_put_decimal_ull(p, ' ', kstat_irqs(j));
6148 + seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
6149
6150 seq_printf(p,
6151 "\nctxt %llu\n"
6152 diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
6153 index b9376cd5a187..25a822f6f000 100644
6154 --- a/include/linux/kernel_stat.h
6155 +++ b/include/linux/kernel_stat.h
6156 @@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
6157 * Number of interrupts per specific IRQ source, since bootup
6158 */
6159 extern unsigned int kstat_irqs(unsigned int irq);
6160 +extern unsigned int kstat_irqs_usr(unsigned int irq);
6161
6162 /*
6163 * Number of interrupts per cpu, since bootup
6164 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6165 index 74fd5d37f15a..22339b4b1c8c 100644
6166 --- a/include/linux/netdevice.h
6167 +++ b/include/linux/netdevice.h
6168 @@ -998,12 +998,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
6169 * Callback to use for xmit over the accelerated station. This
6170 * is used in place of ndo_start_xmit on accelerated net
6171 * devices.
6172 - * bool (*ndo_gso_check) (struct sk_buff *skb,
6173 - * struct net_device *dev);
6174 + * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
6175 + * struct net_device *dev,
6176 + * netdev_features_t features);
6177 * Called by core transmit path to determine if device is capable of
6178 - * performing GSO on a packet. The device returns true if it is
6179 - * able to GSO the packet, false otherwise. If the return value is
6180 - * false the stack will do software GSO.
6181 + * performing offload operations on a given packet. This is to give
6182 + * the device an opportunity to implement any restrictions that cannot
6183 + * be otherwise expressed by feature flags. The check is called with
6184 + * the set of features that the stack has calculated and it returns
6185 + * those the driver believes to be appropriate.
6186 */
6187 struct net_device_ops {
6188 int (*ndo_init)(struct net_device *dev);
6189 @@ -1153,8 +1156,9 @@ struct net_device_ops {
6190 struct net_device *dev,
6191 void *priv);
6192 int (*ndo_get_lock_subclass)(struct net_device *dev);
6193 - bool (*ndo_gso_check) (struct sk_buff *skb,
6194 - struct net_device *dev);
6195 + netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
6196 + struct net_device *dev,
6197 + netdev_features_t features);
6198 };
6199
6200 /**
6201 @@ -3584,8 +3588,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
6202 netdev_features_t features)
6203 {
6204 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
6205 - (dev->netdev_ops->ndo_gso_check &&
6206 - !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
6207 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
6208 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
6209 }
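
Illustrative sketch (not part of the patch): with ndo_features_check, a
driver that cannot offload encapsulated traffic can narrow the feature
set per packet. The foo_* names below are hypothetical.

	static netdev_features_t foo_features_check(struct sk_buff *skb,
						    struct net_device *dev,
						    netdev_features_t features)
	{
		/* Hardware cannot checksum or segment encapsulated frames */
		if (skb->encapsulation)
			features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
		return features;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_features_check = foo_features_check,
	};
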
6210 diff --git a/include/net/vxlan.h b/include/net/vxlan.h
6211 index 57cccd0052e5..903461aa5644 100644
6212 --- a/include/net/vxlan.h
6213 +++ b/include/net/vxlan.h
6214 @@ -1,6 +1,9 @@
6215 #ifndef __NET_VXLAN_H
6216 #define __NET_VXLAN_H 1
6217
6218 +#include <linux/ip.h>
6219 +#include <linux/ipv6.h>
6220 +#include <linux/if_vlan.h>
6221 #include <linux/skbuff.h>
6222 #include <linux/netdevice.h>
6223 #include <linux/udp.h>
6224 @@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
6225 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
6226 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
6227
6228 -static inline bool vxlan_gso_check(struct sk_buff *skb)
6229 +static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
6230 + netdev_features_t features)
6231 {
6232 - if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
6233 + u8 l4_hdr = 0;
6234 +
6235 + if (!skb->encapsulation)
6236 + return features;
6237 +
6238 + switch (vlan_get_protocol(skb)) {
6239 + case htons(ETH_P_IP):
6240 + l4_hdr = ip_hdr(skb)->protocol;
6241 + break;
6242 + case htons(ETH_P_IPV6):
6243 + l4_hdr = ipv6_hdr(skb)->nexthdr;
6244 + break;
6245 + default:
6246 + return features;
6247 + }
6248 +
6249 + if ((l4_hdr == IPPROTO_UDP) &&
6250 (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
6251 skb->inner_protocol != htons(ETH_P_TEB) ||
6252 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
6253 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
6254 - return false;
6255 + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
6256
6257 - return true;
6258 + return features;
6259 }
6260
6261 /* IP header + UDP + VXLAN + Ethernet header */
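
Illustrative sketch (not part of the patch): a driver with VXLAN offload
can delegate its per-packet restriction to the helper above from its own
ndo_features_check; foo_features_check is a hypothetical name.

	static netdev_features_t foo_features_check(struct sk_buff *skb,
						    struct net_device *dev,
						    netdev_features_t features)
	{
		/* Strip checksum/GSO features for tunnels the NIC cannot parse */
		return vxlan_features_check(skb, features);
	}
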
6262 diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
6263 index 74a2a1773494..79b12b004ade 100644
6264 --- a/include/uapi/linux/in6.h
6265 +++ b/include/uapi/linux/in6.h
6266 @@ -149,7 +149,7 @@ struct in6_flowlabel_req {
6267 /*
6268 * IPV6 socket options
6269 */
6270 -
6271 +#if __UAPI_DEF_IPV6_OPTIONS
6272 #define IPV6_ADDRFORM 1
6273 #define IPV6_2292PKTINFO 2
6274 #define IPV6_2292HOPOPTS 3
6275 @@ -196,6 +196,7 @@ struct in6_flowlabel_req {
6276
6277 #define IPV6_IPSEC_POLICY 34
6278 #define IPV6_XFRM_POLICY 35
6279 +#endif
6280
6281 /*
6282 * Multicast:
6283 diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
6284 index c140620dad92..e28807ad17fa 100644
6285 --- a/include/uapi/linux/libc-compat.h
6286 +++ b/include/uapi/linux/libc-compat.h
6287 @@ -69,6 +69,7 @@
6288 #define __UAPI_DEF_SOCKADDR_IN6 0
6289 #define __UAPI_DEF_IPV6_MREQ 0
6290 #define __UAPI_DEF_IPPROTO_V6 0
6291 +#define __UAPI_DEF_IPV6_OPTIONS 0
6292
6293 #else
6294
6295 @@ -82,6 +83,7 @@
6296 #define __UAPI_DEF_SOCKADDR_IN6 1
6297 #define __UAPI_DEF_IPV6_MREQ 1
6298 #define __UAPI_DEF_IPPROTO_V6 1
6299 +#define __UAPI_DEF_IPV6_OPTIONS 1
6300
6301 #endif /* _NETINET_IN_H */
6302
6303 @@ -103,6 +105,7 @@
6304 #define __UAPI_DEF_SOCKADDR_IN6 1
6305 #define __UAPI_DEF_IPV6_MREQ 1
6306 #define __UAPI_DEF_IPPROTO_V6 1
6307 +#define __UAPI_DEF_IPV6_OPTIONS 1
6308
6309 /* Definitions for xattr.h */
6310 #define __UAPI_DEF_XATTR 1
6311 diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
6312 index 7dcfbe6771b1..b483d1909d3e 100644
6313 --- a/include/uapi/linux/target_core_user.h
6314 +++ b/include/uapi/linux/target_core_user.h
6315 @@ -6,10 +6,6 @@
6316 #include <linux/types.h>
6317 #include <linux/uio.h>
6318
6319 -#ifndef __packed
6320 -#define __packed __attribute__((packed))
6321 -#endif
6322 -
6323 #define TCMU_VERSION "1.0"
6324
6325 /*
6326 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
6327 index 4332d766619d..df553b0af936 100644
6328 --- a/kernel/irq/internals.h
6329 +++ b/kernel/irq/internals.h
6330 @@ -78,8 +78,12 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
6331
6332 #ifdef CONFIG_SPARSE_IRQ
6333 static inline void irq_mark_irq(unsigned int irq) { }
6334 +extern void irq_lock_sparse(void);
6335 +extern void irq_unlock_sparse(void);
6336 #else
6337 extern void irq_mark_irq(unsigned int irq);
6338 +static inline void irq_lock_sparse(void) { }
6339 +static inline void irq_unlock_sparse(void) { }
6340 #endif
6341
6342 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
6343 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
6344 index a1782f88f0af..99793b9b6d23 100644
6345 --- a/kernel/irq/irqdesc.c
6346 +++ b/kernel/irq/irqdesc.c
6347 @@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
6348 static inline void free_masks(struct irq_desc *desc) { }
6349 #endif
6350
6351 +void irq_lock_sparse(void)
6352 +{
6353 + mutex_lock(&sparse_irq_lock);
6354 +}
6355 +
6356 +void irq_unlock_sparse(void)
6357 +{
6358 + mutex_unlock(&sparse_irq_lock);
6359 +}
6360 +
6361 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
6362 {
6363 struct irq_desc *desc;
6364 @@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
6365
6366 unregister_irq_proc(irq, desc);
6367
6368 + /*
6369 + * sparse_irq_lock also protects show_interrupts() and
6370 + * kstat_irqs_usr(). Once we have deleted the descriptor from
6371 + * the sparse tree we can free it; lookups from proc will then
6372 + * fail to find the descriptor.
6373 + */
6374 mutex_lock(&sparse_irq_lock);
6375 delete_irq_desc(irq);
6376 mutex_unlock(&sparse_irq_lock);
6377 @@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
6378 kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
6379 }
6380
6381 +/**
6382 + * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
6383 + * @irq: The interrupt number
6384 + * @cpu: The cpu number
6385 + *
6386 + * Returns the sum of interrupt counts on @cpu since boot for
6387 + * @irq. The caller must ensure that the interrupt is not removed
6388 + * concurrently.
6389 + */
6390 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
6391 {
6392 struct irq_desc *desc = irq_to_desc(irq);
6393 @@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
6394 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
6395 }
6396
6397 +/**
6398 + * kstat_irqs - Get the statistics for an interrupt
6399 + * @irq: The interrupt number
6400 + *
6401 + * Returns the sum of interrupt counts on all cpus since boot for
6402 + * @irq. The caller must ensure that the interrupt is not removed
6403 + * concurrently.
6404 + */
6405 unsigned int kstat_irqs(unsigned int irq)
6406 {
6407 struct irq_desc *desc = irq_to_desc(irq);
6408 @@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
6409 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
6410 return sum;
6411 }
6412 +
6413 +/**
6414 + * kstat_irqs_usr - Get the statistics for an interrupt
6415 + * @irq: The interrupt number
6416 + *
6417 + * Returns the sum of interrupt counts on all cpus since boot for
6418 + * @irq. Unlike kstat_irqs(), this can be called from any
6419 + * preemptible context. It's protected against concurrent removal of
6420 + * an interrupt descriptor when sparse irqs are enabled.
6421 + */
6422 +unsigned int kstat_irqs_usr(unsigned int irq)
6423 +{
6424 + unsigned int sum;
6425 +
6426 + irq_lock_sparse();
6427 + sum = kstat_irqs(irq);
6428 + irq_unlock_sparse();
6429 + return sum;
6430 +}
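
Illustrative sketch (not part of the patch): kstat_irqs_usr() lets a
preemptible reader, such as the /proc/stat path changed above, sum
interrupt counts without holding any irq-internal locks itself. The
helper name is hypothetical.

	/* kstat_irqs_usr() takes sparse_irq_lock internally, so a
	 * concurrent free_desc() cannot free a descriptor mid-sum. */
	static u64 sum_all_irqs(void)
	{
		unsigned int irq;
		u64 sum = 0;

		for_each_irq_nr(irq)
			sum += kstat_irqs_usr(irq);
		return sum;
	}
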
6431 diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
6432 index ac1ba2f11032..9dc9bfd8a678 100644
6433 --- a/kernel/irq/proc.c
6434 +++ b/kernel/irq/proc.c
6435 @@ -15,6 +15,23 @@
6436
6437 #include "internals.h"
6438
6439 +/*
6440 + * Access rules:
6441 + *
6442 + * procfs protects read/write of /proc/irq/N/ files against a
6443 + * concurrent free of the interrupt descriptor. remove_proc_entry()
6444 + * immediately prevents new read/writes to happen and waits for
6445 + * already running read/write functions to complete.
6446 + *
6447 + * We remove the proc entries first and then delete the interrupt
6448 + * descriptor from the radix tree and free it. So it is guaranteed
6449 + * that irq_to_desc(N) is valid as long as the read/writes are
6450 + * permitted by procfs.
6451 + *
6452 + * The read from /proc/interrupts is a different problem because there
6453 + * is no protection. So the lookup and the access to irqdesc
6454 + * information must be protected by sparse_irq_lock.
6455 + */
6456 static struct proc_dir_entry *root_irq_dir;
6457
6458 #ifdef CONFIG_SMP
6459 @@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
6460 seq_putc(p, '\n');
6461 }
6462
6463 + irq_lock_sparse();
6464 desc = irq_to_desc(i);
6465 if (!desc)
6466 - return 0;
6467 + goto outsparse;
6468
6469 raw_spin_lock_irqsave(&desc->lock, flags);
6470 for_each_online_cpu(j)
6471 @@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
6472 seq_putc(p, '\n');
6473 out:
6474 raw_spin_unlock_irqrestore(&desc->lock, flags);
6475 +outsparse:
6476 + irq_unlock_sparse();
6477 return 0;
6478 }
6479 #endif
6480 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
6481 index 7b5741fc4110..8c30ef7a2b70 100644
6482 --- a/kernel/time/tick-sched.c
6483 +++ b/kernel/time/tick-sched.c
6484 @@ -847,7 +847,6 @@ void tick_nohz_idle_enter(void)
6485
6486 local_irq_enable();
6487 }
6488 -EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
6489
6490 /**
6491 * tick_nohz_irq_exit - update next tick event from interrupt exit
6492 @@ -974,7 +973,6 @@ void tick_nohz_idle_exit(void)
6493
6494 local_irq_enable();
6495 }
6496 -EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
6497
6498 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
6499 {
6500 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
6501 index 31c90fec4158..124e2c702ead 100644
6502 --- a/kernel/trace/ftrace.c
6503 +++ b/kernel/trace/ftrace.c
6504 @@ -2308,12 +2308,14 @@ static void ftrace_run_update_code(int command)
6505 }
6506
6507 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
6508 - struct ftrace_hash *old_hash)
6509 + struct ftrace_ops_hash *old_hash)
6510 {
6511 ops->flags |= FTRACE_OPS_FL_MODIFYING;
6512 - ops->old_hash.filter_hash = old_hash;
6513 + ops->old_hash.filter_hash = old_hash->filter_hash;
6514 + ops->old_hash.notrace_hash = old_hash->notrace_hash;
6515 ftrace_run_update_code(command);
6516 ops->old_hash.filter_hash = NULL;
6517 + ops->old_hash.notrace_hash = NULL;
6518 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
6519 }
6520
6521 @@ -3357,7 +3359,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
6522
6523 static int ftrace_probe_registered;
6524
6525 -static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
6526 +static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
6527 {
6528 int ret;
6529 int i;
6530 @@ -3415,6 +3417,7 @@ int
6531 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6532 void *data)
6533 {
6534 + struct ftrace_ops_hash old_hash_ops;
6535 struct ftrace_func_probe *entry;
6536 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
6537 struct ftrace_hash *old_hash = *orig_hash;
6538 @@ -3436,6 +3439,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6539
6540 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
6541
6542 + old_hash_ops.filter_hash = old_hash;
6543 + /* Probes only have filters */
6544 + old_hash_ops.notrace_hash = NULL;
6545 +
6546 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
6547 if (!hash) {
6548 count = -ENOMEM;
6549 @@ -3496,7 +3503,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
6550
6551 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
6552
6553 - __enable_ftrace_function_probe(old_hash);
6554 + __enable_ftrace_function_probe(&old_hash_ops);
6555
6556 if (!ret)
6557 free_ftrace_hash_rcu(old_hash);
6558 @@ -3784,10 +3791,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
6559 }
6560
6561 static void ftrace_ops_update_code(struct ftrace_ops *ops,
6562 - struct ftrace_hash *old_hash)
6563 + struct ftrace_ops_hash *old_hash)
6564 {
6565 - if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
6566 + struct ftrace_ops *op;
6567 +
6568 + if (!ftrace_enabled)
6569 + return;
6570 +
6571 + if (ops->flags & FTRACE_OPS_FL_ENABLED) {
6572 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
6573 + return;
6574 + }
6575 +
6576 + /*
6577 + * If this is the shared global_ops filter, then we need to
6578 + * check whether another ops that shares it is enabled.
6579 + * If so, we still need to run the modify code.
6580 + */
6581 + if (ops->func_hash != &global_ops.local_hash)
6582 + return;
6583 +
6584 + do_for_each_ftrace_op(op, ftrace_ops_list) {
6585 + if (op->func_hash == &global_ops.local_hash &&
6586 + op->flags & FTRACE_OPS_FL_ENABLED) {
6587 + ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
6588 + /* Only need to do this once */
6589 + return;
6590 + }
6591 + } while_for_each_ftrace_op(op);
6592 }
6593
6594 static int
6595 @@ -3795,6 +3826,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
6596 unsigned long ip, int remove, int reset, int enable)
6597 {
6598 struct ftrace_hash **orig_hash;
6599 + struct ftrace_ops_hash old_hash_ops;
6600 struct ftrace_hash *old_hash;
6601 struct ftrace_hash *hash;
6602 int ret;
6603 @@ -3831,9 +3863,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
6604
6605 mutex_lock(&ftrace_lock);
6606 old_hash = *orig_hash;
6607 + old_hash_ops.filter_hash = ops->func_hash->filter_hash;
6608 + old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
6609 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
6610 if (!ret) {
6611 - ftrace_ops_update_code(ops, old_hash);
6612 + ftrace_ops_update_code(ops, &old_hash_ops);
6613 free_ftrace_hash_rcu(old_hash);
6614 }
6615 mutex_unlock(&ftrace_lock);
6616 @@ -4042,6 +4076,7 @@ static void __init set_ftrace_early_filters(void)
6617 int ftrace_regex_release(struct inode *inode, struct file *file)
6618 {
6619 struct seq_file *m = (struct seq_file *)file->private_data;
6620 + struct ftrace_ops_hash old_hash_ops;
6621 struct ftrace_iterator *iter;
6622 struct ftrace_hash **orig_hash;
6623 struct ftrace_hash *old_hash;
6624 @@ -4075,10 +4110,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
6625
6626 mutex_lock(&ftrace_lock);
6627 old_hash = *orig_hash;
6628 + old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
6629 + old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
6630 ret = ftrace_hash_move(iter->ops, filter_hash,
6631 orig_hash, iter->hash);
6632 if (!ret) {
6633 - ftrace_ops_update_code(iter->ops, old_hash);
6634 + ftrace_ops_update_code(iter->ops, &old_hash_ops);
6635 free_ftrace_hash_rcu(old_hash);
6636 }
6637 mutex_unlock(&ftrace_lock);
6638 diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
6639 index 8290e0bef7ea..6dd0335ea61b 100644
6640 --- a/lib/decompress_bunzip2.c
6641 +++ b/lib/decompress_bunzip2.c
6642 @@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
6643 if (get_bits(bd, 1))
6644 return RETVAL_OBSOLETE_INPUT;
6645 origPtr = get_bits(bd, 24);
6646 - if (origPtr > dbufSize)
6647 + if (origPtr >= dbufSize)
6648 return RETVAL_DATA_ERROR;
6649 /* mapping table: if some byte values are never used (encoding things
6650 like ascii text), the compression code removes the gaps to have fewer
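
Illustrative note (not part of the patch): origPtr is later used as an
index into dbuf, an array of dbufSize entries, so the valid range is
0 .. dbufSize - 1 and origPtr == dbufSize must be rejected as well.

	if (origPtr >= dbufSize)	/* '>' alone let origPtr == dbufSize through */
		return RETVAL_DATA_ERROR;
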
6651 diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
6652 index fc1835c6bb40..00f9e144cc97 100644
6653 --- a/net/batman-adv/fragmentation.c
6654 +++ b/net/batman-adv/fragmentation.c
6655 @@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
6656 kfree(entry);
6657
6658 /* Make room for the rest of the fragments. */
6659 - if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
6660 + if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
6661 kfree_skb(skb_out);
6662 skb_out = NULL;
6663 goto free;
6664 @@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
6665 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
6666 */
6667 mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
6668 - max_fragment_size = (mtu - header_size - ETH_HLEN);
6669 + max_fragment_size = mtu - header_size;
6670 max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
6671
6672 /* Don't even try to fragment, if we need more than 16 fragments */
6673 diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
6674 index 90cff585b37d..e0bcf9e84273 100644
6675 --- a/net/batman-adv/gateway_client.c
6676 +++ b/net/batman-adv/gateway_client.c
6677 @@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
6678 goto out;
6679
6680 gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
6681 - if (!gw_node->bandwidth_down == 0)
6682 + if (!gw_node)
6683 goto out;
6684
6685 switch (atomic_read(&bat_priv->gw_mode)) {
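
Illustrative note (not part of the patch): the replaced test was an
operator-precedence bug. '!' binds tighter than '==', so the old
condition parsed as shown below and dereferenced gw_node before any
NULL check.

	/* Old: (!gw_node->bandwidth_down) == 0, i.e. "bandwidth_down != 0",
	 * evaluated on a possibly-NULL pointer.
	 * New: check the pointer itself before any dereference. */
	if (!gw_node)
		goto out;
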
6686 diff --git a/net/core/dev.c b/net/core/dev.c
6687 index 945bbd001359..84409688ff39 100644
6688 --- a/net/core/dev.c
6689 +++ b/net/core/dev.c
6690 @@ -1697,6 +1697,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
6691
6692 skb_scrub_packet(skb, true);
6693 skb->protocol = eth_type_trans(skb, dev);
6694 + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
6695
6696 return 0;
6697 }
6698 @@ -2565,7 +2566,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
6699
6700 netdev_features_t netif_skb_features(struct sk_buff *skb)
6701 {
6702 - const struct net_device *dev = skb->dev;
6703 + struct net_device *dev = skb->dev;
6704 netdev_features_t features = dev->features;
6705 u16 gso_segs = skb_shinfo(skb)->gso_segs;
6706 __be16 protocol = skb->protocol;
6707 @@ -2573,11 +2574,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
6708 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
6709 features &= ~NETIF_F_GSO_MASK;
6710
6711 - if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
6712 - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
6713 - protocol = veh->h_vlan_encapsulated_proto;
6714 - } else if (!vlan_tx_tag_present(skb)) {
6715 - return harmonize_features(skb, features);
6716 + /* If this is an encapsulation offload request, verify that we
6717 + * are testing hardware encapsulation features instead of the
6718 + * standard features for the netdev.
6719 + */
6720 + if (skb->encapsulation)
6721 + features &= dev->hw_enc_features;
6722 +
6723 + if (!vlan_tx_tag_present(skb)) {
6724 + if (unlikely(protocol == htons(ETH_P_8021Q) ||
6725 + protocol == htons(ETH_P_8021AD))) {
6726 + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
6727 + protocol = veh->h_vlan_encapsulated_proto;
6728 + } else {
6729 + goto finalize;
6730 + }
6731 }
6732
6733 features = netdev_intersect_features(features,
6734 @@ -2594,6 +2605,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
6735 NETIF_F_HW_VLAN_CTAG_TX |
6736 NETIF_F_HW_VLAN_STAG_TX);
6737
6738 +finalize:
6739 + if (dev->netdev_ops->ndo_features_check)
6740 + features &= dev->netdev_ops->ndo_features_check(skb, dev,
6741 + features);
6742 +
6743 return harmonize_features(skb, features);
6744 }
6745 EXPORT_SYMBOL(netif_skb_features);
6746 @@ -2668,19 +2684,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
6747 if (unlikely(!skb))
6748 goto out_null;
6749
6750 - /* If encapsulation offload request, verify we are testing
6751 - * hardware encapsulation features instead of standard
6752 - * features for the netdev
6753 - */
6754 - if (skb->encapsulation)
6755 - features &= dev->hw_enc_features;
6756 -
6757 if (netif_needs_gso(dev, skb, features)) {
6758 struct sk_buff *segs;
6759
6760 segs = skb_gso_segment(skb, features);
6761 if (IS_ERR(segs)) {
6762 - segs = NULL;
6763 + goto out_kfree_skb;
6764 } else if (segs) {
6765 consume_skb(skb);
6766 skb = segs;
6767 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6768 index 32e31c299631..d7543d0fd744 100644
6769 --- a/net/core/skbuff.c
6770 +++ b/net/core/skbuff.c
6771 @@ -4040,6 +4040,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
6772 skb->ignore_df = 0;
6773 skb_dst_drop(skb);
6774 skb->mark = 0;
6775 + skb_init_secmark(skb);
6776 secpath_reset(skb);
6777 nf_reset(skb);
6778 nf_reset_trace(skb);
6779 diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
6780 index dedb21e99914..2caa6ad965a6 100644
6781 --- a/net/ipv4/geneve.c
6782 +++ b/net/ipv4/geneve.c
6783 @@ -165,6 +165,15 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
6784 }
6785 }
6786
6787 +static void geneve_notify_del_rx_port(struct geneve_sock *gs)
6788 +{
6789 + struct sock *sk = gs->sock->sk;
6790 + sa_family_t sa_family = sk->sk_family;
6791 +
6792 + if (sa_family == AF_INET)
6793 + udp_del_offload(&gs->udp_offloads);
6794 +}
6795 +
6796 /* Callback from net/ipv4/udp.c to receive packets */
6797 static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
6798 {
6799 @@ -293,6 +302,7 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
6800 geneve_rcv_t *rcv, void *data,
6801 bool no_share, bool ipv6)
6802 {
6803 + struct geneve_net *gn = net_generic(net, geneve_net_id);
6804 struct geneve_sock *gs;
6805
6806 gs = geneve_socket_create(net, port, rcv, data, ipv6);
6807 @@ -302,15 +312,15 @@ struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
6808 if (no_share) /* Return error if sharing is not allowed. */
6809 return ERR_PTR(-EINVAL);
6810
6811 + spin_lock(&gn->sock_lock);
6812 gs = geneve_find_sock(net, port);
6813 - if (gs) {
6814 - if (gs->rcv == rcv)
6815 - atomic_inc(&gs->refcnt);
6816 - else
6817 + if (gs && ((gs->rcv != rcv) ||
6818 + !atomic_add_unless(&gs->refcnt, 1, 0)))
6819 gs = ERR_PTR(-EBUSY);
6820 - } else {
6821 + spin_unlock(&gn->sock_lock);
6822 +
6823 + if (!gs)
6824 gs = ERR_PTR(-EINVAL);
6825 - }
6826
6827 return gs;
6828 }
6829 @@ -318,9 +328,17 @@ EXPORT_SYMBOL_GPL(geneve_sock_add);
6830
6831 void geneve_sock_release(struct geneve_sock *gs)
6832 {
6833 + struct net *net = sock_net(gs->sock->sk);
6834 + struct geneve_net *gn = net_generic(net, geneve_net_id);
6835 +
6836 if (!atomic_dec_and_test(&gs->refcnt))
6837 return;
6838
6839 + spin_lock(&gn->sock_lock);
6840 + hlist_del_rcu(&gs->hlist);
6841 + geneve_notify_del_rx_port(gs);
6842 + spin_unlock(&gn->sock_lock);
6843 +
6844 queue_work(geneve_wq, &gs->del_work);
6845 }
6846 EXPORT_SYMBOL_GPL(geneve_sock_release);
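
Illustrative sketch (not part of the patch): atomic_add_unless(v, 1, 0)
increments the refcount only while it is still non-zero, which, together
with gn->sock_lock, closes the race with geneve_sock_release() dropping
the last reference. The helper name below is hypothetical.

	static struct geneve_sock *geneve_sock_tryget(struct geneve_sock *gs)
	{
		/* Fails once the refcount has hit zero, i.e. while
		 * teardown of the socket is already in flight. */
		if (gs && !atomic_add_unless(&gs->refcnt, 1, 0))
			return NULL;
		return gs;
	}
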
6847 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
6848 index 12055fdbe716..69aaf0a2c424 100644
6849 --- a/net/ipv4/ip_gre.c
6850 +++ b/net/ipv4/ip_gre.c
6851 @@ -252,10 +252,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6852 struct ip_tunnel *tunnel = netdev_priv(dev);
6853 const struct iphdr *tnl_params;
6854
6855 - skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
6856 - if (IS_ERR(skb))
6857 - goto out;
6858 -
6859 if (dev->header_ops) {
6860 /* Need space for new headers */
6861 if (skb_cow_head(skb, dev->needed_headroom -
6862 @@ -268,6 +264,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6863 * to gre header.
6864 */
6865 skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
6866 + skb_reset_mac_header(skb);
6867 } else {
6868 if (skb_cow_head(skb, dev->needed_headroom))
6869 goto free_skb;
6870 @@ -275,6 +272,10 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
6871 tnl_params = &tunnel->parms.iph;
6872 }
6873
6874 + skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
6875 + if (IS_ERR(skb))
6876 + goto out;
6877 +
6878 __gre_xmit(skb, dev, tnl_params, skb->protocol);
6879
6880 return NETDEV_TX_OK;
6881 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
6882 index a3d453b94747..c2df40ba553f 100644
6883 --- a/net/ipv4/tcp_output.c
6884 +++ b/net/ipv4/tcp_output.c
6885 @@ -1984,7 +1984,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6886 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
6887 break;
6888
6889 - if (tso_segs == 1) {
6890 + if (tso_segs == 1 || !sk->sk_gso_max_segs) {
6891 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
6892 (tcp_skb_is_last(sk, skb) ?
6893 nonagle : TCP_NAGLE_PUSH))))
6894 @@ -2020,7 +2020,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
6895 }
6896
6897 limit = mss_now;
6898 - if (tso_segs > 1 && !tcp_urg_mode(tp))
6899 + if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
6900 limit = tcp_mss_split_point(sk, skb, mss_now,
6901 min_t(unsigned int,
6902 cwnd_quota,
6903 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
6904 index c277951d783b..c1136022d8d9 100644
6905 --- a/net/ipv6/tcp_ipv6.c
6906 +++ b/net/ipv6/tcp_ipv6.c
6907 @@ -1385,6 +1385,28 @@ ipv6_pktoptions:
6908 return 0;
6909 }
6910
6911 +static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
6912 + const struct tcphdr *th)
6913 +{
6914 + /* This is tricky: we move IP6CB to its correct location inside
6915 + * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
6916 + * _decode_session6() uses IP6CB().
6917 + * barrier() makes sure the compiler won't play aliasing games.
6918 + */
6919 + memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
6920 + sizeof(struct inet6_skb_parm));
6921 + barrier();
6922 +
6923 + TCP_SKB_CB(skb)->seq = ntohl(th->seq);
6924 + TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
6925 + skb->len - th->doff*4);
6926 + TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
6927 + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
6928 + TCP_SKB_CB(skb)->tcp_tw_isn = 0;
6929 + TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
6930 + TCP_SKB_CB(skb)->sacked = 0;
6931 +}
6932 +
6933 static int tcp_v6_rcv(struct sk_buff *skb)
6934 {
6935 const struct tcphdr *th;
6936 @@ -1416,24 +1438,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
6937
6938 th = tcp_hdr(skb);
6939 hdr = ipv6_hdr(skb);
6940 - /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
6941 - * barrier() makes sure compiler wont play fool^Waliasing games.
6942 - */
6943 - memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
6944 - sizeof(struct inet6_skb_parm));
6945 - barrier();
6946 -
6947 - TCP_SKB_CB(skb)->seq = ntohl(th->seq);
6948 - TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
6949 - skb->len - th->doff*4);
6950 - TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
6951 - TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
6952 - TCP_SKB_CB(skb)->tcp_tw_isn = 0;
6953 - TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
6954 - TCP_SKB_CB(skb)->sacked = 0;
6955
6956 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
6957 - tcp_v6_iif(skb));
6958 + inet6_iif(skb));
6959 if (!sk)
6960 goto no_tcp_socket;
6961
6962 @@ -1449,6 +1456,8 @@ process:
6963 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
6964 goto discard_and_relse;
6965
6966 + tcp_v6_fill_cb(skb, hdr, th);
6967 +
6968 #ifdef CONFIG_TCP_MD5SIG
6969 if (tcp_v6_inbound_md5_hash(sk, skb))
6970 goto discard_and_relse;
6971 @@ -1480,6 +1489,8 @@ no_tcp_socket:
6972 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
6973 goto discard_it;
6974
6975 + tcp_v6_fill_cb(skb, hdr, th);
6976 +
6977 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
6978 csum_error:
6979 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
6980 @@ -1503,6 +1514,8 @@ do_time_wait:
6981 goto discard_it;
6982 }
6983
6984 + tcp_v6_fill_cb(skb, hdr, th);
6985 +
6986 if (skb->len < (th->doff<<2)) {
6987 inet_twsk_put(inet_twsk(sk));
6988 goto bad_packet;
6989 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
6990 index b6bf8e8caec7..79c965a51ab2 100644
6991 --- a/net/netlink/af_netlink.c
6992 +++ b/net/netlink/af_netlink.c
6993 @@ -526,14 +526,14 @@ out:
6994 return err;
6995 }
6996
6997 -static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
6998 +static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
6999 {
7000 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
7001 struct page *p_start, *p_end;
7002
7003 /* First page is flushed through netlink_{get,set}_status */
7004 p_start = pgvec_to_page(hdr + PAGE_SIZE);
7005 - p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
7006 + p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
7007 while (p_start <= p_end) {
7008 flush_dcache_page(p_start);
7009 p_start++;
7010 @@ -551,9 +551,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
7011 static void netlink_set_status(struct nl_mmap_hdr *hdr,
7012 enum nl_mmap_status status)
7013 {
7014 + smp_mb();
7015 hdr->nm_status = status;
7016 flush_dcache_page(pgvec_to_page(hdr));
7017 - smp_wmb();
7018 }
7019
7020 static struct nl_mmap_hdr *
7021 @@ -715,24 +715,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
7022 struct nl_mmap_hdr *hdr;
7023 struct sk_buff *skb;
7024 unsigned int maxlen;
7025 - bool excl = true;
7026 int err = 0, len = 0;
7027
7028 - /* Netlink messages are validated by the receiver before processing.
7029 - * In order to avoid userspace changing the contents of the message
7030 - * after validation, the socket and the ring may only be used by a
7031 - * single process, otherwise we fall back to copying.
7032 - */
7033 - if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
7034 - atomic_read(&nlk->mapped) > 1)
7035 - excl = false;
7036 -
7037 mutex_lock(&nlk->pg_vec_lock);
7038
7039 ring = &nlk->tx_ring;
7040 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
7041
7042 do {
7043 + unsigned int nm_len;
7044 +
7045 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
7046 if (hdr == NULL) {
7047 if (!(msg->msg_flags & MSG_DONTWAIT) &&
7048 @@ -740,35 +732,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
7049 schedule();
7050 continue;
7051 }
7052 - if (hdr->nm_len > maxlen) {
7053 +
7054 + nm_len = ACCESS_ONCE(hdr->nm_len);
7055 + if (nm_len > maxlen) {
7056 err = -EINVAL;
7057 goto out;
7058 }
7059
7060 - netlink_frame_flush_dcache(hdr);
7061 + netlink_frame_flush_dcache(hdr, nm_len);
7062
7063 - if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
7064 - skb = alloc_skb_head(GFP_KERNEL);
7065 - if (skb == NULL) {
7066 - err = -ENOBUFS;
7067 - goto out;
7068 - }
7069 - sock_hold(sk);
7070 - netlink_ring_setup_skb(skb, sk, ring, hdr);
7071 - NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
7072 - __skb_put(skb, hdr->nm_len);
7073 - netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
7074 - atomic_inc(&ring->pending);
7075 - } else {
7076 - skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
7077 - if (skb == NULL) {
7078 - err = -ENOBUFS;
7079 - goto out;
7080 - }
7081 - __skb_put(skb, hdr->nm_len);
7082 - memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
7083 - netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
7084 + skb = alloc_skb(nm_len, GFP_KERNEL);
7085 + if (skb == NULL) {
7086 + err = -ENOBUFS;
7087 + goto out;
7088 }
7089 + __skb_put(skb, nm_len);
7090 + memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
7091 + netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
7092
7093 netlink_increment_head(ring);
7094
7095 @@ -814,7 +794,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
7096 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
7097 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
7098 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
7099 - netlink_frame_flush_dcache(hdr);
7100 + netlink_frame_flush_dcache(hdr, hdr->nm_len);
7101 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
7102
7103 NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
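
Illustrative sketch (not part of the patch): the sendmsg path now reads
the userspace-shared frame length exactly once and uses only that
snapshot, closing the window where userspace could change nm_len between
the bounds check and the copy.

	unsigned int nm_len = ACCESS_ONCE(hdr->nm_len);	/* single snapshot */

	if (nm_len > maxlen)
		return -EINVAL;
	/* All later uses (dcache flush, skb alloc, copy) consume the
	 * snapshot, never hdr->nm_len, which userspace may still write. */
	memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
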
7104 diff --git a/net/wireless/chan.c b/net/wireless/chan.c
7105 index 72d81e2154d5..92ae263ebbf3 100644
7106 --- a/net/wireless/chan.c
7107 +++ b/net/wireless/chan.c
7108 @@ -602,7 +602,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7109 {
7110 struct ieee80211_sta_ht_cap *ht_cap;
7111 struct ieee80211_sta_vht_cap *vht_cap;
7112 - u32 width, control_freq;
7113 + u32 width, control_freq, cap;
7114
7115 if (WARN_ON(!cfg80211_chandef_valid(chandef)))
7116 return false;
7117 @@ -642,7 +642,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7118 return false;
7119 break;
7120 case NL80211_CHAN_WIDTH_80P80:
7121 - if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
7122 + cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
7123 + if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
7124 return false;
7125 case NL80211_CHAN_WIDTH_80:
7126 if (!vht_cap->vht_supported)
7127 @@ -653,7 +654,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
7128 case NL80211_CHAN_WIDTH_160:
7129 if (!vht_cap->vht_supported)
7130 return false;
7131 - if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
7132 + cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
7133 + if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
7134 + cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
7135 return false;
7136 prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
7137 width = 160;
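
Illustrative sketch (not part of the patch): the VHT supported-channel-
width field is a two-bit enumeration (0 = neither, 1 = 160 MHz,
2 = 160 MHz and 80+80 MHz), not independent flag bits, so it must be
masked out and compared for equality.

	u32 cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
	bool has_160 = (cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
			cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
	bool has_80p80 = (cap == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
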
7138 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
7139 index 5839c85075f1..ea558e07981f 100644
7140 --- a/net/wireless/nl80211.c
7141 +++ b/net/wireless/nl80211.c
7142 @@ -5799,7 +5799,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
7143 }
7144
7145 /* there was no other matchset, so the RSSI one is alone */
7146 - if (i == 0)
7147 + if (i == 0 && n_match_sets)
7148 request->match_sets[0].rssi_thold = default_match_rssi;
7149
7150 request->min_rssi_thold = INT_MAX;
7151 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
7152 index b725a31a4751..6fd53ea30193 100644
7153 --- a/net/wireless/reg.c
7154 +++ b/net/wireless/reg.c
7155 @@ -1760,7 +1760,7 @@ static enum reg_request_treatment
7156 reg_process_hint_driver(struct wiphy *wiphy,
7157 struct regulatory_request *driver_request)
7158 {
7159 - const struct ieee80211_regdomain *regd;
7160 + const struct ieee80211_regdomain *regd, *tmp;
7161 enum reg_request_treatment treatment;
7162
7163 treatment = __reg_process_hint_driver(driver_request);
7164 @@ -1780,7 +1780,10 @@ reg_process_hint_driver(struct wiphy *wiphy,
7165 reg_free_request(driver_request);
7166 return REG_REQ_IGNORE;
7167 }
7168 +
7169 + tmp = get_wiphy_regdom(wiphy);
7170 rcu_assign_pointer(wiphy->regd, regd);
7171 + rcu_free_regdom(tmp);
7172 }
7173
7174
7175 @@ -1839,11 +1842,8 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
7176 return REG_REQ_IGNORE;
7177 return REG_REQ_ALREADY_SET;
7178 }
7179 - /*
7180 - * Two consecutive Country IE hints on the same wiphy.
7181 - * This should be picked up early by the driver/stack
7182 - */
7183 - if (WARN_ON(regdom_changes(country_ie_request->alpha2)))
7184 +
7185 + if (regdom_changes(country_ie_request->alpha2))
7186 return REG_REQ_OK;
7187 return REG_REQ_ALREADY_SET;
7188 }
7189 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
7190 index b1c668dc6815..a609552a86dc 100644
7191 --- a/scripts/Makefile.clean
7192 +++ b/scripts/Makefile.clean
7193 @@ -45,19 +45,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
7194
7195 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
7196
7197 -# as clean-files is given relative to the current directory, this adds
7198 -# a $(obj) prefix, except for absolute paths
7199 +# clean-files is given relative to the current directory, unless it
7200 +# starts with $(objtree)/ (which means "./", so do not add "./" unless
7201 +# you want to delete a file from the toplevel object directory).
7202
7203 __clean-files := $(wildcard \
7204 - $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
7205 - $(filter /%, $(__clean-files)))
7206 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
7207 + $(filter $(objtree)/%, $(__clean-files)))
7208
7209 -# as clean-dirs is given relative to the current directory, this adds
7210 -# a $(obj) prefix, except for absolute paths
7211 +# same as clean-files
7212
7213 __clean-dirs := $(wildcard \
7214 - $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
7215 - $(filter /%, $(clean-dirs)))
7216 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
7217 + $(filter $(objtree)/%, $(clean-dirs)))
7218
7219 # ==========================================================================
7220
7221 diff --git a/scripts/coccinelle/misc/bugon.cocci b/scripts/coccinelle/misc/bugon.cocci
7222 index 556456ca761c..3b7eec24fb5a 100644
7223 --- a/scripts/coccinelle/misc/bugon.cocci
7224 +++ b/scripts/coccinelle/misc/bugon.cocci
7225 @@ -8,7 +8,7 @@
7226 // Confidence: High
7227 // Copyright: (C) 2014 Himangi Saraogi. GPLv2.
7228 // Comments:
7229 -// Options: --no-includes, --include-headers
7230 +// Options: --no-includes --include-headers
7231
7232 virtual patch
7233 virtual context
7234 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
7235 index c657752a420c..83bddbdb90e9 100644
7236 --- a/sound/usb/quirks-table.h
7237 +++ b/sound/usb/quirks-table.h
7238 @@ -2804,133 +2804,45 @@ YAMAHA_DEVICE(0x7010, "UB99"),
7239 }
7240 },
7241
7242 -/* Hauppauge HVR-950Q and HVR-850 */
7243 -{
7244 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
7245 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7246 - USB_DEVICE_ID_MATCH_INT_CLASS |
7247 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7248 - .bInterfaceClass = USB_CLASS_AUDIO,
7249 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7250 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7251 - .vendor_name = "Hauppauge",
7252 - .product_name = "HVR-950Q",
7253 - .ifnum = QUIRK_ANY_INTERFACE,
7254 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7255 - }
7256 -},
7257 -{
7258 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x7210),
7259 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7260 - USB_DEVICE_ID_MATCH_INT_CLASS |
7261 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7262 - .bInterfaceClass = USB_CLASS_AUDIO,
7263 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7264 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7265 - .vendor_name = "Hauppauge",
7266 - .product_name = "HVR-950Q",
7267 - .ifnum = QUIRK_ANY_INTERFACE,
7268 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7269 - }
7270 -},
7271 -{
7272 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x7217),
7273 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7274 - USB_DEVICE_ID_MATCH_INT_CLASS |
7275 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7276 - .bInterfaceClass = USB_CLASS_AUDIO,
7277 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7278 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7279 - .vendor_name = "Hauppauge",
7280 - .product_name = "HVR-950Q",
7281 - .ifnum = QUIRK_ANY_INTERFACE,
7282 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7283 - }
7284 -},
7285 -{
7286 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x721b),
7287 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7288 - USB_DEVICE_ID_MATCH_INT_CLASS |
7289 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7290 - .bInterfaceClass = USB_CLASS_AUDIO,
7291 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7292 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7293 - .vendor_name = "Hauppauge",
7294 - .product_name = "HVR-950Q",
7295 - .ifnum = QUIRK_ANY_INTERFACE,
7296 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7297 - }
7298 -},
7299 -{
7300 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x721e),
7301 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7302 - USB_DEVICE_ID_MATCH_INT_CLASS |
7303 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7304 - .bInterfaceClass = USB_CLASS_AUDIO,
7305 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7306 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7307 - .vendor_name = "Hauppauge",
7308 - .product_name = "HVR-950Q",
7309 - .ifnum = QUIRK_ANY_INTERFACE,
7310 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7311 - }
7312 -},
7313 -{
7314 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x721f),
7315 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7316 - USB_DEVICE_ID_MATCH_INT_CLASS |
7317 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7318 - .bInterfaceClass = USB_CLASS_AUDIO,
7319 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7320 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7321 - .vendor_name = "Hauppauge",
7322 - .product_name = "HVR-950Q",
7323 - .ifnum = QUIRK_ANY_INTERFACE,
7324 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7325 - }
7326 -},
7327 -{
7328 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x7240),
7329 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7330 - USB_DEVICE_ID_MATCH_INT_CLASS |
7331 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7332 - .bInterfaceClass = USB_CLASS_AUDIO,
7333 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7334 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7335 - .vendor_name = "Hauppauge",
7336 - .product_name = "HVR-850",
7337 - .ifnum = QUIRK_ANY_INTERFACE,
7338 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7339 - }
7340 -},
7341 -{
7342 - USB_DEVICE_VENDOR_SPEC(0x2040, 0x7280),
7343 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7344 - USB_DEVICE_ID_MATCH_INT_CLASS |
7345 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7346 - .bInterfaceClass = USB_CLASS_AUDIO,
7347 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7348 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7349 - .vendor_name = "Hauppauge",
7350 - .product_name = "HVR-950Q",
7351 - .ifnum = QUIRK_ANY_INTERFACE,
7352 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7353 - }
7354 -},
7355 -{
7356 - USB_DEVICE_VENDOR_SPEC(0x0fd9, 0x0008),
7357 - .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7358 - USB_DEVICE_ID_MATCH_INT_CLASS |
7359 - USB_DEVICE_ID_MATCH_INT_SUBCLASS,
7360 - .bInterfaceClass = USB_CLASS_AUDIO,
7361 - .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
7362 - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
7363 - .vendor_name = "Hauppauge",
7364 - .product_name = "HVR-950Q",
7365 - .ifnum = QUIRK_ANY_INTERFACE,
7366 - .type = QUIRK_AUDIO_ALIGN_TRANSFER,
7367 - }
7368 -},
7369 +/*
7370 + * Auvitek au0828 devices with audio interface.
7371 + * This should be kept in sync with drivers/media/usb/au0828/au0828-cards.c
7372 + * Note that some devices are DVB-only and don't need to be listed
7373 + * here. That's the case, for example, of DVICO_FUSIONHDTV7.
7374 + */
7375 +
7376 +#define AU0828_DEVICE(vid, pid, vname, pname) { \
7377 + USB_DEVICE_VENDOR_SPEC(vid, pid), \
7378 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
7379 + USB_DEVICE_ID_MATCH_INT_CLASS | \
7380 + USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
7381 + .bInterfaceClass = USB_CLASS_AUDIO, \
7382 + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, \
7383 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { \
7384 + .vendor_name = vname, \
7385 + .product_name = pname, \
7386 + .ifnum = QUIRK_ANY_INTERFACE, \
7387 + .type = QUIRK_AUDIO_ALIGN_TRANSFER, \
7388 + } \
7389 +}
7390 +
7391 +AU0828_DEVICE(0x2040, 0x7200, "Hauppauge", "HVR-950Q"),
7392 +AU0828_DEVICE(0x2040, 0x7240, "Hauppauge", "HVR-850"),
7393 +AU0828_DEVICE(0x2040, 0x7210, "Hauppauge", "HVR-950Q"),
7394 +AU0828_DEVICE(0x2040, 0x7217, "Hauppauge", "HVR-950Q"),
7395 +AU0828_DEVICE(0x2040, 0x721b, "Hauppauge", "HVR-950Q"),
7396 +AU0828_DEVICE(0x2040, 0x721e, "Hauppauge", "HVR-950Q"),
7397 +AU0828_DEVICE(0x2040, 0x721f, "Hauppauge", "HVR-950Q"),
7398 +AU0828_DEVICE(0x2040, 0x7280, "Hauppauge", "HVR-950Q"),
7399 +AU0828_DEVICE(0x0fd9, 0x0008, "Hauppauge", "HVR-950Q"),
7400 +AU0828_DEVICE(0x2040, 0x7201, "Hauppauge", "HVR-950Q-MXL"),
7401 +AU0828_DEVICE(0x2040, 0x7211, "Hauppauge", "HVR-950Q-MXL"),
7402 +AU0828_DEVICE(0x2040, 0x7281, "Hauppauge", "HVR-950Q-MXL"),
7403 +AU0828_DEVICE(0x05e1, 0x0480, "Hauppauge", "Woodbury"),
7404 +AU0828_DEVICE(0x2040, 0x8200, "Hauppauge", "Woodbury"),
7405 +AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
7406 +AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
7407 +AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
7408
7409 /* Digidesign Mbox */
7410 {