Magellan Linux

Contents of /trunk/kernel-alx/patches-3.14/0102-3.14.3-all-fixes.patch

Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 201774 bytes
-patches for 3.14
1 diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
2 index a590ca51be75..f762e11bfe11 100644
3 --- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
4 +++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
5 @@ -3,7 +3,7 @@
6 Required properties:
7 - #address-cells: see spi-bus.txt
8 - #size-cells: see spi-bus.txt
9 -- compatible: should be "efm32,spi"
10 +- compatible: should be "energymicro,efm32-spi"
11 - reg: Offset and length of the register set for the controller
12 - interrupts: pair specifying rx and tx irq
13 - clocks: phandle to the spi clock
14 @@ -15,7 +15,7 @@ Example:
15 spi1: spi@0x4000c400 { /* USART1 */
16 #address-cells = <1>;
17 #size-cells = <0>;
18 - compatible = "efm32,spi";
19 + compatible = "energymicro,efm32-spi";
20 reg = <0x4000c400 0x400>;
21 interrupts = <15 16>;
22 clocks = <&cmu 20>;
23 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
24 index e55124e7c40c..855d9b37bf76 100644
25 --- a/Documentation/sysctl/kernel.txt
26 +++ b/Documentation/sysctl/kernel.txt
27 @@ -317,6 +317,7 @@ for more than this value report a warning.
28 This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
29
30 0: means infinite timeout - no checking done.
31 +Possible values to set are in range {0..LONG_MAX/HZ}.
32
33 ==============================================================
34
35 diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
36 index 1e6b6531bbcc..d2ba80bb7af5 100644
37 --- a/Documentation/video4linux/gspca.txt
38 +++ b/Documentation/video4linux/gspca.txt
39 @@ -55,6 +55,7 @@ zc3xx 0458:700f Genius VideoCam Web V2
40 sonixj 0458:7025 Genius Eye 311Q
41 sn9c20x 0458:7029 Genius Look 320s
42 sonixj 0458:702e Genius Slim 310 NB
43 +sn9c20x 0458:7045 Genius Look 1320 V2
44 sn9c20x 0458:704a Genius Slim 1320
45 sn9c20x 0458:704c Genius i-Look 1321
46 sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
47 diff --git a/Makefile b/Makefile
48 index b2f7de81e9a2..eed07f3f9308 100644
49 --- a/Makefile
50 +++ b/Makefile
51 @@ -1,6 +1,6 @@
52 VERSION = 3
53 PATCHLEVEL = 14
54 -SUBLEVEL = 2
55 +SUBLEVEL = 3
56 EXTRAVERSION =
57 NAME = Shuffling Zombie Juror
58
59 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
60 index 15949459611f..44298add8a48 100644
61 --- a/arch/arm/Kconfig
62 +++ b/arch/arm/Kconfig
63 @@ -898,7 +898,7 @@ config ARCH_MULTI_V5
64 bool "ARMv5 based platforms (ARM926T, XSCALE, PJ1, ...)"
65 depends on !ARCH_MULTI_V6_V7
66 select ARCH_MULTI_V4_V5
67 - select CPU_ARM926T if (!CPU_ARM946E || CPU_ARM1020 || \
68 + select CPU_ARM926T if !(CPU_ARM946E || CPU_ARM1020 || \
69 CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
70 CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_FEROCEON)
71
72 diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
73 index e3f27ec31718..2e7d932887b5 100644
74 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi
75 +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
76 @@ -183,7 +183,7 @@
77 &usb {
78 status = "okay";
79
80 - control@44e10000 {
81 + control@44e10620 {
82 status = "okay";
83 };
84
85 @@ -204,7 +204,7 @@
86 dr_mode = "host";
87 };
88
89 - dma-controller@07402000 {
90 + dma-controller@47402000 {
91 status = "okay";
92 };
93 };
94 diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
95 index 7e6c64ed966d..801055a42998 100644
96 --- a/arch/arm/boot/dts/am335x-evm.dts
97 +++ b/arch/arm/boot/dts/am335x-evm.dts
98 @@ -325,7 +325,7 @@
99 &usb {
100 status = "okay";
101
102 - control@44e10000 {
103 + control@44e10620 {
104 status = "okay";
105 };
106
107 @@ -346,7 +346,7 @@
108 dr_mode = "host";
109 };
110
111 - dma-controller@07402000 {
112 + dma-controller@47402000 {
113 status = "okay";
114 };
115 };
116 diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
117 index 486880b74831..dd142bc677ca 100644
118 --- a/arch/arm/boot/dts/am335x-evmsk.dts
119 +++ b/arch/arm/boot/dts/am335x-evmsk.dts
120 @@ -334,7 +334,7 @@
121 &usb {
122 status = "okay";
123
124 - control@44e10000 {
125 + control@44e10620 {
126 status = "okay";
127 };
128
129 diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
130 index 7063311a58d9..5942e7dab94f 100644
131 --- a/arch/arm/boot/dts/am335x-igep0033.dtsi
132 +++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
133 @@ -202,7 +202,7 @@
134 &usb {
135 status = "okay";
136
137 - control@44e10000 {
138 + control@44e10620 {
139 status = "okay";
140 };
141
142 @@ -223,7 +223,7 @@
143 dr_mode = "host";
144 };
145
146 - dma-controller@07402000 {
147 + dma-controller@47402000 {
148 status = "okay";
149 };
150 };
151 diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
152 index 6d95d3df33c7..79087ccf64bc 100644
153 --- a/arch/arm/boot/dts/am33xx.dtsi
154 +++ b/arch/arm/boot/dts/am33xx.dtsi
155 @@ -448,7 +448,7 @@
156 ti,hwmods = "usb_otg_hs";
157 status = "disabled";
158
159 - usb_ctrl_mod: control@44e10000 {
160 + usb_ctrl_mod: control@44e10620 {
161 compatible = "ti,am335x-usb-ctrl-module";
162 reg = <0x44e10620 0x10
163 0x44e10648 0x4>;
164 @@ -551,7 +551,7 @@
165 "tx14", "tx15";
166 };
167
168 - cppi41dma: dma-controller@07402000 {
169 + cppi41dma: dma-controller@47402000 {
170 compatible = "ti,am3359-cppi41";
171 reg = <0x47400000 0x1000
172 0x47402000 0x1000
173 diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
174 index 74b5964430ac..e5d4902c630f 100644
175 --- a/arch/arm/boot/dts/armada-370-xp.dtsi
176 +++ b/arch/arm/boot/dts/armada-370-xp.dtsi
177 @@ -226,6 +226,7 @@
178 #size-cells = <0>;
179 compatible = "marvell,orion-mdio";
180 reg = <0x72004 0x4>;
181 + clocks = <&gateclk 4>;
182 };
183
184 eth1: ethernet@74000 {
185 diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
186 index b42e658876e5..457112d659ea 100644
187 --- a/arch/arm/boot/dts/exynos5250-arndale.dts
188 +++ b/arch/arm/boot/dts/exynos5250-arndale.dts
189 @@ -287,6 +287,7 @@
190 regulator-name = "vdd_g3d";
191 regulator-min-microvolt = <1000000>;
192 regulator-max-microvolt = <1000000>;
193 + regulator-always-on;
194 regulator-boot-on;
195 op_mode = <1>;
196 };
197 diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
198 index d3f8a6e8ca20..69409f7e05dc 100644
199 --- a/arch/arm/boot/dts/omap4.dtsi
200 +++ b/arch/arm/boot/dts/omap4.dtsi
201 @@ -275,6 +275,8 @@
202 gpmc,num-waitpins = <4>;
203 ti,hwmods = "gpmc";
204 ti,no-idle-on-init;
205 + clocks = <&l3_div_ck>;
206 + clock-names = "fck";
207 };
208
209 uart1: serial@4806a000 {
210 diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
211 index a72813a9663e..7a16647c76f4 100644
212 --- a/arch/arm/boot/dts/omap5.dtsi
213 +++ b/arch/arm/boot/dts/omap5.dtsi
214 @@ -302,6 +302,8 @@
215 gpmc,num-cs = <8>;
216 gpmc,num-waitpins = <4>;
217 ti,hwmods = "gpmc";
218 + clocks = <&l3_iclk_div>;
219 + clock-names = "fck";
220 };
221
222 i2c1: i2c@48070000 {
223 diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
224 index ec0698a8354a..9c2ba74a8b60 100644
225 --- a/arch/arm/boot/dts/tegra124.dtsi
226 +++ b/arch/arm/boot/dts/tegra124.dtsi
227 @@ -164,19 +164,6 @@
228 status = "disabled";
229 };
230
231 - serial@70006400 {
232 - compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
233 - reg = <0x70006400 0x40>;
234 - reg-shift = <2>;
235 - interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
236 - clocks = <&tegra_car TEGRA124_CLK_UARTE>;
237 - resets = <&tegra_car 66>;
238 - reset-names = "serial";
239 - dmas = <&apbdma 20>, <&apbdma 20>;
240 - dma-names = "rx", "tx";
241 - status = "disabled";
242 - };
243 -
244 pwm@7000a000 {
245 compatible = "nvidia,tegra124-pwm", "nvidia,tegra20-pwm";
246 reg = <0x7000a000 0x100>;
247 diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
248 index 191ada6e4d2d..662c7bd06108 100644
249 --- a/arch/arm/include/asm/div64.h
250 +++ b/arch/arm/include/asm/div64.h
251 @@ -156,7 +156,7 @@
252 /* Select the best insn combination to perform the */ \
253 /* actual __m * __n / (__p << 64) operation. */ \
254 if (!__c) { \
255 - asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
256 + asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
257 "mov %Q0, #0" \
258 : "=&r" (__res) \
259 : "r" (__m), "r" (__n) \
260 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
261 index e42cf597f6e6..2aff798fbef4 100644
262 --- a/arch/arm/include/asm/futex.h
263 +++ b/arch/arm/include/asm/futex.h
264 @@ -3,11 +3,6 @@
265
266 #ifdef __KERNEL__
267
268 -#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
269 -/* ARM doesn't provide unprivileged exclusive memory accessors */
270 -#include <asm-generic/futex.h>
271 -#else
272 -
273 #include <linux/futex.h>
274 #include <linux/uaccess.h>
275 #include <asm/errno.h>
276 @@ -164,6 +159,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
277 return ret;
278 }
279
280 -#endif /* !(CPU_USE_DOMAINS && SMP) */
281 #endif /* __KERNEL__ */
282 #endif /* _ASM_ARM_FUTEX_H */
283 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
284 index dfff709fda3c..219ac88a9542 100644
285 --- a/arch/arm/include/asm/pgtable-2level.h
286 +++ b/arch/arm/include/asm/pgtable-2level.h
287 @@ -140,6 +140,7 @@
288 #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
289 #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
290 #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
291 +#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
292 #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
293
294 #ifndef __ASSEMBLY__
295 diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
296 index acabef1a75df..43876245fc57 100644
297 --- a/arch/arm/include/asm/unistd.h
298 +++ b/arch/arm/include/asm/unistd.h
299 @@ -48,6 +48,5 @@
300 */
301 #define __IGNORE_fadvise64_64
302 #define __IGNORE_migrate_pages
303 -#define __IGNORE_kcmp
304
305 #endif /* __ASM_ARM_UNISTD_H */
306 diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
307 index f0d180d8b29f..8cf0996aa1a8 100644
308 --- a/arch/arm/kernel/machine_kexec.c
309 +++ b/arch/arm/kernel/machine_kexec.c
310 @@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
311
312 soft_restart(reboot_entry_phys);
313 }
314 +
315 +void arch_crash_save_vmcoreinfo(void)
316 +{
317 +#ifdef CONFIG_ARM_LPAE
318 + VMCOREINFO_CONFIG(ARM_LPAE);
319 +#endif
320 +}
321 diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
322 index e022a869bff2..6037a9a01ed5 100644
323 --- a/arch/arm/mach-omap2/irq.c
324 +++ b/arch/arm/mach-omap2/irq.c
325 @@ -222,6 +222,7 @@ void __init ti81xx_init_irq(void)
326 static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs)
327 {
328 u32 irqnr;
329 + int handled_irq = 0;
330
331 do {
332 irqnr = readl_relaxed(base_addr + 0x98);
333 @@ -249,8 +250,15 @@ out:
334 if (irqnr) {
335 irqnr = irq_find_mapping(domain, irqnr);
336 handle_IRQ(irqnr, regs);
337 + handled_irq = 1;
338 }
339 } while (irqnr);
340 +
341 + /* If an irq is masked or deasserted while active, we will
342 + * keep ending up here with no irq handled. So remove it from
343 + * the INTC with an ack.*/
344 + if (!handled_irq)
345 + omap_ack_irq(NULL);
346 }
347
348 asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs)
349 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
350 index 1f33f5db10d5..66c60fe1104c 100644
351 --- a/arch/arm/mach-omap2/omap_hwmod.c
352 +++ b/arch/arm/mach-omap2/omap_hwmod.c
353 @@ -2546,11 +2546,12 @@ static int __init _init(struct omap_hwmod *oh, void *data)
354 return -EINVAL;
355 }
356
357 - if (np)
358 + if (np) {
359 if (of_find_property(np, "ti,no-reset-on-init", NULL))
360 oh->flags |= HWMOD_INIT_NO_RESET;
361 if (of_find_property(np, "ti,no-idle-on-init", NULL))
362 oh->flags |= HWMOD_INIT_NO_IDLE;
363 + }
364
365 oh->_state = _HWMOD_STATE_INITIALIZED;
366
367 diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
368 index 4c3b1e6df508..ea4643abbe61 100644
369 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
370 +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
371 @@ -1968,7 +1968,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
372 static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
373 .name = "usb_host_hs",
374 .class = &omap3xxx_usb_host_hs_hwmod_class,
375 - .clkdm_name = "l3_init_clkdm",
376 + .clkdm_name = "usbhost_clkdm",
377 .mpu_irqs = omap3xxx_usb_host_hs_irqs,
378 .main_clk = "usbhost_48m_fck",
379 .prcm = {
380 @@ -2053,7 +2053,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
381 static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
382 .name = "usb_tll_hs",
383 .class = &omap3xxx_usb_tll_hs_hwmod_class,
384 - .clkdm_name = "l3_init_clkdm",
385 + .clkdm_name = "core_l4_clkdm",
386 .mpu_irqs = omap3xxx_usb_tll_hs_irqs,
387 .main_clk = "usbtll_fck",
388 .prcm = {
389 diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
390 index 7bdd22afce69..d4d0fce325c7 100644
391 --- a/arch/arm/mach-omap2/pm.h
392 +++ b/arch/arm/mach-omap2/pm.h
393 @@ -103,7 +103,7 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
394
395 #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
396
397 -#if defined(CONFIG_ARCH_OMAP4)
398 +#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
399 extern u16 pm44xx_errata;
400 #define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
401 #else
402 diff --git a/arch/arm/mach-pxa/include/mach/hx4700.h b/arch/arm/mach-pxa/include/mach/hx4700.h
403 index 8bc02913517c..0e1bb46264f9 100644
404 --- a/arch/arm/mach-pxa/include/mach/hx4700.h
405 +++ b/arch/arm/mach-pxa/include/mach/hx4700.h
406 @@ -14,6 +14,7 @@
407
408 #include <linux/gpio.h>
409 #include <linux/mfd/asic3.h>
410 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
411
412 #define HX4700_ASIC3_GPIO_BASE PXA_NR_BUILTIN_GPIO
413 #define HX4700_EGPIO_BASE (HX4700_ASIC3_GPIO_BASE + ASIC3_NUM_GPIOS)
414 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
415 index 1f8fed94c2a4..ca8ecdee47d8 100644
416 --- a/arch/arm/mm/Kconfig
417 +++ b/arch/arm/mm/Kconfig
418 @@ -446,7 +446,6 @@ config CPU_32v5
419
420 config CPU_32v6
421 bool
422 - select CPU_USE_DOMAINS if CPU_V6 && MMU
423 select TLS_REG_EMUL if !CPU_32v6K && !MMU
424
425 config CPU_32v6K
426 @@ -671,7 +670,7 @@ config ARM_VIRT_EXT
427
428 config SWP_EMULATE
429 bool "Emulate SWP/SWPB instructions"
430 - depends on !CPU_USE_DOMAINS && CPU_V7
431 + depends on CPU_V7
432 default y if SMP
433 select HAVE_PROC_CPU if PROC_FS
434 help
435 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
436 index a623cb3ad012..b68c6b22e1c8 100644
437 --- a/arch/arm/mm/mmu.c
438 +++ b/arch/arm/mm/mmu.c
439 @@ -516,6 +516,16 @@ static void __init build_mem_type_table(void)
440 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
441
442 /*
443 + * We don't use domains on ARMv6 (since this causes problems with
444 + * v6/v7 kernels), so we must use a separate memory type for user
445 + * r/o, kernel r/w to map the vectors page.
446 + */
447 +#ifndef CONFIG_ARM_LPAE
448 + if (cpu_arch == CPU_ARCH_ARMv6)
449 + vecs_pgprot |= L_PTE_MT_VECTORS;
450 +#endif
451 +
452 + /*
453 * ARMv6 and above have extended page tables.
454 */
455 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
456 diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
457 index e3c48a3fe063..ee1d80593958 100644
458 --- a/arch/arm/mm/proc-macros.S
459 +++ b/arch/arm/mm/proc-macros.S
460 @@ -112,13 +112,9 @@
461 * 100x 1 0 1 r/o no acc
462 * 10x0 1 0 1 r/o no acc
463 * 1011 0 0 1 r/w no acc
464 - * 110x 0 1 0 r/w r/o
465 - * 11x0 0 1 0 r/w r/o
466 - * 1111 0 1 1 r/w r/w
467 - *
468 - * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
469 * 110x 1 1 1 r/o r/o
470 * 11x0 1 1 1 r/o r/o
471 + * 1111 0 1 1 r/w r/w
472 */
473 .macro armv6_mt_table pfx
474 \pfx\()_mt_table:
475 @@ -137,7 +133,7 @@
476 .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
477 .long 0x00 @ unused
478 .long 0x00 @ unused
479 - .long 0x00 @ unused
480 + .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
481 .endm
482
483 .macro armv6_set_pte_ext pfx
484 @@ -158,24 +154,21 @@
485
486 tst r1, #L_PTE_USER
487 orrne r3, r3, #PTE_EXT_AP1
488 -#ifdef CONFIG_CPU_USE_DOMAINS
489 - @ allow kernel read/write access to read-only user pages
490 tstne r3, #PTE_EXT_APX
491 - bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
492 -#endif
493 +
494 + @ user read-only -> kernel read-only
495 + bicne r3, r3, #PTE_EXT_AP0
496
497 tst r1, #L_PTE_XN
498 orrne r3, r3, #PTE_EXT_XN
499
500 - orr r3, r3, r2
501 + eor r3, r3, r2
502
503 tst r1, #L_PTE_YOUNG
504 tstne r1, #L_PTE_PRESENT
505 moveq r3, #0
506 -#ifndef CONFIG_CPU_USE_DOMAINS
507 tstne r1, #L_PTE_NONE
508 movne r3, #0
509 -#endif
510
511 str r3, [r0]
512 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
513 diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
514 index bdd3be4be77a..1f52915f2b28 100644
515 --- a/arch/arm/mm/proc-v7-2level.S
516 +++ b/arch/arm/mm/proc-v7-2level.S
517 @@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
518
519 tst r1, #L_PTE_USER
520 orrne r3, r3, #PTE_EXT_AP1
521 -#ifdef CONFIG_CPU_USE_DOMAINS
522 - @ allow kernel read/write access to read-only user pages
523 - tstne r3, #PTE_EXT_APX
524 - bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
525 -#endif
526
527 tst r1, #L_PTE_XN
528 orrne r3, r3, #PTE_EXT_XN
529
530 tst r1, #L_PTE_YOUNG
531 tstne r1, #L_PTE_VALID
532 -#ifndef CONFIG_CPU_USE_DOMAINS
533 eorne r1, r1, #L_PTE_NONE
534 tstne r1, #L_PTE_NONE
535 -#endif
536 moveq r3, #0
537
538 ARM( str r3, [r0, #2048]! )
539 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
540 index aa3917c8b623..acb17e0d5e24 100644
541 --- a/arch/arm64/include/asm/pgtable.h
542 +++ b/arch/arm64/include/asm/pgtable.h
543 @@ -199,7 +199,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
544 pte_t *ptep, pte_t pte)
545 {
546 if (pte_valid_user(pte)) {
547 - if (pte_exec(pte))
548 + if (!pte_special(pte) && pte_exec(pte))
549 __sync_icache_dcache(pte, addr);
550 if (pte_dirty(pte) && pte_write(pte))
551 pte_val(pte) &= ~PTE_RDONLY;
552 @@ -286,11 +286,11 @@ static inline int has_transparent_hugepage(void)
553 * Mark the prot value as uncacheable and unbufferable.
554 */
555 #define pgprot_noncached(prot) \
556 - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
557 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
558 #define pgprot_writecombine(prot) \
559 - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
560 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
561 #define pgprot_dmacoherent(prot) \
562 - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
563 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
564 #define __HAVE_PHYS_MEM_ACCESS_PROT
565 struct file;
566 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
567 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
568 index bb8eb8a78e67..faa0e1ce59df 100644
569 --- a/arch/arm64/include/asm/unistd32.h
570 +++ b/arch/arm64/include/asm/unistd32.h
571 @@ -404,7 +404,7 @@ __SYSCALL(379, sys_finit_module)
572 __SYSCALL(380, sys_sched_setattr)
573 __SYSCALL(381, sys_sched_getattr)
574
575 -#define __NR_compat_syscalls 379
576 +#define __NR_compat_syscalls 382
577
578 /*
579 * Compat syscall numbers used by the AArch64 kernel.
580 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
581 index fa9aaf7144b7..1d4706114a45 100644
582 --- a/arch/s390/include/asm/atomic.h
583 +++ b/arch/s390/include/asm/atomic.h
584 @@ -15,23 +15,29 @@
585
586 #include <linux/compiler.h>
587 #include <linux/types.h>
588 +#include <asm/barrier.h>
589 #include <asm/cmpxchg.h>
590
591 #define ATOMIC_INIT(i) { (i) }
592
593 +#define __ATOMIC_NO_BARRIER "\n"
594 +
595 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
596
597 #define __ATOMIC_OR "lao"
598 #define __ATOMIC_AND "lan"
599 #define __ATOMIC_ADD "laa"
600 +#define __ATOMIC_BARRIER "bcr 14,0\n"
601
602 -#define __ATOMIC_LOOP(ptr, op_val, op_string) \
603 +#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
604 ({ \
605 int old_val; \
606 \
607 typecheck(atomic_t *, ptr); \
608 asm volatile( \
609 + __barrier \
610 op_string " %0,%2,%1\n" \
611 + __barrier \
612 : "=d" (old_val), "+Q" ((ptr)->counter) \
613 : "d" (op_val) \
614 : "cc", "memory"); \
615 @@ -43,8 +49,9 @@
616 #define __ATOMIC_OR "or"
617 #define __ATOMIC_AND "nr"
618 #define __ATOMIC_ADD "ar"
619 +#define __ATOMIC_BARRIER "\n"
620
621 -#define __ATOMIC_LOOP(ptr, op_val, op_string) \
622 +#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
623 ({ \
624 int old_val, new_val; \
625 \
626 @@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i)
627
628 static inline int atomic_add_return(int i, atomic_t *v)
629 {
630 - return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
631 + return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
632 }
633
634 static inline void atomic_add(int i, atomic_t *v)
635 @@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v)
636 : "+Q" (v->counter)
637 : "i" (i)
638 : "cc", "memory");
639 - } else {
640 - atomic_add_return(i, v);
641 + return;
642 }
643 -#else
644 - atomic_add_return(i, v);
645 #endif
646 + __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
647 }
648
649 #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
650 @@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v)
651
652 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
653 {
654 - __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
655 + __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
656 }
657
658 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
659 {
660 - __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
661 + __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
662 }
663
664 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
665 @@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
666
667 #ifdef CONFIG_64BIT
668
669 +#define __ATOMIC64_NO_BARRIER "\n"
670 +
671 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
672
673 #define __ATOMIC64_OR "laog"
674 #define __ATOMIC64_AND "lang"
675 #define __ATOMIC64_ADD "laag"
676 +#define __ATOMIC64_BARRIER "bcr 14,0\n"
677
678 -#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
679 +#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
680 ({ \
681 long long old_val; \
682 \
683 typecheck(atomic64_t *, ptr); \
684 asm volatile( \
685 + __barrier \
686 op_string " %0,%2,%1\n" \
687 + __barrier \
688 : "=d" (old_val), "+Q" ((ptr)->counter) \
689 : "d" (op_val) \
690 : "cc", "memory"); \
691 @@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
692 #define __ATOMIC64_OR "ogr"
693 #define __ATOMIC64_AND "ngr"
694 #define __ATOMIC64_ADD "agr"
695 +#define __ATOMIC64_BARRIER "\n"
696
697 -#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
698 +#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
699 ({ \
700 long long old_val, new_val; \
701 \
702 @@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
703
704 static inline long long atomic64_add_return(long long i, atomic64_t *v)
705 {
706 - return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
707 + return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
708 +}
709 +
710 +static inline void atomic64_add(long long i, atomic64_t *v)
711 +{
712 +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
713 + if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
714 + asm volatile(
715 + "agsi %0,%1\n"
716 + : "+Q" (v->counter)
717 + : "i" (i)
718 + : "cc", "memory");
719 + return;
720 + }
721 +#endif
722 + __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
723 }
724
725 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
726 {
727 - __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
728 + __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
729 }
730
731 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
732 {
733 - __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
734 + __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
735 }
736
737 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
738 @@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
739 } while (atomic64_cmpxchg(v, old, new) != old);
740 }
741
742 -#endif /* CONFIG_64BIT */
743 -
744 static inline void atomic64_add(long long i, atomic64_t *v)
745 {
746 -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
747 - if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
748 - asm volatile(
749 - "agsi %0,%1\n"
750 - : "+Q" (v->counter)
751 - : "i" (i)
752 - : "cc", "memory");
753 - } else {
754 - atomic64_add_return(i, v);
755 - }
756 -#else
757 atomic64_add_return(i, v);
758 -#endif
759 }
760
761 +#endif /* CONFIG_64BIT */
762 +
763 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
764 {
765 long long c, old;
766 diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
767 index 6e6ad0680829..4e206123b439 100644
768 --- a/arch/s390/include/asm/bitops.h
769 +++ b/arch/s390/include/asm/bitops.h
770 @@ -47,14 +47,18 @@
771
772 #include <linux/typecheck.h>
773 #include <linux/compiler.h>
774 +#include <asm/barrier.h>
775 +
776 +#define __BITOPS_NO_BARRIER "\n"
777
778 #ifndef CONFIG_64BIT
779
780 #define __BITOPS_OR "or"
781 #define __BITOPS_AND "nr"
782 #define __BITOPS_XOR "xr"
783 +#define __BITOPS_BARRIER "\n"
784
785 -#define __BITOPS_LOOP(__addr, __val, __op_string) \
786 +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
787 ({ \
788 unsigned long __old, __new; \
789 \
790 @@ -67,7 +71,7 @@
791 " jl 0b" \
792 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
793 : "d" (__val) \
794 - : "cc"); \
795 + : "cc", "memory"); \
796 __old; \
797 })
798
799 @@ -78,17 +82,20 @@
800 #define __BITOPS_OR "laog"
801 #define __BITOPS_AND "lang"
802 #define __BITOPS_XOR "laxg"
803 +#define __BITOPS_BARRIER "bcr 14,0\n"
804
805 -#define __BITOPS_LOOP(__addr, __val, __op_string) \
806 +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
807 ({ \
808 unsigned long __old; \
809 \
810 typecheck(unsigned long *, (__addr)); \
811 asm volatile( \
812 + __barrier \
813 __op_string " %0,%2,%1\n" \
814 + __barrier \
815 : "=d" (__old), "+Q" (*(__addr)) \
816 : "d" (__val) \
817 - : "cc"); \
818 + : "cc", "memory"); \
819 __old; \
820 })
821
822 @@ -97,8 +104,9 @@
823 #define __BITOPS_OR "ogr"
824 #define __BITOPS_AND "ngr"
825 #define __BITOPS_XOR "xgr"
826 +#define __BITOPS_BARRIER "\n"
827
828 -#define __BITOPS_LOOP(__addr, __val, __op_string) \
829 +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
830 ({ \
831 unsigned long __old, __new; \
832 \
833 @@ -111,7 +119,7 @@
834 " jl 0b" \
835 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
836 : "d" (__val) \
837 - : "cc"); \
838 + : "cc", "memory"); \
839 __old; \
840 })
841
842 @@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
843 "oi %0,%b1\n"
844 : "+Q" (*caddr)
845 : "i" (1 << (nr & 7))
846 - : "cc");
847 + : "cc", "memory");
848 return;
849 }
850 #endif
851 mask = 1UL << (nr & (BITS_PER_LONG - 1));
852 - __BITOPS_LOOP(addr, mask, __BITOPS_OR);
853 + __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
854 }
855
856 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
857 @@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
858 "ni %0,%b1\n"
859 : "+Q" (*caddr)
860 : "i" (~(1 << (nr & 7)))
861 - : "cc");
862 + : "cc", "memory");
863 return;
864 }
865 #endif
866 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
867 - __BITOPS_LOOP(addr, mask, __BITOPS_AND);
868 + __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
869 }
870
871 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
872 @@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
873 "xi %0,%b1\n"
874 : "+Q" (*caddr)
875 : "i" (1 << (nr & 7))
876 - : "cc");
877 + : "cc", "memory");
878 return;
879 }
880 #endif
881 mask = 1UL << (nr & (BITS_PER_LONG - 1));
882 - __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
883 + __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
884 }
885
886 static inline int
887 @@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
888 unsigned long old, mask;
889
890 mask = 1UL << (nr & (BITS_PER_LONG - 1));
891 - old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
892 - barrier();
893 + old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
894 return (old & mask) != 0;
895 }
896
897 @@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
898 unsigned long old, mask;
899
900 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
901 - old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
902 - barrier();
903 + old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
904 return (old & ~mask) != 0;
905 }
906
907 @@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
908 unsigned long old, mask;
909
910 mask = 1UL << (nr & (BITS_PER_LONG - 1));
911 - old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
912 - barrier();
913 + old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
914 return (old & mask) != 0;
915 }
916
917 diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
918 index f201af8be580..31b5ca8f8c3d 100644
919 --- a/arch/s390/include/asm/ccwdev.h
920 +++ b/arch/s390/include/asm/ccwdev.h
921 @@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
922 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
923 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
924
925 -extern struct ccw_device *ccw_device_probe_console(void);
926 +extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
927 extern void ccw_device_wait_idle(struct ccw_device *);
928 extern int ccw_device_force_console(struct ccw_device *);
929
930 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
931 index f6be6087a0e9..a48bc79a111f 100644
932 --- a/arch/s390/kernel/ptrace.c
933 +++ b/arch/s390/kernel/ptrace.c
934 @@ -64,7 +64,7 @@ void update_cr_regs(struct task_struct *task)
935 if (task->thread.per_flags & PER_FLAG_NO_TE)
936 cr_new &= ~(1UL << 55);
937 if (cr_new != cr)
938 - __ctl_load(cr, 0, 0);
939 + __ctl_load(cr_new, 0, 0);
940 /* Set or clear transaction execution TDC bits 62 and 63. */
941 __ctl_store(cr, 2, 2);
942 cr_new = cr & ~3UL;
943 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
944 index e0676f390d57..95f4a976c160 100644
945 --- a/arch/s390/kvm/kvm-s390.c
946 +++ b/arch/s390/kvm/kvm-s390.c
947 @@ -753,7 +753,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
948
949 if (rc == 0) {
950 if (kvm_is_ucontrol(vcpu->kvm))
951 - rc = -EOPNOTSUPP;
952 + /* Don't exit for host interrupts. */
953 + rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
954 else
955 rc = kvm_handle_sie_intercept(vcpu);
956 }
957 diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
958 index b959f5592604..8dfe645bcc4b 100644
959 --- a/arch/sh/kernel/dumpstack.c
960 +++ b/arch/sh/kernel/dumpstack.c
961 @@ -115,7 +115,7 @@ static int print_trace_stack(void *data, char *name)
962 */
963 static void print_trace_address(void *data, unsigned long addr, int reliable)
964 {
965 - printk(data);
966 + printk("%s", (char *)data);
967 printk_address(addr, reliable);
968 }
969
970 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
971 index eeda43abed6e..f8842c444560 100644
972 --- a/arch/x86/Makefile
973 +++ b/arch/x86/Makefile
974 @@ -152,6 +152,7 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI
975
976 # does binutils support specific instructions?
977 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
978 +asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
979 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
980 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
981
982 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
983 index a7677babf946..78cbb2db5a85 100644
984 --- a/arch/x86/boot/compressed/eboot.c
985 +++ b/arch/x86/boot/compressed/eboot.c
986 @@ -425,6 +425,9 @@ void setup_graphics(struct boot_params *boot_params)
987 * Because the x86 boot code expects to be passed a boot_params we
988 * need to create one ourselves (usually the bootloader would create
989 * one for us).
990 + *
991 + * The caller is responsible for filling out ->code32_start in the
992 + * returned boot_params.
993 */
994 struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
995 {
996 @@ -483,8 +486,6 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
997 hdr->vid_mode = 0xffff;
998 hdr->boot_flag = 0xAA55;
999
1000 - hdr->code32_start = (__u64)(unsigned long)image->image_base;
1001 -
1002 hdr->type_of_loader = 0x21;
1003
1004 /* Convert unicode cmdline to ascii */
1005 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
1006 index 9116aac232c7..f45ab7a36fb6 100644
1007 --- a/arch/x86/boot/compressed/head_32.S
1008 +++ b/arch/x86/boot/compressed/head_32.S
1009 @@ -50,6 +50,13 @@ ENTRY(efi_pe_entry)
1010 pushl %eax
1011 pushl %esi
1012 pushl %ecx
1013 +
1014 + call reloc
1015 +reloc:
1016 + popl %ecx
1017 + subl reloc, %ecx
1018 + movl %ecx, BP_code32_start(%eax)
1019 +
1020 sub $0x4, %esp
1021
1022 ENTRY(efi_stub_entry)
1023 @@ -63,12 +70,7 @@ ENTRY(efi_stub_entry)
1024 hlt
1025 jmp 1b
1026 2:
1027 - call 3f
1028 -3:
1029 - popl %eax
1030 - subl $3b, %eax
1031 - subl BP_pref_address(%esi), %eax
1032 - add BP_code32_start(%esi), %eax
1033 + movl BP_code32_start(%esi), %eax
1034 leal preferred_addr(%eax), %eax
1035 jmp *%eax
1036
1037 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
1038 index c5c1ae0997e7..b10fa66a2540 100644
1039 --- a/arch/x86/boot/compressed/head_64.S
1040 +++ b/arch/x86/boot/compressed/head_64.S
1041 @@ -217,6 +217,8 @@ ENTRY(efi_pe_entry)
1042 cmpq $0,%rax
1043 je 1f
1044 mov %rax, %rdx
1045 + leaq startup_32(%rip), %rax
1046 + movl %eax, BP_code32_start(%rdx)
1047 popq %rsi
1048 popq %rdi
1049
1050 @@ -230,12 +232,7 @@ ENTRY(efi_stub_entry)
1051 hlt
1052 jmp 1b
1053 2:
1054 - call 3f
1055 -3:
1056 - popq %rax
1057 - subq $3b, %rax
1058 - subq BP_pref_address(%rsi), %rax
1059 - add BP_code32_start(%esi), %eax
1060 + movl BP_code32_start(%esi), %eax
1061 leaq preferred_addr(%rax), %rax
1062 jmp *%rax
1063
1064 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
1065 index e099f9502ace..5f1296872aed 100644
1066 --- a/arch/x86/include/asm/cpufeature.h
1067 +++ b/arch/x86/include/asm/cpufeature.h
1068 @@ -217,9 +217,13 @@
1069 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
1070 #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
1071 #define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
1072 +#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
1073 #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
1074 #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
1075 #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
1076 +#define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */
1077 +#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */
1078 +#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */
1079
1080 /*
1081 * BUG word(s)
1082 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
1083 index 554738963b28..6c1d7411eb00 100644
1084 --- a/arch/x86/include/asm/xsave.h
1085 +++ b/arch/x86/include/asm/xsave.h
1086 @@ -6,11 +6,14 @@
1087
1088 #define XSTATE_CPUID 0x0000000d
1089
1090 -#define XSTATE_FP 0x1
1091 -#define XSTATE_SSE 0x2
1092 -#define XSTATE_YMM 0x4
1093 -#define XSTATE_BNDREGS 0x8
1094 -#define XSTATE_BNDCSR 0x10
1095 +#define XSTATE_FP 0x1
1096 +#define XSTATE_SSE 0x2
1097 +#define XSTATE_YMM 0x4
1098 +#define XSTATE_BNDREGS 0x8
1099 +#define XSTATE_BNDCSR 0x10
1100 +#define XSTATE_OPMASK 0x20
1101 +#define XSTATE_ZMM_Hi256 0x40
1102 +#define XSTATE_Hi16_ZMM 0x80
1103
1104 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
1105
1106 @@ -23,7 +26,8 @@
1107 #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
1108
1109 /* Supported features which support lazy state saving */
1110 -#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
1111 +#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
1112 + | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
1113
1114 /* Supported features which require eager state saving */
1115 #define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
1116 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
1117 index e6253195a301..1ffc32dbe450 100644
1118 --- a/arch/x86/kernel/ftrace.c
1119 +++ b/arch/x86/kernel/ftrace.c
1120 @@ -659,8 +659,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
1121 ret = -EPERM;
1122 goto out;
1123 }
1124 - run_sync();
1125 out:
1126 + run_sync();
1127 return ret;
1128
1129 fail_update:
1130 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
1131 index ebc987398923..af1d14a9ebda 100644
1132 --- a/arch/x86/kernel/ldt.c
1133 +++ b/arch/x86/kernel/ldt.c
1134 @@ -229,6 +229,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
1135 }
1136 }
1137
1138 + /*
1139 + * On x86-64 we do not support 16-bit segments due to
1140 + * IRET leaking the high bits of the kernel stack address.
1141 + */
1142 +#ifdef CONFIG_X86_64
1143 + if (!ldt_info.seg_32bit) {
1144 + error = -EINVAL;
1145 + goto out_unlock;
1146 + }
1147 +#endif
1148 +
1149 fill_ldt(&ldt, &ldt_info);
1150 if (oldmode)
1151 ldt.avl = 0;
1152 diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
1153 index 3056702e81fb..060cc4415bd9 100644
1154 --- a/arch/x86/lib/hash.c
1155 +++ b/arch/x86/lib/hash.c
1156 @@ -39,7 +39,11 @@
1157
1158 static inline u32 crc32_u32(u32 crc, u32 val)
1159 {
1160 +#ifdef CONFIG_AS_CRC32
1161 asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
1162 +#else
1163 + asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
1164 +#endif
1165 return crc;
1166 }
1167
1168 diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
1169 index a05afff50eb9..b6f6863c07f9 100644
1170 --- a/drivers/acpi/acpica/nsrepair.c
1171 +++ b/drivers/acpi/acpica/nsrepair.c
1172 @@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
1173 * this predefined name. Either one return value is expected, or none,
1174 * for both methods and other objects.
1175 *
1176 - * Exit now if there is no return object. Warning if one was expected.
1177 + * Try to fix if there was no return object. Warning if failed to fix.
1178 */
1179 if (!return_object) {
1180 if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
1181 - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
1182 - ACPI_WARN_ALWAYS,
1183 - "Missing expected return value"));
1184 + if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
1185 + ACPI_WARN_PREDEFINED((AE_INFO,
1186 + info->full_pathname,
1187 + ACPI_WARN_ALWAYS,
1188 + "Found unexpected NULL package element"));
1189 +
1190 + status =
1191 + acpi_ns_repair_null_element(info,
1192 + expected_btypes,
1193 + package_index,
1194 + return_object_ptr);
1195 + if (ACPI_SUCCESS(status)) {
1196 + return (AE_OK); /* Repair was successful */
1197 + }
1198 + } else {
1199 + ACPI_WARN_PREDEFINED((AE_INFO,
1200 + info->full_pathname,
1201 + ACPI_WARN_ALWAYS,
1202 + "Missing expected return value"));
1203 + }
1204
1205 return (AE_AML_NO_RETURN_VALUE);
1206 }
1207 diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
1208 index d777bb7cea93..a8939b98f9c3 100644
1209 --- a/drivers/block/mtip32xx/mtip32xx.c
1210 +++ b/drivers/block/mtip32xx/mtip32xx.c
1211 @@ -252,38 +252,45 @@ static void mtip_async_complete(struct mtip_port *port,
1212 void *data,
1213 int status)
1214 {
1215 - struct mtip_cmd *command;
1216 + struct mtip_cmd *cmd;
1217 struct driver_data *dd = data;
1218 - int cb_status = status ? -EIO : 0;
1219 + int unaligned, cb_status = status ? -EIO : 0;
1220 + void (*func)(void *, int);
1221
1222 if (unlikely(!dd) || unlikely(!port))
1223 return;
1224
1225 - command = &port->commands[tag];
1226 + cmd = &port->commands[tag];
1227
1228 if (unlikely(status == PORT_IRQ_TF_ERR)) {
1229 dev_warn(&port->dd->pdev->dev,
1230 "Command tag %d failed due to TFE\n", tag);
1231 }
1232
1233 + /* Clear the active flag */
1234 + atomic_set(&port->commands[tag].active, 0);
1235 +
1236 /* Upper layer callback */
1237 - if (likely(command->async_callback))
1238 - command->async_callback(command->async_data, cb_status);
1239 + func = cmd->async_callback;
1240 + if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) {
1241
1242 - command->async_callback = NULL;
1243 - command->comp_func = NULL;
1244 + /* Unmap the DMA scatter list entries */
1245 + dma_unmap_sg(&dd->pdev->dev,
1246 + cmd->sg,
1247 + cmd->scatter_ents,
1248 + cmd->direction);
1249
1250 - /* Unmap the DMA scatter list entries */
1251 - dma_unmap_sg(&dd->pdev->dev,
1252 - command->sg,
1253 - command->scatter_ents,
1254 - command->direction);
1255 + func(cmd->async_data, cb_status);
1256 + unaligned = cmd->unaligned;
1257
1258 - /* Clear the allocated and active bits for the command */
1259 - atomic_set(&port->commands[tag].active, 0);
1260 - release_slot(port, tag);
1261 + /* Clear the allocated bit for the command */
1262 + release_slot(port, tag);
1263
1264 - up(&port->cmd_slot);
1265 + if (unlikely(unaligned))
1266 + up(&port->cmd_slot_unal);
1267 + else
1268 + up(&port->cmd_slot);
1269 + }
1270 }
1271
1272 /*
1273 @@ -660,11 +667,12 @@ static void mtip_timeout_function(unsigned long int data)
1274 {
1275 struct mtip_port *port = (struct mtip_port *) data;
1276 struct host_to_dev_fis *fis;
1277 - struct mtip_cmd *command;
1278 - int tag, cmdto_cnt = 0;
1279 + struct mtip_cmd *cmd;
1280 + int unaligned, tag, cmdto_cnt = 0;
1281 unsigned int bit, group;
1282 unsigned int num_command_slots;
1283 unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
1284 + void (*func)(void *, int);
1285
1286 if (unlikely(!port))
1287 return;
1288 @@ -694,8 +702,8 @@ static void mtip_timeout_function(unsigned long int data)
1289 group = tag >> 5;
1290 bit = tag & 0x1F;
1291
1292 - command = &port->commands[tag];
1293 - fis = (struct host_to_dev_fis *) command->command;
1294 + cmd = &port->commands[tag];
1295 + fis = (struct host_to_dev_fis *) cmd->command;
1296
1297 set_bit(tag, tagaccum);
1298 cmdto_cnt++;
1299 @@ -709,27 +717,30 @@ static void mtip_timeout_function(unsigned long int data)
1300 */
1301 writel(1 << bit, port->completed[group]);
1302
1303 - /* Call the async completion callback. */
1304 - if (likely(command->async_callback))
1305 - command->async_callback(command->async_data,
1306 - -EIO);
1307 - command->async_callback = NULL;
1308 - command->comp_func = NULL;
1309 + /* Clear the active flag for the command */
1310 + atomic_set(&port->commands[tag].active, 0);
1311
1312 - /* Unmap the DMA scatter list entries */
1313 - dma_unmap_sg(&port->dd->pdev->dev,
1314 - command->sg,
1315 - command->scatter_ents,
1316 - command->direction);
1317 + func = cmd->async_callback;
1318 + if (func &&
1319 + cmpxchg(&cmd->async_callback, func, 0) == func) {
1320
1321 - /*
1322 - * Clear the allocated bit and active tag for the
1323 - * command.
1324 - */
1325 - atomic_set(&port->commands[tag].active, 0);
1326 - release_slot(port, tag);
1327 + /* Unmap the DMA scatter list entries */
1328 + dma_unmap_sg(&port->dd->pdev->dev,
1329 + cmd->sg,
1330 + cmd->scatter_ents,
1331 + cmd->direction);
1332
1333 - up(&port->cmd_slot);
1334 + func(cmd->async_data, -EIO);
1335 + unaligned = cmd->unaligned;
1336 +
1337 + /* Clear the allocated bit for the command. */
1338 + release_slot(port, tag);
1339 +
1340 + if (unaligned)
1341 + up(&port->cmd_slot_unal);
1342 + else
1343 + up(&port->cmd_slot);
1344 + }
1345 }
1346 }
1347
1348 @@ -4213,6 +4224,7 @@ skip_create_disk:
1349 blk_queue_max_hw_sectors(dd->queue, 0xffff);
1350 blk_queue_max_segment_size(dd->queue, 0x400000);
1351 blk_queue_io_min(dd->queue, 4096);
1352 + blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
1353
1354 /*
1355 * write back cache is not supported in the device. FUA depends on
1356 diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
1357 index 54174cb32feb..ffb955e7ccb9 100644
1358 --- a/drivers/block/mtip32xx/mtip32xx.h
1359 +++ b/drivers/block/mtip32xx/mtip32xx.h
1360 @@ -92,7 +92,7 @@
1361
1362 /* Driver name and version strings */
1363 #define MTIP_DRV_NAME "mtip32xx"
1364 -#define MTIP_DRV_VERSION "1.3.0"
1365 +#define MTIP_DRV_VERSION "1.3.1"
1366
1367 /* Maximum number of minor device numbers per device. */
1368 #define MTIP_MAX_MINORS 16
1369 diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
1370 index 00a3abe103a5..27c83e45eaed 100644
1371 --- a/drivers/clk/clk-s2mps11.c
1372 +++ b/drivers/clk/clk-s2mps11.c
1373 @@ -130,7 +130,7 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
1374 int i;
1375
1376 if (!iodev->dev->of_node)
1377 - return NULL;
1378 + return ERR_PTR(-EINVAL);
1379
1380 clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
1381 if (!clk_np) {
1382 diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
1383 index 166e02f16c8a..cc37c342c4cb 100644
1384 --- a/drivers/clk/tegra/clk-tegra124.c
1385 +++ b/drivers/clk/tegra/clk-tegra124.c
1386 @@ -764,7 +764,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
1387 [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
1388 [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
1389 [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
1390 - [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
1391 [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
1392 [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
1393 [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
1394 @@ -809,7 +808,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
1395 [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
1396 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
1397 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
1398 - [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
1399 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
1400 [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
1401 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
1402 @@ -952,7 +950,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
1403 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
1404 [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
1405 [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
1406 - [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
1407 };
1408
1409 static struct tegra_devclk devclks[] __initdata = {
1410 diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
1411 index ae00218b5da3..02517a8206bd 100644
1412 --- a/drivers/clk/ti/clk-44xx.c
1413 +++ b/drivers/clk/ti/clk-44xx.c
1414 @@ -222,7 +222,6 @@ static struct ti_dt_clk omap44xx_clks[] = {
1415 DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"),
1416 DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"),
1417 DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"),
1418 - DT_CLK("50000000.gpmc", "fck", "dummy_ck"),
1419 DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
1420 DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
1421 DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
1422 diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
1423 index 0ef9f581286b..08f3d1b915b3 100644
1424 --- a/drivers/clk/ti/clk-54xx.c
1425 +++ b/drivers/clk/ti/clk-54xx.c
1426 @@ -182,7 +182,6 @@ static struct ti_dt_clk omap54xx_clks[] = {
1427 DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"),
1428 DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"),
1429 DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"),
1430 - DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
1431 DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
1432 DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
1433 DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
1434 diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
1435 index 9977653f2d63..f7e40734c819 100644
1436 --- a/drivers/clk/ti/clk-7xx.c
1437 +++ b/drivers/clk/ti/clk-7xx.c
1438 @@ -262,7 +262,6 @@ static struct ti_dt_clk dra7xx_clks[] = {
1439 DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"),
1440 DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"),
1441 DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"),
1442 - DT_CLK(NULL, "gpmc_ck", "dummy_ck"),
1443 DT_CLK("omap_i2c.1", "ick", "dummy_ck"),
1444 DT_CLK("omap_i2c.2", "ick", "dummy_ck"),
1445 DT_CLK("omap_i2c.3", "ick", "dummy_ck"),
1446 diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c
1447 index b6bffbfd3be7..3a74def216a5 100644
1448 --- a/drivers/firmware/efi/efi-stub-helper.c
1449 +++ b/drivers/firmware/efi/efi-stub-helper.c
1450 @@ -468,7 +468,7 @@ grow:
1451 chunksize = EFI_READ_CHUNK_SIZE;
1452 else
1453 chunksize = size;
1454 - status = efi_call_phys3(fh->read,
1455 + status = efi_call_phys3(files[j].handle->read,
1456 files[j].handle,
1457 &chunksize,
1458 (void *)addr);
1459 @@ -480,7 +480,7 @@ grow:
1460 size -= chunksize;
1461 }
1462
1463 - efi_call_phys1(fh->close, files[j].handle);
1464 + efi_call_phys1(files[j].handle->close, files[j].handle);
1465 }
1466
1467 }
1468 @@ -497,7 +497,7 @@ free_file_total:
1469
1470 close_handles:
1471 for (k = j; k < i; k++)
1472 - efi_call_phys1(fh->close, files[k].handle);
1473 + efi_call_phys1(files[k].handle->close, files[k].handle);
1474 free_files:
1475 efi_call_phys1(sys_table_arg->boottime->free_pool, files);
1476 fail:
1477 diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
1478 index d8e398275ca8..66a492aa4f1e 100644
1479 --- a/drivers/gpu/drm/armada/armada_crtc.c
1480 +++ b/drivers/gpu/drm/armada/armada_crtc.c
1481 @@ -678,6 +678,7 @@ static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
1482 base + LCD_SPU_SRAM_WRDAT);
1483 writel_relaxed(addr | SRAM_WRITE,
1484 base + LCD_SPU_SRAM_CTRL);
1485 + readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN);
1486 addr += 1;
1487 if ((addr & 0x00ff) == 0)
1488 addr += 0xf00;
1489 diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
1490 index f2d7bf90c9fe..2e7801af466e 100644
1491 --- a/drivers/hv/connection.c
1492 +++ b/drivers/hv/connection.c
1493 @@ -55,6 +55,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
1494 case (VERSION_WIN8):
1495 return VERSION_WIN7;
1496
1497 + case (VERSION_WIN8_1):
1498 + return VERSION_WIN8;
1499 +
1500 case (VERSION_WS2008):
1501 default:
1502 return VERSION_INVAL;
1503 @@ -77,7 +80,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
1504 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
1505 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
1506 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
1507 - if (version == VERSION_WIN8)
1508 + if (version == VERSION_WIN8_1)
1509 msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
1510
1511 /*
1512 diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
1513 index 5b1aa027c034..bbba014c9939 100644
1514 --- a/drivers/iio/adc/at91_adc.c
1515 +++ b/drivers/iio/adc/at91_adc.c
1516 @@ -765,14 +765,17 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
1517 if (!pdata)
1518 return -EINVAL;
1519
1520 + st->caps = (struct at91_adc_caps *)
1521 + platform_get_device_id(pdev)->driver_data;
1522 +
1523 st->use_external = pdata->use_external_triggers;
1524 st->vref_mv = pdata->vref;
1525 st->channels_mask = pdata->channels_used;
1526 - st->num_channels = pdata->num_channels;
1527 + st->num_channels = st->caps->num_channels;
1528 st->startup_time = pdata->startup_time;
1529 st->trigger_number = pdata->trigger_number;
1530 st->trigger_list = pdata->trigger_list;
1531 - st->registers = pdata->registers;
1532 + st->registers = &st->caps->registers;
1533
1534 return 0;
1535 }
1536 @@ -1101,7 +1104,6 @@ static int at91_adc_remove(struct platform_device *pdev)
1537 return 0;
1538 }
1539
1540 -#ifdef CONFIG_OF
1541 static struct at91_adc_caps at91sam9260_caps = {
1542 .calc_startup_ticks = calc_startup_ticks_9260,
1543 .num_channels = 4,
1544 @@ -1154,11 +1156,27 @@ static const struct of_device_id at91_adc_dt_ids[] = {
1545 {},
1546 };
1547 MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
1548 -#endif
1549 +
1550 +static const struct platform_device_id at91_adc_ids[] = {
1551 + {
1552 + .name = "at91sam9260-adc",
1553 + .driver_data = (unsigned long)&at91sam9260_caps,
1554 + }, {
1555 + .name = "at91sam9g45-adc",
1556 + .driver_data = (unsigned long)&at91sam9g45_caps,
1557 + }, {
1558 + .name = "at91sam9x5-adc",
1559 + .driver_data = (unsigned long)&at91sam9x5_caps,
1560 + }, {
1561 + /* terminator */
1562 + }
1563 +};
1564 +MODULE_DEVICE_TABLE(platform, at91_adc_ids);
1565
1566 static struct platform_driver at91_adc_driver = {
1567 .probe = at91_adc_probe,
1568 .remove = at91_adc_remove,
1569 + .id_table = at91_adc_ids,
1570 .driver = {
1571 .name = DRIVER_NAME,
1572 .of_match_table = of_match_ptr(at91_adc_dt_ids),
1573 diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
1574 index c67d83bdc8f0..fe25042f056a 100644
1575 --- a/drivers/iio/industrialio-buffer.c
1576 +++ b/drivers/iio/industrialio-buffer.c
1577 @@ -165,7 +165,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
1578 int ret;
1579 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1580
1581 - ret = test_bit(to_iio_dev_attr(attr)->address,
1582 + /* Ensure ret is 0 or 1. */
1583 + ret = !!test_bit(to_iio_dev_attr(attr)->address,
1584 indio_dev->buffer->scan_mask);
1585
1586 return sprintf(buf, "%d\n", ret);
1587 @@ -866,7 +867,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
1588 if (!buffer->scan_mask)
1589 return 0;
1590
1591 - return test_bit(bit, buffer->scan_mask);
1592 + /* Ensure return value is 0 or 1. */
1593 + return !!test_bit(bit, buffer->scan_mask);
1594 };
1595 EXPORT_SYMBOL_GPL(iio_scan_mask_query);
1596
1597 diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
1598 index 47a6dbac2d0c..d976e6ce60db 100644
1599 --- a/drivers/iio/light/cm32181.c
1600 +++ b/drivers/iio/light/cm32181.c
1601 @@ -221,6 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
1602 *val = cm32181->calibscale;
1603 return IIO_VAL_INT;
1604 case IIO_CHAN_INFO_INT_TIME:
1605 + *val = 0;
1606 ret = cm32181_read_als_it(cm32181, val2);
1607 return ret;
1608 }
1609 diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
1610 index a45e07492db3..39fc67e82138 100644
1611 --- a/drivers/iio/light/cm36651.c
1612 +++ b/drivers/iio/light/cm36651.c
1613 @@ -652,7 +652,19 @@ static int cm36651_probe(struct i2c_client *client,
1614 cm36651->client = client;
1615 cm36651->ps_client = i2c_new_dummy(client->adapter,
1616 CM36651_I2C_ADDR_PS);
1617 + if (!cm36651->ps_client) {
1618 + dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
1619 + ret = -ENODEV;
1620 + goto error_disable_reg;
1621 + }
1622 +
1623 cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
1624 + if (!cm36651->ara_client) {
1625 + dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
1626 + ret = -ENODEV;
1627 + goto error_i2c_unregister_ps;
1628 + }
1629 +
1630 mutex_init(&cm36651->lock);
1631 indio_dev->dev.parent = &client->dev;
1632 indio_dev->channels = cm36651_channels;
1633 @@ -664,7 +676,7 @@ static int cm36651_probe(struct i2c_client *client,
1634 ret = cm36651_setup_reg(cm36651);
1635 if (ret) {
1636 dev_err(&client->dev, "%s: register setup failed\n", __func__);
1637 - goto error_disable_reg;
1638 + goto error_i2c_unregister_ara;
1639 }
1640
1641 ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
1642 @@ -672,7 +684,7 @@ static int cm36651_probe(struct i2c_client *client,
1643 "cm36651", indio_dev);
1644 if (ret) {
1645 dev_err(&client->dev, "%s: request irq failed\n", __func__);
1646 - goto error_disable_reg;
1647 + goto error_i2c_unregister_ara;
1648 }
1649
1650 ret = iio_device_register(indio_dev);
1651 @@ -685,6 +697,10 @@ static int cm36651_probe(struct i2c_client *client,
1652
1653 error_free_irq:
1654 free_irq(client->irq, indio_dev);
1655 +error_i2c_unregister_ara:
1656 + i2c_unregister_device(cm36651->ara_client);
1657 +error_i2c_unregister_ps:
1658 + i2c_unregister_device(cm36651->ps_client);
1659 error_disable_reg:
1660 regulator_disable(cm36651->vled_reg);
1661 return ret;
1662 @@ -698,6 +714,8 @@ static int cm36651_remove(struct i2c_client *client)
1663 iio_device_unregister(indio_dev);
1664 regulator_disable(cm36651->vled_reg);
1665 free_irq(client->irq, indio_dev);
1666 + i2c_unregister_device(cm36651->ps_client);
1667 + i2c_unregister_device(cm36651->ara_client);
1668
1669 return 0;
1670 }
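
The cm36651 changes add the NULL checks that i2c_new_dummy() requires (it returns NULL, not an ERR_PTR, on failure) and rebalance the error path so every dummy client created before the failure point is unregistered again, both in probe and in remove. Several MFD drivers further down in this patch receive the same treatment. A condensed sketch of the unwind-ladder idiom, with illustrative names and addresses:

    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int demo_attach_aux_clients(struct i2c_client *client)
    {
            struct i2c_client *aux1, *aux2;
            int ret;

            aux1 = i2c_new_dummy(client->adapter, 0x18);    /* example address */
            if (!aux1)
                    return -ENODEV;

            aux2 = i2c_new_dummy(client->adapter, 0x19);    /* example address */
            if (!aux2) {
                    ret = -ENODEV;
                    goto err_unregister_aux1;
            }

            /* ... further setup; later failures jump to the matching label ... */

            return 0;

    err_unregister_aux1:
            i2c_unregister_device(aux1);    /* undo exactly what was created */
            return ret;
    }
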
1671 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
1672 index 0601b9daf840..c3239170d8b7 100644
1673 --- a/drivers/infiniband/core/cm.c
1674 +++ b/drivers/infiniband/core/cm.c
1675 @@ -349,23 +349,6 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
1676 grh, &av->ah_attr);
1677 }
1678
1679 -int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
1680 -{
1681 - struct cm_id_private *cm_id_priv;
1682 -
1683 - cm_id_priv = container_of(id, struct cm_id_private, id);
1684 -
1685 - if (smac != NULL)
1686 - memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
1687 -
1688 - if (alt_smac != NULL)
1689 - memcpy(cm_id_priv->alt_av.smac, alt_smac,
1690 - sizeof(cm_id_priv->alt_av.smac));
1691 -
1692 - return 0;
1693 -}
1694 -EXPORT_SYMBOL(ib_update_cm_av);
1695 -
1696 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
1697 {
1698 struct cm_device *cm_dev;
1699 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1700 index 199958d9ddc8..42c3058e6e9c 100644
1701 --- a/drivers/infiniband/core/cma.c
1702 +++ b/drivers/infiniband/core/cma.c
1703 @@ -1284,15 +1284,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1704 struct rdma_id_private *listen_id, *conn_id;
1705 struct rdma_cm_event event;
1706 int offset, ret;
1707 - u8 smac[ETH_ALEN];
1708 - u8 alt_smac[ETH_ALEN];
1709 - u8 *psmac = smac;
1710 - u8 *palt_smac = alt_smac;
1711 - int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
1712 - RDMA_TRANSPORT_IB) &&
1713 - (rdma_port_get_link_layer(cm_id->device,
1714 - ib_event->param.req_rcvd.port) ==
1715 - IB_LINK_LAYER_ETHERNET));
1716
1717 listen_id = cm_id->context;
1718 if (!cma_check_req_qp_type(&listen_id->id, ib_event))
1719 @@ -1336,28 +1327,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1720 ret = conn_id->id.event_handler(&conn_id->id, &event);
1721 if (ret)
1722 goto err3;
1723 -
1724 - if (is_iboe) {
1725 - if (ib_event->param.req_rcvd.primary_path != NULL)
1726 - rdma_addr_find_smac_by_sgid(
1727 - &ib_event->param.req_rcvd.primary_path->sgid,
1728 - psmac, NULL);
1729 - else
1730 - psmac = NULL;
1731 - if (ib_event->param.req_rcvd.alternate_path != NULL)
1732 - rdma_addr_find_smac_by_sgid(
1733 - &ib_event->param.req_rcvd.alternate_path->sgid,
1734 - palt_smac, NULL);
1735 - else
1736 - palt_smac = NULL;
1737 - }
1738 /*
1739 * Acquire mutex to prevent user executing rdma_destroy_id()
1740 * while we're accessing the cm_id.
1741 */
1742 mutex_lock(&lock);
1743 - if (is_iboe)
1744 - ib_update_cm_av(cm_id, psmac, palt_smac);
1745 if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
1746 (conn_id->id.qp_type != IB_QPT_UD))
1747 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1748 diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
1749 index 212150c25ea0..8cc837537768 100644
1750 --- a/drivers/infiniband/hw/ehca/ehca_cq.c
1751 +++ b/drivers/infiniband/hw/ehca/ehca_cq.c
1752 @@ -283,6 +283,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
1753 (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
1754 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1755 ehca_err(device, "Copy to udata failed.");
1756 + cq = ERR_PTR(-EFAULT);
1757 goto create_cq_exit4;
1758 }
1759 }
1760 diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
1761 index 714293b78518..e2f9a51f4a38 100644
1762 --- a/drivers/infiniband/hw/ipath/ipath_diag.c
1763 +++ b/drivers/infiniband/hw/ipath/ipath_diag.c
1764 @@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1765 size_t count, loff_t *off)
1766 {
1767 u32 __iomem *piobuf;
1768 - u32 plen, clen, pbufn;
1769 + u32 plen, pbufn, maxlen_reserve;
1770 struct ipath_diag_pkt odp;
1771 struct ipath_diag_xpkt dp;
1772 u32 *tmpbuf = NULL;
1773 @@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1774 u64 val;
1775 u32 l_state, lt_state; /* LinkState, LinkTrainingState */
1776
1777 - if (count < sizeof(odp)) {
1778 - ret = -EINVAL;
1779 - goto bail;
1780 - }
1781
1782 if (count == sizeof(dp)) {
1783 if (copy_from_user(&dp, data, sizeof(dp))) {
1784 ret = -EFAULT;
1785 goto bail;
1786 }
1787 - } else if (copy_from_user(&odp, data, sizeof(odp))) {
1788 - ret = -EFAULT;
1789 + } else if (count == sizeof(odp)) {
1790 + if (copy_from_user(&odp, data, sizeof(odp))) {
1791 + ret = -EFAULT;
1792 + goto bail;
1793 + }
1794 + } else {
1795 + ret = -EINVAL;
1796 goto bail;
1797 }
1798
1799 - /*
1800 - * Due to padding/alignment issues (lessened with new struct)
1801 - * the old and new structs are the same length. We need to
1802 - * disambiguate them, which we can do because odp.len has never
1803 - * been less than the total of LRH+BTH+DETH so far, while
1804 - * dp.unit (same offset) unit is unlikely to get that high.
1805 - * Similarly, dp.data, the pointer to user at the same offset
1806 - * as odp.unit, is almost certainly at least one (512byte)page
1807 - * "above" NULL. The if-block below can be omitted if compatibility
1808 - * between a new driver and older diagnostic code is unimportant.
1809 - * compatibility the other direction (new diags, old driver) is
1810 - * handled in the diagnostic code, with a warning.
1811 - */
1812 - if (dp.unit >= 20 && dp.data < 512) {
1813 - /* very probable version mismatch. Fix it up */
1814 - memcpy(&odp, &dp, sizeof(odp));
1815 - /* We got a legacy dp, copy elements to dp */
1816 - dp.unit = odp.unit;
1817 - dp.data = odp.data;
1818 - dp.len = odp.len;
1819 - dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
1820 - }
1821 -
1822 /* send count must be an exact number of dwords */
1823 if (dp.len & 3) {
1824 ret = -EINVAL;
1825 goto bail;
1826 }
1827
1828 - clen = dp.len >> 2;
1829 + plen = dp.len >> 2;
1830
1831 dd = ipath_lookup(dp.unit);
1832 if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
1833 @@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1834 goto bail;
1835 }
1836
1837 - /* need total length before first word written */
1838 - /* +1 word is for the qword padding */
1839 - plen = sizeof(u32) + dp.len;
1840 -
1841 - if ((plen + 4) > dd->ipath_ibmaxlen) {
1842 + /*
1843 + * need total length before first word written, plus 2 Dwords. One Dword
1844 + * is for padding so we get the full user data when not aligned on
1845 + * a word boundary. The other Dword is to make sure we have room for the
1846 + * ICRC which gets tacked on later.
1847 + */
1848 + maxlen_reserve = 2 * sizeof(u32);
1849 + if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
1850 ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
1851 - plen - 4, dd->ipath_ibmaxlen);
1852 + dp.len, dd->ipath_ibmaxlen);
1853 ret = -EINVAL;
1854 - goto bail; /* before writing pbc */
1855 + goto bail;
1856 }
1857 +
1858 + plen = sizeof(u32) + dp.len;
1859 +
1860 tmpbuf = vmalloc(plen);
1861 if (!tmpbuf) {
1862 dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
1863 @@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
1864 */
1865 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
1866 ipath_flush_wc();
1867 - __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
1868 + __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
1869 ipath_flush_wc();
1870 - __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
1871 + __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
1872 } else
1873 - __iowrite32_copy(piobuf + 2, tmpbuf, clen);
1874 + __iowrite32_copy(piobuf + 2, tmpbuf, plen);
1875
1876 ipath_flush_wc();
1877
1878 diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
1879 index 5b71d43bd89c..42dde06fdb91 100644
1880 --- a/drivers/infiniband/hw/mthca/mthca_provider.c
1881 +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
1882 @@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
1883
1884 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
1885 mthca_free_cq(to_mdev(ibdev), cq);
1886 + err = -EFAULT;
1887 goto err_free;
1888 }
1889
1890 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
1891 index 8308e3634767..eb624611f94b 100644
1892 --- a/drivers/infiniband/hw/nes/nes_verbs.c
1893 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
1894 @@ -1186,7 +1186,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1895 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1896 kfree(nesqp->allocated_buffer);
1897 nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
1898 - return NULL;
1899 + return ERR_PTR(-EFAULT);
1900 }
1901 if (req.user_wqe_buffers) {
1902 virt_wqs = 1;
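
The ehca, mthca and nes hunks above share one theme: functions that return a pointer must report failure through the ERR_PTR() encoding (or at least set the error before jumping to the exit path), because their callers test the result with IS_ERR()/PTR_ERR() rather than comparing against NULL. A short, self-contained sketch of the convention with a hypothetical object type:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_obj {
            int id;
    };

    /* Return a valid pointer on success, an ERR_PTR()-encoded errno on failure. */
    static struct demo_obj *demo_create(int id)
    {
            struct demo_obj *obj;

            if (id < 0)
                    return ERR_PTR(-EINVAL);

            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (!obj)
                    return ERR_PTR(-ENOMEM);

            obj->id = id;
            return obj;
    }

    static int demo_use(int id)
    {
            struct demo_obj *obj = demo_create(id);

            if (IS_ERR(obj))
                    return PTR_ERR(obj);    /* propagate the encoded errno */

            kfree(obj);
            return 0;
    }
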
1903 diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
1904 index 275f247f9fca..2023cd61b897 100644
1905 --- a/drivers/infiniband/hw/qib/qib_file_ops.c
1906 +++ b/drivers/infiniband/hw/qib/qib_file_ops.c
1907 @@ -1578,7 +1578,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
1908 struct qib_ctxtdata *rcd = fd->rcd;
1909 struct qib_devdata *dd = rcd->dd;
1910
1911 - if (dd->flags & QIB_HAS_SEND_DMA)
1912 + if (dd->flags & QIB_HAS_SEND_DMA) {
1913
1914 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1915 dd->unit,
1916 @@ -1586,6 +1586,7 @@ static int do_qib_user_sdma_queue_create(struct file *fp)
1917 fd->subctxt);
1918 if (!fd->pq)
1919 return -ENOMEM;
1920 + }
1921
1922 return 0;
1923 }
1924 diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
1925 index 24e802f4ea2f..76c3e177164d 100644
1926 --- a/drivers/infiniband/hw/qib/qib_init.c
1927 +++ b/drivers/infiniband/hw/qib/qib_init.c
1928 @@ -1097,14 +1097,10 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1929 int ret;
1930
1931 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
1932 - if (!dd) {
1933 - dd = ERR_PTR(-ENOMEM);
1934 - goto bail;
1935 - }
1936 + if (!dd)
1937 + return ERR_PTR(-ENOMEM);
1938
1939 -#ifdef CONFIG_DEBUG_FS
1940 - qib_dbg_ibdev_init(&dd->verbs_dev);
1941 -#endif
1942 + INIT_LIST_HEAD(&dd->list);
1943
1944 idr_preload(GFP_KERNEL);
1945 spin_lock_irqsave(&qib_devs_lock, flags);
1946 @@ -1121,11 +1117,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1947 if (ret < 0) {
1948 qib_early_err(&pdev->dev,
1949 "Could not allocate unit ID: error %d\n", -ret);
1950 -#ifdef CONFIG_DEBUG_FS
1951 - qib_dbg_ibdev_exit(&dd->verbs_dev);
1952 -#endif
1953 - ib_dealloc_device(&dd->verbs_dev.ibdev);
1954 - dd = ERR_PTR(ret);
1955 goto bail;
1956 }
1957
1958 @@ -1139,9 +1130,15 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
1959 qib_early_err(&pdev->dev,
1960 "Could not alloc cpulist info, cpu affinity might be wrong\n");
1961 }
1962 -
1963 -bail:
1964 +#ifdef CONFIG_DEBUG_FS
1965 + qib_dbg_ibdev_init(&dd->verbs_dev);
1966 +#endif
1967 return dd;
1968 +bail:
1969 + if (!list_empty(&dd->list))
1970 + list_del_init(&dd->list);

1971 + ib_dealloc_device(&dd->verbs_dev.ibdev);
1972 +	return ERR_PTR(ret);
1973 }
1974
1975 /*
1976 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1977 index 8ee228e9ab5a..f596b413a35b 100644
1978 --- a/drivers/infiniband/ulp/isert/ib_isert.c
1979 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1980 @@ -436,11 +436,18 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
1981 {
1982 struct fast_reg_descriptor *fr_desc;
1983 struct isert_device *device = isert_conn->conn_device;
1984 - int i, ret;
1985 + struct se_session *se_sess = isert_conn->conn->sess->se_sess;
1986 + struct se_node_acl *se_nacl = se_sess->se_node_acl;
1987 + int i, ret, tag_num;
1988 + /*
1989 + * Setup the number of FRMRs based upon the number of tags
1990 + * available to session in iscsi_target_locate_portal().
1991 + */
1992 + tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
1993 + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
1994
1995 - INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
1996 isert_conn->conn_fr_pool_size = 0;
1997 - for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
1998 + for (i = 0; i < tag_num; i++) {
1999 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
2000 if (!fr_desc) {
2001 pr_err("Failed to allocate fast_reg descriptor\n");
2002 @@ -498,6 +505,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2003 kref_get(&isert_conn->conn_kref);
2004 mutex_init(&isert_conn->conn_mutex);
2005 spin_lock_init(&isert_conn->conn_lock);
2006 + INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
2007
2008 cma_id->context = isert_conn;
2009 isert_conn->conn_cm_id = cma_id;
2010 @@ -569,15 +577,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2011 goto out_mr;
2012 }
2013
2014 - if (device->use_fastreg) {
2015 - ret = isert_conn_create_fastreg_pool(isert_conn);
2016 - if (ret) {
2017 - pr_err("Conn: %p failed to create fastreg pool\n",
2018 - isert_conn);
2019 - goto out_fastreg;
2020 - }
2021 - }
2022 -
2023 ret = isert_conn_setup_qp(isert_conn, cma_id);
2024 if (ret)
2025 goto out_conn_dev;
2026 @@ -591,9 +590,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
2027 return 0;
2028
2029 out_conn_dev:
2030 - if (device->use_fastreg)
2031 - isert_conn_free_fastreg_pool(isert_conn);
2032 -out_fastreg:
2033 ib_dereg_mr(isert_conn->conn_mr);
2034 out_mr:
2035 ib_dealloc_pd(isert_conn->conn_pd);
2036 @@ -967,6 +963,15 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
2037 }
2038 if (!login->login_failed) {
2039 if (login->login_complete) {
2040 + if (isert_conn->conn_device->use_fastreg) {
2041 + ret = isert_conn_create_fastreg_pool(isert_conn);
2042 + if (ret) {
2043 + pr_err("Conn: %p failed to create"
2044 + " fastreg pool\n", isert_conn);
2045 + return ret;
2046 + }
2047 + }
2048 +
2049 ret = isert_alloc_rx_descriptors(isert_conn);
2050 if (ret)
2051 return ret;
2052 @@ -1451,7 +1456,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
2053 }
2054
2055 static void
2056 -isert_put_cmd(struct isert_cmd *isert_cmd)
2057 +isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
2058 {
2059 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2060 struct isert_conn *isert_conn = isert_cmd->conn;
2061 @@ -1467,8 +1472,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
2062 list_del_init(&cmd->i_conn_node);
2063 spin_unlock_bh(&conn->cmd_lock);
2064
2065 - if (cmd->data_direction == DMA_TO_DEVICE)
2066 + if (cmd->data_direction == DMA_TO_DEVICE) {
2067 iscsit_stop_dataout_timer(cmd);
2068 + /*
2069 + * Check for special case during comp_err where
2070 + * WRITE_PENDING has been handed off from core,
2071 + * but requires an extra target_put_sess_cmd()
2072 + * before transport_generic_free_cmd() below.
2073 + */
2074 + if (comp_err &&
2075 + cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
2076 + struct se_cmd *se_cmd = &cmd->se_cmd;
2077 +
2078 + target_put_sess_cmd(se_cmd->se_sess, se_cmd);
2079 + }
2080 + }
2081
2082 device->unreg_rdma_mem(isert_cmd, isert_conn);
2083 transport_generic_free_cmd(&cmd->se_cmd, 0);
2084 @@ -1523,7 +1541,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
2085
2086 static void
2087 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
2088 - struct ib_device *ib_dev)
2089 + struct ib_device *ib_dev, bool comp_err)
2090 {
2091 if (isert_cmd->pdu_buf_dma != 0) {
2092 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
2093 @@ -1533,7 +1551,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
2094 }
2095
2096 isert_unmap_tx_desc(tx_desc, ib_dev);
2097 - isert_put_cmd(isert_cmd);
2098 + isert_put_cmd(isert_cmd, comp_err);
2099 }
2100
2101 static void
2102 @@ -1577,14 +1595,14 @@ isert_do_control_comp(struct work_struct *work)
2103 iscsit_tmr_post_handler(cmd, cmd->conn);
2104
2105 cmd->i_state = ISTATE_SENT_STATUS;
2106 - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
2107 + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
2108 break;
2109 case ISTATE_SEND_REJECT:
2110 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
2111 atomic_dec(&isert_conn->post_send_buf_count);
2112
2113 cmd->i_state = ISTATE_SENT_STATUS;
2114 - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
2115 + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
2116 break;
2117 case ISTATE_SEND_LOGOUTRSP:
2118 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
2119 @@ -1598,7 +1616,7 @@ isert_do_control_comp(struct work_struct *work)
2120 case ISTATE_SEND_TEXTRSP:
2121 atomic_dec(&isert_conn->post_send_buf_count);
2122 cmd->i_state = ISTATE_SENT_STATUS;
2123 - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
2124 + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
2125 break;
2126 default:
2127 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
2128 @@ -1629,7 +1647,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
2129 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2130
2131 cmd->i_state = ISTATE_SENT_STATUS;
2132 - isert_completion_put(tx_desc, isert_cmd, ib_dev);
2133 + isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
2134 }
2135
2136 static void
2137 @@ -1710,7 +1728,7 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
2138 wr = &t->isert_cmd->rdma_wr;
2139
2140 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2141 - isert_completion_put(t, t->isert_cmd, ib_dev);
2142 + isert_completion_put(t, t->isert_cmd, ib_dev, true);
2143 }
2144 }
2145
2146 @@ -1729,14 +1747,14 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
2147 wr = &t->isert_cmd->rdma_wr;
2148
2149 atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
2150 - isert_completion_put(t, t->isert_cmd, ib_dev);
2151 + isert_completion_put(t, t->isert_cmd, ib_dev, true);
2152 }
2153 tx_desc->comp_llnode_batch = NULL;
2154
2155 if (!isert_cmd)
2156 isert_unmap_tx_desc(tx_desc, ib_dev);
2157 else
2158 - isert_completion_put(tx_desc, isert_cmd, ib_dev);
2159 + isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
2160 }
2161
2162 static void
2163 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
2164 index 0e537d8d0e47..d1078ce73095 100644
2165 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
2166 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
2167 @@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2168 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2169 struct srpt_send_ioctx *ioctx)
2170 {
2171 + struct ib_device *dev = ch->sport->sdev->device;
2172 struct se_cmd *cmd;
2173 struct scatterlist *sg, *sg_orig;
2174 int sg_cnt;
2175 @@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2176
2177 db = ioctx->rbufs;
2178 tsize = cmd->data_length;
2179 - dma_len = sg_dma_len(&sg[0]);
2180 + dma_len = ib_sg_dma_len(dev, &sg[0]);
2181 riu = ioctx->rdma_ius;
2182
2183 /*
2184 @@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2185 ++j;
2186 if (j < count) {
2187 sg = sg_next(sg);
2188 - dma_len = sg_dma_len(sg);
2189 + dma_len = ib_sg_dma_len(
2190 + dev, sg);
2191 }
2192 }
2193 } else {
2194 @@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2195 tsize = cmd->data_length;
2196 riu = ioctx->rdma_ius;
2197 sg = sg_orig;
2198 - dma_len = sg_dma_len(&sg[0]);
2199 - dma_addr = sg_dma_address(&sg[0]);
2200 + dma_len = ib_sg_dma_len(dev, &sg[0]);
2201 + dma_addr = ib_sg_dma_address(dev, &sg[0]);
2202
2203 /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
2204 for (i = 0, j = 0;
2205 @@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
2206 ++j;
2207 if (j < count) {
2208 sg = sg_next(sg);
2209 - dma_len = sg_dma_len(sg);
2210 - dma_addr = sg_dma_address(sg);
2211 + dma_len = ib_sg_dma_len(
2212 + dev, sg);
2213 + dma_addr = ib_sg_dma_address(
2214 + dev, sg);
2215 }
2216 }
2217 } else {
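
The srpt hunk replaces sg_dma_len()/sg_dma_address() with the ib_sg_dma_len()/ib_sg_dma_address() wrappers, which go through the ib_device DMA mapping ops so HCA drivers that override those ops report the element lengths and addresses they actually programmed. A hedged sketch of walking an already-mapped scatterlist through the wrappers (it assumes ib_dma_map_sg() was done elsewhere):

    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    /* Sum the DMA lengths of 'count' mapped entries as the HCA sees them. */
    static u64 demo_total_dma_len(struct ib_device *dev,
                                  struct scatterlist *sgl, int count)
    {
            struct scatterlist *sg;
            u64 total = 0;
            int i;

            for_each_sg(sgl, sg, count, i)
                    total += ib_sg_dma_len(dev, sg);

            return total;
    }
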
2218 diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
2219 index b2351466b0da..32cffca14d0b 100644
2220 --- a/drivers/media/dvb-frontends/m88rs2000.c
2221 +++ b/drivers/media/dvb-frontends/m88rs2000.c
2222 @@ -715,6 +715,22 @@ static int m88rs2000_get_frontend(struct dvb_frontend *fe)
2223 return 0;
2224 }
2225
2226 +static int m88rs2000_get_tune_settings(struct dvb_frontend *fe,
2227 + struct dvb_frontend_tune_settings *tune)
2228 +{
2229 + struct dtv_frontend_properties *c = &fe->dtv_property_cache;
2230 +
2231 + if (c->symbol_rate > 3000000)
2232 + tune->min_delay_ms = 2000;
2233 + else
2234 + tune->min_delay_ms = 3000;
2235 +
2236 + tune->step_size = c->symbol_rate / 16000;
2237 + tune->max_drift = c->symbol_rate / 2000;
2238 +
2239 + return 0;
2240 +}
2241 +
2242 static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
2243 {
2244 struct m88rs2000_state *state = fe->demodulator_priv;
2245 @@ -746,7 +762,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
2246 .symbol_rate_tolerance = 500, /* ppm */
2247 .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
2248 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
2249 - FE_CAN_QPSK |
2250 + FE_CAN_QPSK | FE_CAN_INVERSION_AUTO |
2251 FE_CAN_FEC_AUTO
2252 },
2253
2254 @@ -766,6 +782,7 @@ static struct dvb_frontend_ops m88rs2000_ops = {
2255
2256 .set_frontend = m88rs2000_set_frontend,
2257 .get_frontend = m88rs2000_get_frontend,
2258 + .get_tune_settings = m88rs2000_get_tune_settings,
2259 };
2260
2261 struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config,
2262 diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
2263 index c9b2350e92c8..6e4bdb90aa92 100644
2264 --- a/drivers/media/pci/saa7134/saa7134-cards.c
2265 +++ b/drivers/media/pci/saa7134/saa7134-cards.c
2266 @@ -8045,8 +8045,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
2267 break;
2268 } /* switch() */
2269
2270 - /* initialize tuner */
2271 - if (TUNER_ABSENT != dev->tuner_type) {
2272 + /* initialize tuner (don't do this when resuming) */
2273 + if (!dev->insuspend && TUNER_ABSENT != dev->tuner_type) {
2274 int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
2275
2276 /* Note: radio tuner address is always filled in,
2277 diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
2278 index 1c776c1186f1..1dbff1472809 100644
2279 --- a/drivers/media/platform/omap3isp/isppreview.c
2280 +++ b/drivers/media/platform/omap3isp/isppreview.c
2281 @@ -1079,6 +1079,7 @@ static void preview_config_input_format(struct isp_prev_device *prev,
2282 */
2283 static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
2284 {
2285 + const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
2286 struct isp_device *isp = to_isp_device(prev);
2287 unsigned int sph = prev->crop.left;
2288 unsigned int eph = prev->crop.left + prev->crop.width - 1;
2289 @@ -1086,6 +1087,14 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
2290 unsigned int elv = prev->crop.top + prev->crop.height - 1;
2291 u32 features;
2292
2293 + if (format->code != V4L2_MBUS_FMT_Y8_1X8 &&
2294 + format->code != V4L2_MBUS_FMT_Y10_1X10) {
2295 + sph -= 2;
2296 + eph += 2;
2297 + slv -= 2;
2298 + elv += 2;
2299 + }
2300 +
2301 features = (prev->params.params[0].features & active)
2302 | (prev->params.params[1].features & ~active);
2303
2304 diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
2305 index 05e9bd11a3ff..dfdfa772eb1e 100644
2306 --- a/drivers/media/usb/em28xx/em28xx-audio.c
2307 +++ b/drivers/media/usb/em28xx/em28xx-audio.c
2308 @@ -252,7 +252,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
2309 {
2310 struct em28xx *dev = snd_pcm_substream_chip(substream);
2311 struct snd_pcm_runtime *runtime = substream->runtime;
2312 - int ret = 0;
2313 + int nonblock, ret = 0;
2314
2315 if (!dev) {
2316 em28xx_err("BUG: em28xx can't find device struct."
2317 @@ -265,15 +265,15 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
2318
2319 dprintk("opening device and trying to acquire exclusive lock\n");
2320
2321 + nonblock = !!(substream->f_flags & O_NONBLOCK);
2322 + if (nonblock) {
2323 + if (!mutex_trylock(&dev->lock))
2324 + return -EAGAIN;
2325 + } else
2326 + mutex_lock(&dev->lock);
2327 +
2328 runtime->hw = snd_em28xx_hw_capture;
2329 if ((dev->alt == 0 || dev->is_audio_only) && dev->adev.users == 0) {
2330 - int nonblock = !!(substream->f_flags & O_NONBLOCK);
2331 -
2332 - if (nonblock) {
2333 - if (!mutex_trylock(&dev->lock))
2334 - return -EAGAIN;
2335 - } else
2336 - mutex_lock(&dev->lock);
2337 if (dev->is_audio_only)
2338 /* vendor audio is on a separate interface */
2339 dev->alt = 1;
2340 @@ -299,11 +299,11 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
2341 ret = em28xx_audio_analog_set(dev);
2342 if (ret < 0)
2343 goto err;
2344 -
2345 - dev->adev.users++;
2346 - mutex_unlock(&dev->lock);
2347 }
2348
2349 + dev->adev.users++;
2350 + mutex_unlock(&dev->lock);
2351 +
2352 /* Dynamically adjust the period size */
2353 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
2354 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
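
The em28xx-audio hunk moves the locking to the top of the open path so the mutex is taken (or try-locked for O_NONBLOCK opens) on every open, and the user count is bumped while the lock is held rather than only when the interface had to be reconfigured. The non-blocking-open idiom in isolation, as a minimal sketch:

    #include <linux/errno.h>
    #include <linux/fcntl.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    /* Honour O_NONBLOCK: return -EAGAIN instead of sleeping on the lock. */
    static int demo_open(unsigned int f_flags)
    {
            if (f_flags & O_NONBLOCK) {
                    if (!mutex_trylock(&demo_lock))   /* returns 1 on success */
                            return -EAGAIN;
            } else {
                    mutex_lock(&demo_lock);
            }

            /* ... configure the device under the lock ... */

            mutex_unlock(&demo_lock);
            return 0;
    }
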
2355 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
2356 index a0a669e81362..1373cfa4e974 100644
2357 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
2358 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
2359 @@ -693,7 +693,8 @@ static void pctv_520e_init(struct em28xx *dev)
2360 static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
2361 {
2362 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
2363 - struct em28xx *dev = fe->dvb->priv;
2364 + struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
2365 + struct em28xx *dev = i2c_bus->dev;
2366 #ifdef CONFIG_GPIOLIB
2367 struct em28xx_dvb *dvb = dev->dvb;
2368 int ret;
2369 diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
2370 index 2a38621cf718..41a9a892f79c 100644
2371 --- a/drivers/media/usb/gspca/sn9c20x.c
2372 +++ b/drivers/media/usb/gspca/sn9c20x.c
2373 @@ -2359,6 +2359,7 @@ static const struct usb_device_id device_table[] = {
2374 {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
2375 {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
2376 {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
2377 + {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
2378 {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
2379 {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
2380 {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
2381 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
2382 index 898c208889cd..e32d6a59eaca 100644
2383 --- a/drivers/media/usb/uvc/uvc_video.c
2384 +++ b/drivers/media/usb/uvc/uvc_video.c
2385 @@ -1847,7 +1847,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
2386
2387 if (!enable) {
2388 uvc_uninit_video(stream, 1);
2389 - usb_set_interface(stream->dev->udev, stream->intfnum, 0);
2390 + if (stream->intf->num_altsetting > 1) {
2391 + usb_set_interface(stream->dev->udev,
2392 + stream->intfnum, 0);
2393 + } else {
2394 + /* UVC doesn't specify how to inform a bulk-based device
2395 + * when the video stream is stopped. Windows sends a
2396 + * CLEAR_FEATURE(HALT) request to the video streaming
2397 + * bulk endpoint, mimic the same behaviour.
2398 + */
2399 + unsigned int epnum = stream->header.bEndpointAddress
2400 + & USB_ENDPOINT_NUMBER_MASK;
2401 + unsigned int dir = stream->header.bEndpointAddress
2402 + & USB_ENDPOINT_DIR_MASK;
2403 + unsigned int pipe;
2404 +
2405 + pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
2406 + usb_clear_halt(stream->dev->udev, pipe);
2407 + }
2408 +
2409 uvc_queue_enable(&stream->queue, 0);
2410 uvc_video_clock_cleanup(stream);
2411 return 0;
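
For a UVC device that streams over a bulk endpoint (a single altsetting), the hunk above stops the stream by clearing the endpoint halt instead of selecting altsetting 0, mirroring what Windows does. A hedged sketch of building the pipe from an endpoint address and issuing the clear; OR-ing the direction bit into usb_sndbulkpipe(), as the hunk does, is equivalent to picking usb_rcvbulkpipe() for IN endpoints:

    #include <linux/usb.h>

    /* Clear the halt condition on a bulk endpoint given its descriptor address. */
    static int demo_clear_bulk_halt(struct usb_device *udev, u8 bEndpointAddress)
    {
            unsigned int epnum = bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
            unsigned int pipe;

            if (bEndpointAddress & USB_DIR_IN)
                    pipe = usb_rcvbulkpipe(udev, epnum);
            else
                    pipe = usb_sndbulkpipe(udev, epnum);

            return usb_clear_halt(udev, pipe);  /* CLEAR_FEATURE(ENDPOINT_HALT) */
    }
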
2412 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2413 index 8f7a6a454a4c..b63a5e584aa0 100644
2414 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2415 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
2416 @@ -787,8 +787,8 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
2417 #define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
2418 #define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
2419 #define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
2420 -#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 63, struct v4l2_subdev_edid32)
2421 -#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 64, struct v4l2_subdev_edid32)
2422 +#define VIDIOC_SUBDEV_G_EDID32 _IOWR('V', 40, struct v4l2_subdev_edid32)
2423 +#define VIDIOC_SUBDEV_S_EDID32 _IOWR('V', 41, struct v4l2_subdev_edid32)
2424 #define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
2425 #define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
2426 #define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
2427 diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
2428 index f7902fe8a526..a2e257970fec 100644
2429 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c
2430 +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
2431 @@ -26,6 +26,10 @@
2432 #include <linux/v4l2-dv-timings.h>
2433 #include <media/v4l2-dv-timings.h>
2434
2435 +MODULE_AUTHOR("Hans Verkuil");
2436 +MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
2437 +MODULE_LICENSE("GPL");
2438 +
2439 const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
2440 V4L2_DV_BT_CEA_640X480P59_94,
2441 V4L2_DV_BT_CEA_720X480I59_94,
2442 diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
2443 index 7dca1e640970..841717a2842c 100644
2444 --- a/drivers/mfd/88pm800.c
2445 +++ b/drivers/mfd/88pm800.c
2446 @@ -571,7 +571,7 @@ static int pm800_probe(struct i2c_client *client,
2447 ret = pm800_pages_init(chip);
2448 if (ret) {
2449 dev_err(&client->dev, "pm800_pages_init failed!\n");
2450 - goto err_page_init;
2451 + goto err_device_init;
2452 }
2453
2454 ret = device_800_init(chip, pdata);
2455 @@ -587,7 +587,6 @@ static int pm800_probe(struct i2c_client *client,
2456
2457 err_device_init:
2458 pm800_pages_exit(chip);
2459 -err_page_init:
2460 err_subchip_alloc:
2461 pm80x_deinit();
2462 out_init:
2463 diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
2464 index c9b1f6422941..bcfc9e85b4a0 100644
2465 --- a/drivers/mfd/88pm860x-core.c
2466 +++ b/drivers/mfd/88pm860x-core.c
2467 @@ -1179,12 +1179,18 @@ static int pm860x_probe(struct i2c_client *client,
2468 chip->companion_addr = pdata->companion_addr;
2469 chip->companion = i2c_new_dummy(chip->client->adapter,
2470 chip->companion_addr);
2471 + if (!chip->companion) {
2472 + dev_err(&client->dev,
2473 + "Failed to allocate I2C companion device\n");
2474 + return -ENODEV;
2475 + }
2476 chip->regmap_companion = regmap_init_i2c(chip->companion,
2477 &pm860x_regmap_config);
2478 if (IS_ERR(chip->regmap_companion)) {
2479 ret = PTR_ERR(chip->regmap_companion);
2480 dev_err(&chip->companion->dev,
2481 "Failed to allocate register map: %d\n", ret);
2482 + i2c_unregister_device(chip->companion);
2483 return ret;
2484 }
2485 i2c_set_clientdata(chip->companion, chip);
2486 diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
2487 index d3e23278d299..38917a822335 100644
2488 --- a/drivers/mfd/kempld-core.c
2489 +++ b/drivers/mfd/kempld-core.c
2490 @@ -322,9 +322,12 @@ static int kempld_detect_device(struct kempld_device_data *pld)
2491 return -ENODEV;
2492 }
2493
2494 - /* Release hardware mutex if aquired */
2495 - if (!(index_reg & KEMPLD_MUTEX_KEY))
2496 + /* Release hardware mutex if acquired */
2497 + if (!(index_reg & KEMPLD_MUTEX_KEY)) {
2498 iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
2499 + /* PXT and COMe-cPC2 boards may require a second release */
2500 + iowrite8(KEMPLD_MUTEX_KEY, pld->io_index);
2501 + }
2502
2503 mutex_unlock(&pld->lock);
2504
2505 diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
2506 index f53d5823a3f7..e5fce765accb 100644
2507 --- a/drivers/mfd/max77686.c
2508 +++ b/drivers/mfd/max77686.c
2509 @@ -121,6 +121,10 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
2510 dev_info(max77686->dev, "device found\n");
2511
2512 max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
2513 + if (!max77686->rtc) {
2514 + dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n");
2515 + return -ENODEV;
2516 + }
2517 i2c_set_clientdata(max77686->rtc, max77686);
2518
2519 max77686_irq_init(max77686);
2520 diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
2521 index e0859987ab6b..c5535f018466 100644
2522 --- a/drivers/mfd/max77693.c
2523 +++ b/drivers/mfd/max77693.c
2524 @@ -148,9 +148,18 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
2525 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
2526
2527 max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
2528 + if (!max77693->muic) {
2529 + dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
2530 + return -ENODEV;
2531 + }
2532 i2c_set_clientdata(max77693->muic, max77693);
2533
2534 max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
2535 + if (!max77693->haptic) {
2536 + dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
2537 + ret = -ENODEV;
2538 + goto err_i2c_haptic;
2539 + }
2540 i2c_set_clientdata(max77693->haptic, max77693);
2541
2542 /*
2543 @@ -184,8 +193,9 @@ err_mfd:
2544 max77693_irq_exit(max77693);
2545 err_irq:
2546 err_regmap_muic:
2547 - i2c_unregister_device(max77693->muic);
2548 i2c_unregister_device(max77693->haptic);
2549 +err_i2c_haptic:
2550 + i2c_unregister_device(max77693->muic);
2551 return ret;
2552 }
2553
2554 diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
2555 index 176aa26fc787..a83eed5c15ca 100644
2556 --- a/drivers/mfd/max8925-i2c.c
2557 +++ b/drivers/mfd/max8925-i2c.c
2558 @@ -181,9 +181,18 @@ static int max8925_probe(struct i2c_client *client,
2559 mutex_init(&chip->io_lock);
2560
2561 chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
2562 + if (!chip->rtc) {
2563 + dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
2564 + return -ENODEV;
2565 + }
2566 i2c_set_clientdata(chip->rtc, chip);
2567
2568 chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
2569 + if (!chip->adc) {
2570 + dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
2571 + i2c_unregister_device(chip->rtc);
2572 + return -ENODEV;
2573 + }
2574 i2c_set_clientdata(chip->adc, chip);
2575
2576 device_init_wakeup(&client->dev, 1);
2577 diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
2578 index 5adede0fb04c..8cf7a015cfe5 100644
2579 --- a/drivers/mfd/max8997.c
2580 +++ b/drivers/mfd/max8997.c
2581 @@ -208,10 +208,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
2582 mutex_init(&max8997->iolock);
2583
2584 max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
2585 + if (!max8997->rtc) {
2586 + dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
2587 + return -ENODEV;
2588 + }
2589 i2c_set_clientdata(max8997->rtc, max8997);
2590 +
2591 max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
2592 + if (!max8997->haptic) {
2593 + dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
2594 + ret = -ENODEV;
2595 + goto err_i2c_haptic;
2596 + }
2597 i2c_set_clientdata(max8997->haptic, max8997);
2598 +
2599 max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
2600 + if (!max8997->muic) {
2601 + dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
2602 + ret = -ENODEV;
2603 + goto err_i2c_muic;
2604 + }
2605 i2c_set_clientdata(max8997->muic, max8997);
2606
2607 pm_runtime_set_active(max8997->dev);
2608 @@ -239,7 +255,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
2609 err_mfd:
2610 mfd_remove_devices(max8997->dev);
2611 i2c_unregister_device(max8997->muic);
2612 +err_i2c_muic:
2613 i2c_unregister_device(max8997->haptic);
2614 +err_i2c_haptic:
2615 i2c_unregister_device(max8997->rtc);
2616 return ret;
2617 }
2618 diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
2619 index 5d5e186b5d8b..592db06098e6 100644
2620 --- a/drivers/mfd/max8998.c
2621 +++ b/drivers/mfd/max8998.c
2622 @@ -215,6 +215,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
2623 mutex_init(&max8998->iolock);
2624
2625 max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
2626 + if (!max8998->rtc) {
2627 + dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
2628 + return -ENODEV;
2629 + }
2630 i2c_set_clientdata(max8998->rtc, max8998);
2631
2632 max8998_irq_init(max8998);
2633 diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
2634 index 714e2135210e..a76cd85a2868 100644
2635 --- a/drivers/mfd/sec-core.c
2636 +++ b/drivers/mfd/sec-core.c
2637 @@ -252,6 +252,10 @@ static int sec_pmic_probe(struct i2c_client *i2c,
2638 }
2639
2640 sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
2641 + if (!sec_pmic->rtc) {
2642 + dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
2643 + return -ENODEV;
2644 + }
2645 i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
2646
2647 sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
2648 diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
2649 index 1f142d76cbbc..d6573318977f 100644
2650 --- a/drivers/mfd/tps65910.c
2651 +++ b/drivers/mfd/tps65910.c
2652 @@ -255,8 +255,10 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
2653 ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
2654 IRQF_ONESHOT, pdata->irq_base,
2655 tps6591x_irqs_chip, &tps65910->irq_data);
2656 - if (ret < 0)
2657 + if (ret < 0) {
2658 dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
2659 + tps65910->chip_irq = 0;
2660 + }
2661 return ret;
2662 }
2663
2664 diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
2665 index ed718328eff1..e87140bef667 100644
2666 --- a/drivers/mfd/twl-core.c
2667 +++ b/drivers/mfd/twl-core.c
2668 @@ -282,11 +282,11 @@ static struct reg_default twl4030_49_defaults[] = {
2669 static bool twl4030_49_nop_reg(struct device *dev, unsigned int reg)
2670 {
2671 switch (reg) {
2672 - case 0:
2673 - case 3:
2674 - case 40:
2675 - case 41:
2676 - case 42:
2677 + case 0x00:
2678 + case 0x03:
2679 + case 0x40:
2680 + case 0x41:
2681 + case 0x42:
2682 return false;
2683 default:
2684 return true;
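
The twl-core change is purely a number-base fix: the case labels were written in decimal (0, 3, 40, 41, 42) while the registers the function is meant to single out are 0x00, 0x03 and 0x40 through 0x42. As a quick check, decimal 40 is 0x28 and 0x40 is decimal 64, so three of the five labels previously matched the wrong registers; rewriting them in hexadecimal both fixes the match and makes the intent obvious.
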
2685 diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
2686 index 89a557972d1b..5a9bfa71df86 100644
2687 --- a/drivers/misc/mei/client.c
2688 +++ b/drivers/misc/mei/client.c
2689 @@ -74,23 +74,69 @@ int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
2690
2691
2692 /**
2693 - * mei_io_list_flush - removes list entry belonging to cl.
2694 + * mei_cl_cmp_id - tells if the clients are the same
2695 *
2696 - * @list: An instance of our list structure
2697 - * @cl: host client
2698 + * @cl1: host client 1
2699 + * @cl2: host client 2
2700 + *
2701 + * returns true - if the clients have the same host and ME ids

2702 + * false - otherwise
2703 + */
2704 +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
2705 + const struct mei_cl *cl2)
2706 +{
2707 + return cl1 && cl2 &&
2708 + (cl1->host_client_id == cl2->host_client_id) &&
2709 + (cl1->me_client_id == cl2->me_client_id);
2710 +}
2711 +
2712 +/**
2713 + * mei_io_list_flush - removes cbs belonging to cl.
2714 + *
2715 + * @list: an instance of our list structure
2716 + * @cl: host client, can be NULL for flushing the whole list
2717 + * @free: whether to free the cbs
2718 */
2719 -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
2720 +static void __mei_io_list_flush(struct mei_cl_cb *list,
2721 + struct mei_cl *cl, bool free)
2722 {
2723 struct mei_cl_cb *cb;
2724 struct mei_cl_cb *next;
2725
2726 + /* enable removing everything if no cl is specified */
2727 list_for_each_entry_safe(cb, next, &list->list, list) {
2728 - if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
2729 + if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
2730 list_del(&cb->list);
2731 + if (free)
2732 + mei_io_cb_free(cb);
2733 + }
2734 }
2735 }
2736
2737 /**
2738 + * mei_io_list_flush - removes list entry belonging to cl.
2739 + *
2740 + * @list: An instance of our list structure
2741 + * @cl: host client
2742 + */
2743 +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
2744 +{
2745 + __mei_io_list_flush(list, cl, false);
2746 +}
2747 +
2748 +
2749 +/**
2750 + * mei_io_list_free - removes cbs belonging to cl and frees them
2751 + *
2752 + * @list: An instance of our list structure
2753 + * @cl: host client
2754 + */
2755 +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
2756 +{
2757 + __mei_io_list_flush(list, cl, true);
2758 +}
2759 +
2760 +/**
2761 * mei_io_cb_free - free mei_cb_private related memory
2762 *
2763 * @cb: mei callback struct
2764 @@ -196,8 +242,8 @@ int mei_cl_flush_queues(struct mei_cl *cl)
2765
2766 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
2767 mei_io_list_flush(&cl->dev->read_list, cl);
2768 - mei_io_list_flush(&cl->dev->write_list, cl);
2769 - mei_io_list_flush(&cl->dev->write_waiting_list, cl);
2770 + mei_io_list_free(&cl->dev->write_list, cl);
2771 + mei_io_list_free(&cl->dev->write_waiting_list, cl);
2772 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
2773 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
2774 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
2775 @@ -942,20 +988,8 @@ void mei_cl_all_wakeup(struct mei_device *dev)
2776 */
2777 void mei_cl_all_write_clear(struct mei_device *dev)
2778 {
2779 - struct mei_cl_cb *cb, *next;
2780 - struct list_head *list;
2781 -
2782 - list = &dev->write_list.list;
2783 - list_for_each_entry_safe(cb, next, list, list) {
2784 - list_del(&cb->list);
2785 - mei_io_cb_free(cb);
2786 - }
2787 -
2788 - list = &dev->write_waiting_list.list;
2789 - list_for_each_entry_safe(cb, next, list, list) {
2790 - list_del(&cb->list);
2791 - mei_io_cb_free(cb);
2792 - }
2793 + mei_io_list_free(&dev->write_list, NULL);
2794 + mei_io_list_free(&dev->write_waiting_list, NULL);
2795 }
2796
2797
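
The client.c rework folds the old mei_io_list_flush() and the two open-coded loops in mei_cl_all_write_clear() into a single __mei_io_list_flush(list, cl, free) helper with thin flush/free wrappers, where a NULL client means "match every callback". The underlying pattern is the usual safe-iteration-with-removal idiom; a generic sketch with a hypothetical node type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_cb {
            struct list_head list;
            int owner_id;
    };

    /* Unlink entries owned by 'owner' (all entries when owner < 0) and
     * optionally free them. list_for_each_entry_safe() tolerates list_del()
     * on the current entry while iterating. */
    static void __demo_list_flush(struct list_head *head, int owner, bool free)
    {
            struct demo_cb *cb, *next;

            list_for_each_entry_safe(cb, next, head, list) {
                    if (owner < 0 || cb->owner_id == owner) {
                            list_del(&cb->list);
                            if (free)
                                    kfree(cb);
                    }
            }
    }

    static inline void demo_list_flush(struct list_head *head, int owner)
    {
            __demo_list_flush(head, owner, false);
    }

    static inline void demo_list_free(struct list_head *head, int owner)
    {
            __demo_list_flush(head, owner, true);
    }
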
2798 diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
2799 index c8396e582f1c..0820e121d137 100644
2800 --- a/drivers/misc/mei/client.h
2801 +++ b/drivers/misc/mei/client.h
2802 @@ -45,8 +45,6 @@ static inline void mei_io_list_init(struct mei_cl_cb *list)
2803 {
2804 INIT_LIST_HEAD(&list->list);
2805 }
2806 -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
2807 -
2808 /*
2809 * MEI Host Client Functions
2810 */
2811 @@ -61,22 +59,6 @@ int mei_cl_unlink(struct mei_cl *cl);
2812 int mei_cl_flush_queues(struct mei_cl *cl);
2813 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
2814
2815 -/**
2816 - * mei_cl_cmp_id - tells if file private data have same id
2817 - *
2818 - * @fe1: private data of 1. file object
2819 - * @fe2: private data of 2. file object
2820 - *
2821 - * returns true - if ids are the same and not NULL
2822 - */
2823 -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
2824 - const struct mei_cl *cl2)
2825 -{
2826 - return cl1 && cl2 &&
2827 - (cl1->host_client_id == cl2->host_client_id) &&
2828 - (cl1->me_client_id == cl2->me_client_id);
2829 -}
2830 -
2831
2832 int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
2833
2834 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
2835 index 66f411a6e8ea..cabc04383685 100644
2836 --- a/drivers/misc/mei/hw-me-regs.h
2837 +++ b/drivers/misc/mei/hw-me-regs.h
2838 @@ -115,6 +115,11 @@
2839 #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
2840
2841 #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
2842 +
2843 +/* Host Firmware Status Registers in PCI Config Space */
2844 +#define PCI_CFG_HFS_1 0x40
2845 +#define PCI_CFG_HFS_2 0x48
2846 +
2847 /*
2848 * MEI HW Section
2849 */
2850 diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
2851 index f0fbb5179f80..d36286874946 100644
2852 --- a/drivers/misc/mei/interrupt.c
2853 +++ b/drivers/misc/mei/interrupt.c
2854 @@ -428,8 +428,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
2855
2856 cl->status = 0;
2857 list_del(&cb->list);
2858 - if (MEI_WRITING == cl->writing_state &&
2859 - cb->fop_type == MEI_FOP_WRITE &&
2860 + if (cb->fop_type == MEI_FOP_WRITE &&
2861 cl != &dev->iamthif_cl) {
2862 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
2863 cl->writing_state = MEI_WRITE_COMPLETE;
2864 diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
2865 index 5424f8ff3f7f..cfe1789923a3 100644
2866 --- a/drivers/misc/mei/main.c
2867 +++ b/drivers/misc/mei/main.c
2868 @@ -653,8 +653,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
2869 goto out;
2870 }
2871
2872 - if (MEI_WRITE_COMPLETE == cl->writing_state)
2873 - mask |= (POLLIN | POLLRDNORM);
2874 + mask |= (POLLIN | POLLRDNORM);
2875
2876 out:
2877 mutex_unlock(&dev->device_lock);
2878 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
2879 index ddadd08956f4..bf4bb74c39ff 100644
2880 --- a/drivers/misc/mei/pci-me.c
2881 +++ b/drivers/misc/mei/pci-me.c
2882 @@ -100,15 +100,31 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
2883 const struct pci_device_id *ent)
2884 {
2885 u32 reg;
2886 - if (ent->device == MEI_DEV_ID_PBG_1) {
2887 - pci_read_config_dword(pdev, 0x48, &reg);
2888 - /* make sure that bit 9 is up and bit 10 is down */
2889 - if ((reg & 0x600) == 0x200) {
2890 - dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
2891 - return false;
2892 - }
2893 + /* Cougar Point || Patsburg */
2894 + if (ent->device == MEI_DEV_ID_CPT_1 ||
2895 + ent->device == MEI_DEV_ID_PBG_1) {
2896 + pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
2897 + /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
2898 + if ((reg & 0x600) == 0x200)
2899 + goto no_mei;
2900 }
2901 +
2902 + /* Lynx Point */
2903 + if (ent->device == MEI_DEV_ID_LPT_H ||
2904 + ent->device == MEI_DEV_ID_LPT_W ||
2905 + ent->device == MEI_DEV_ID_LPT_HR) {
2906 + /* Read ME FW Status check for SPS Firmware */
2907 + pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
2908 + /* if bits [19:16] = 15, running SPS Firmware */
2909 + if ((reg & 0xf0000) == 0xf0000)
2910 + goto no_mei;
2911 + }
2912 +
2913 return true;
2914 +
2915 +no_mei:
2916 + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
2917 + return false;
2918 }
2919 /**
2920 * mei_probe - Device Initialization Routine
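
The quirk now reads the host firmware status words that hw-me-regs.h exposes as PCI_CFG_HFS_1/PCI_CFG_HFS_2 (config offsets 0x40 and 0x48) and refuses to bind when the bits indicate NM firmware on Cougar Point/Patsburg or SPS firmware on Lynx Point. The general shape of such a config-space probe filter, as a hedged sketch with a placeholder offset and mask:

    #include <linux/pci.h>

    /* Return true when the function looks usable; offset and mask below are
     * placeholders for whatever the chipset documentation defines. */
    static bool demo_me_is_usable(struct pci_dev *pdev)
    {
            u32 reg = 0;

            pci_read_config_dword(pdev, 0x40, &reg);        /* example offset */

            /* e.g. bits [19:16] all set would mean "server firmware, skip" */
            if ((reg & 0xf0000) == 0xf0000) {
                    dev_info(&pdev->dev, "unsupported firmware, not binding\n");
                    return false;
            }

            return true;
    }
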
2921 diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
2922 index e8663a8c3406..ee082509b0ba 100644
2923 --- a/drivers/pci/host/pci-imx6.c
2924 +++ b/drivers/pci/host/pci-imx6.c
2925 @@ -424,20 +424,40 @@ static void imx6_pcie_reset_phy(struct pcie_port *pp)
2926
2927 static int imx6_pcie_link_up(struct pcie_port *pp)
2928 {
2929 - u32 rc, ltssm, rx_valid;
2930 + u32 rc, debug_r0, rx_valid;
2931 + int count = 5;
2932
2933 /*
2934 - * Test if the PHY reports that the link is up and also that
2935 - * the link training finished. It might happen that the PHY
2936 - * reports the link is already up, but the link training bit
2937 - * is still set, so make sure to check the training is done
2938 - * as well here.
2939 + * Test if the PHY reports that the link is up and also that the LTSSM
2940 + * training finished. There are three possible states of the link when
2941 + * this code is called:
2942 + * 1) The link is DOWN (unlikely)
2943 + * The link didn't come up yet for some reason. This usually means
2944 + * we have a real problem somewhere. Reset the PHY and exit. This
2945 + * state calls for inspection of the DEBUG registers.
2946 + * 2) The link is UP, but still in LTSSM training
2947 + * Wait for the training to finish, which should take a very short
2948 + * time. If the training does not finish, we have a problem and we
2949 + * need to inspect the DEBUG registers. If the training does finish,
2950 + * the link is up and operating correctly.
2951 + * 3) The link is UP and no longer in LTSSM training
2952 + * The link is up and operating correctly.
2953 */
2954 - rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
2955 - if ((rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
2956 - !(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
2957 - return 1;
2958 -
2959 + while (1) {
2960 + rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
2961 + if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
2962 + break;
2963 + if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
2964 + return 1;
2965 + if (!count--)
2966 + break;
2967 + dev_dbg(pp->dev, "Link is up, but still in training\n");
2968 + /*
2969 + * Wait a little bit, then re-check if the link finished
2970 + * the training.
2971 + */
2972 + usleep_range(1000, 2000);
2973 + }
2974 /*
2975 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
2976 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
2977 @@ -446,15 +466,16 @@ static int imx6_pcie_link_up(struct pcie_port *pp)
2978 * to gen2 is stuck
2979 */
2980 pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
2981 - ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
2982 + debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
2983
2984 if (rx_valid & 0x01)
2985 return 0;
2986
2987 - if (ltssm != 0x0d)
2988 + if ((debug_r0 & 0x3f) != 0x0d)
2989 return 0;
2990
2991 dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
2992 + dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
2993
2994 imx6_pcie_reset_phy(pp);
2995
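
The imx6 link-up test becomes a short bounded poll: when the PHY reports the link up but LTSSM training still in progress, the code now sleeps 1-2 ms and re-reads the debug register a handful of times before giving up, instead of immediately declaring the link down. The control-flow skeleton, as a minimal sketch:

    #include <linux/delay.h>
    #include <linux/types.h>

    /* Poll a readiness predicate up to 'tries' times with a ~1-2 ms pause.
     * Returns true as soon as it reports ready, false if it never does.
     * usleep_range() sleeps, so this must run in process context. */
    static bool demo_wait_ready(bool (*ready)(void *ctx), void *ctx, int tries)
    {
            while (tries--) {
                    if (ready(ctx))
                            return true;
                    usleep_range(1000, 2000);
            }

            return false;
    }
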
2996 diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
2997 index 0e79665afd44..05e352889868 100644
2998 --- a/drivers/pci/host/pci-mvebu.c
2999 +++ b/drivers/pci/host/pci-mvebu.c
3000 @@ -797,7 +797,7 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
3001
3002 for (i = 0; i < nranges; i++) {
3003 u32 flags = of_read_number(range, 1);
3004 - u32 slot = of_read_number(range, 2);
3005 + u32 slot = of_read_number(range + 1, 1);
3006 u64 cpuaddr = of_read_number(range + na, pna);
3007 unsigned long rtype;
3008
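
The mvebu fix hinges on the second argument of of_read_number() being a cell count, not an index: of_read_number(range, 2) fused the flags cell and the following cell into one 64-bit value, while the intent was to read just the single cell after the flags, i.e. of_read_number(range + 1, 1). A small sketch of the helper's semantics over a hypothetical cell array:

    #include <linux/of.h>
    #include <linux/printk.h>

    static void demo_parse_cells(const __be32 *range)
    {
            u32 flags = of_read_number(range, 1);      /* one cell: the flags */
            u32 slot  = of_read_number(range + 1, 1);  /* the next single cell */
            u64 fused = of_read_number(range, 2);      /* two cells as one u64 */

            pr_debug("flags=%#x slot=%u fused=%#llx\n",
                     flags, slot, (unsigned long long)fused);
    }
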
3009 diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
3010 index 4f6c2055f6b2..f0ea4fdfde87 100644
3011 --- a/drivers/regulator/arizona-ldo1.c
3012 +++ b/drivers/regulator/arizona-ldo1.c
3013 @@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = {
3014
3015 .vsel_reg = ARIZONA_LDO1_CONTROL_1,
3016 .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
3017 - .bypass_reg = ARIZONA_LDO1_CONTROL_1,
3018 - .bypass_mask = ARIZONA_LDO1_BYPASS,
3019 .min_uV = 900000,
3020 - .uV_step = 50000,
3021 - .n_voltages = 7,
3022 + .uV_step = 25000,
3023 + .n_voltages = 13,
3024 .enable_time = 500,
3025
3026 .owner = THIS_MODULE,
3027 @@ -203,6 +201,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
3028 */
3029 switch (arizona->type) {
3030 case WM5102:
3031 + case WM8997:
3032 desc = &arizona_ldo1_hc;
3033 ldo1->init_data = arizona_ldo1_dvfs;
3034 break;
3035 diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
3036 index eb5d22795c47..bb86494e2b7b 100644
3037 --- a/drivers/s390/char/con3215.c
3038 +++ b/drivers/s390/char/con3215.c
3039 @@ -922,7 +922,7 @@ static int __init con3215_init(void)
3040 raw3215_freelist = req;
3041 }
3042
3043 - cdev = ccw_device_probe_console();
3044 + cdev = ccw_device_probe_console(&raw3215_ccw_driver);
3045 if (IS_ERR(cdev))
3046 return -ENODEV;
3047
3048 diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
3049 index 699fd3e363df..bb6b0df50b33 100644
3050 --- a/drivers/s390/char/con3270.c
3051 +++ b/drivers/s390/char/con3270.c
3052 @@ -576,7 +576,6 @@ static struct console con3270 = {
3053 static int __init
3054 con3270_init(void)
3055 {
3056 - struct ccw_device *cdev;
3057 struct raw3270 *rp;
3058 void *cbuf;
3059 int i;
3060 @@ -591,10 +590,7 @@ con3270_init(void)
3061 cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
3062 }
3063
3064 - cdev = ccw_device_probe_console();
3065 - if (IS_ERR(cdev))
3066 - return -ENODEV;
3067 - rp = raw3270_setup_console(cdev);
3068 + rp = raw3270_setup_console();
3069 if (IS_ERR(rp))
3070 return PTR_ERR(rp);
3071
3072 diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
3073 index 2cdec21e8924..de2c0483949f 100644
3074 --- a/drivers/s390/char/raw3270.c
3075 +++ b/drivers/s390/char/raw3270.c
3076 @@ -776,16 +776,24 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
3077 }
3078
3079 #ifdef CONFIG_TN3270_CONSOLE
3080 +/* Tentative definition - see below for actual definition. */
3081 +static struct ccw_driver raw3270_ccw_driver;
3082 +
3083 /*
3084 * Setup 3270 device configured as console.
3085 */
3086 -struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
3087 +struct raw3270 __init *raw3270_setup_console(void)
3088 {
3089 + struct ccw_device *cdev;
3090 unsigned long flags;
3091 struct raw3270 *rp;
3092 char *ascebc;
3093 int rc;
3094
3095 + cdev = ccw_device_probe_console(&raw3270_ccw_driver);
3096 + if (IS_ERR(cdev))
3097 + return ERR_CAST(cdev);
3098 +
3099 rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
3100 ascebc = kzalloc(256, GFP_KERNEL);
3101 rc = raw3270_setup_device(cdev, rp, ascebc);
3102 diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
3103 index 7b73ff8c1bd7..359276a88396 100644
3104 --- a/drivers/s390/char/raw3270.h
3105 +++ b/drivers/s390/char/raw3270.h
3106 @@ -190,7 +190,7 @@ raw3270_put_view(struct raw3270_view *view)
3107 wake_up(&raw3270_wait_queue);
3108 }
3109
3110 -struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
3111 +struct raw3270 *raw3270_setup_console(void);
3112 void raw3270_wait_cons_dev(struct raw3270 *);
3113
3114 /* Notifier for device addition/removal */
3115 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
3116 index e9d783563cbb..4283dd3cdd49 100644
3117 --- a/drivers/s390/cio/device.c
3118 +++ b/drivers/s390/cio/device.c
3119 @@ -1609,7 +1609,7 @@ out_unlock:
3120 return rc;
3121 }
3122
3123 -struct ccw_device *ccw_device_probe_console(void)
3124 +struct ccw_device *ccw_device_probe_console(struct ccw_driver *drv)
3125 {
3126 struct io_subchannel_private *io_priv;
3127 struct ccw_device *cdev;
3128 @@ -1631,6 +1631,7 @@ struct ccw_device *ccw_device_probe_console(void)
3129 kfree(io_priv);
3130 return cdev;
3131 }
3132 + cdev->drv = drv;
3133 set_io_private(sch, io_priv);
3134 ret = ccw_device_console_enable(cdev, sch);
3135 if (ret) {
3136 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
3137 index 4f6a30b8e5f9..652b41b4ddbd 100644
3138 --- a/drivers/scsi/arcmsr/arcmsr_hba.c
3139 +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
3140 @@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3141 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3142 {
3143 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3144 - dma_addr_t dma_coherent_handle;
3145 +
3146 /*
3147 ********************************************************************
3148 ** here we need to tell iop 331 our freeccb.HighPart
3149 ** if freeccb.HighPart is not zero
3150 ********************************************************************
3151 */
3152 - dma_coherent_handle = acb->dma_coherent_handle;
3153 - cdb_phyaddr = (uint32_t)(dma_coherent_handle);
3154 - cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
3155 + cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
3156 + cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
3157 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3158 /*
3159 ***********************************************************************
3160 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
3161 index 89a53002b585..83cb61266979 100644
3162 --- a/drivers/scsi/qla2xxx/qla_os.c
3163 +++ b/drivers/scsi/qla2xxx/qla_os.c
3164 @@ -2536,7 +2536,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3165 ha->flags.enable_64bit_addressing ? "enable" :
3166 "disable");
3167 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
3168 - if (!ret) {
3169 + if (ret) {
3170 ql_log_pci(ql_log_fatal, pdev, 0x0031,
3171 "Failed to allocate memory for adapter, aborting.\n");
3172
3173 @@ -3478,10 +3478,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3174 else {
3175 qla2x00_set_reserved_loop_ids(ha);
3176 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3177 - "loop_id_map=%p. \n", ha->loop_id_map);
3178 + "loop_id_map=%p.\n", ha->loop_id_map);
3179 }
3180
3181 - return 1;
3182 + return 0;
3183
3184 fail_async_pd:
3185 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
3186 diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
3187 index bf98d63d92b3..e63d27013142 100644
3188 --- a/drivers/spi/spi-dw.c
3189 +++ b/drivers/spi/spi-dw.c
3190 @@ -671,12 +671,6 @@ static int dw_spi_setup(struct spi_device *spi)
3191 return 0;
3192 }
3193
3194 -static void dw_spi_cleanup(struct spi_device *spi)
3195 -{
3196 - struct chip_data *chip = spi_get_ctldata(spi);
3197 - kfree(chip);
3198 -}
3199 -
3200 static int init_queue(struct dw_spi *dws)
3201 {
3202 INIT_LIST_HEAD(&dws->queue);
3203 @@ -806,7 +800,6 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
3204 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
3205 master->bus_num = dws->bus_num;
3206 master->num_chipselect = dws->num_cs;
3207 - master->cleanup = dw_spi_cleanup;
3208 master->setup = dw_spi_setup;
3209 master->transfer = dw_spi_transfer;
3210
3211 diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
3212 index d4d3cc534792..8933eb7ab79b 100644
3213 --- a/drivers/spi/spi-efm32.c
3214 +++ b/drivers/spi/spi-efm32.c
3215 @@ -487,6 +487,9 @@ static int efm32_spi_remove(struct platform_device *pdev)
3216
3217 static const struct of_device_id efm32_spi_dt_ids[] = {
3218 {
3219 + .compatible = "energymicro,efm32-spi",
3220 + }, {
3221 + /* doesn't follow the "vendor,device" scheme, don't use */
3222 .compatible = "efm32,spi",
3223 }, {
3224 /* sentinel */
3225 diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
3226 index 71db683098d6..b59af0303581 100644
3227 --- a/drivers/staging/comedi/drivers/usbdux.c
3228 +++ b/drivers/staging/comedi/drivers/usbdux.c
3229 @@ -493,7 +493,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
3230 /* pointer to the DA */
3231 *datap++ = val & 0xff;
3232 *datap++ = (val >> 8) & 0xff;
3233 - *datap++ = chan;
3234 + *datap++ = chan << 6;
3235 devpriv->ao_readback[chan] = val;
3236
3237 s->async->events |= COMEDI_CB_BLOCK;
3238 @@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
3239 /* set current channel of the running acquisition to zero */
3240 s->async->cur_chan = 0;
3241
3242 - for (i = 0; i < cmd->chanlist_len; ++i) {
3243 - unsigned int chan = CR_CHAN(cmd->chanlist[i]);
3244 -
3245 - devpriv->ao_chanlist[i] = chan << 6;
3246 - }
3247 + for (i = 0; i < cmd->chanlist_len; ++i)
3248 + devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
3249
3250 /* we count in steps of 1ms (125us) */
3251 /* 125us mode not used yet */
3252 diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
3253 index c9c180649c12..cec3f1628cd8 100644
3254 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c
3255 +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
3256 @@ -542,7 +542,7 @@ _func_exit_;
3257 /* set the security information in the recv_frame */
3258 static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
3259 {
3260 - u8 *psta_addr = NULL, *ptr;
3261 + u8 *psta_addr, *ptr;
3262 uint auth_alg;
3263 struct recv_frame_hdr *pfhdr;
3264 struct sta_info *psta;
3265 @@ -556,7 +556,6 @@ static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *pre
3266 _func_enter_;
3267
3268 pstapriv = &adapter->stapriv;
3269 - psta = rtw_get_stainfo(pstapriv, psta_addr);
3270
3271 auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
3272
3273 @@ -564,6 +563,7 @@ _func_enter_;
3274 pfhdr = &precv_frame->u.hdr;
3275 pattrib = &pfhdr->attrib;
3276 psta_addr = pattrib->ta;
3277 + psta = rtw_get_stainfo(pstapriv, psta_addr);
3278
3279 prtnframe = NULL;
3280
3281 diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
3282 index 23ec684b60e1..274c359279ef 100644
3283 --- a/drivers/staging/rtl8712/rtl871x_recv.c
3284 +++ b/drivers/staging/rtl8712/rtl871x_recv.c
3285 @@ -254,7 +254,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
3286 struct sta_info *psta;
3287 struct sta_priv *pstapriv;
3288 union recv_frame *prtnframe;
3289 - u16 ether_type = 0;
3290 + u16 ether_type;
3291
3292 pstapriv = &adapter->stapriv;
3293 ptr = get_recvframe_data(precv_frame);
3294 @@ -263,15 +263,14 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
3295 psta = r8712_get_stainfo(pstapriv, psta_addr);
3296 auth_alg = adapter->securitypriv.AuthAlgrthm;
3297 if (auth_alg == 2) {
3298 + /* get ether_type */
3299 + ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
3300 + memcpy(&ether_type, ptr, 2);
3301 + ether_type = ntohs((unsigned short)ether_type);
3302 +
3303 if ((psta != NULL) && (psta->ieee8021x_blocked)) {
3304 /* blocked
3305 * only accept EAPOL frame */
3306 - prtnframe = precv_frame;
3307 - /*get ether_type */
3308 - ptr = ptr + pfhdr->attrib.hdrlen +
3309 - pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
3310 - memcpy(&ether_type, ptr, 2);
3311 - ether_type = ntohs((unsigned short)ether_type);
3312 if (ether_type == 0x888e)
3313 prtnframe = precv_frame;
3314 else {
3315 diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
3316 index f0fcbf7c7d7f..868226953d5c 100644
3317 --- a/drivers/staging/serqt_usb2/serqt_usb2.c
3318 +++ b/drivers/staging/serqt_usb2/serqt_usb2.c
3319 @@ -724,7 +724,7 @@ static int qt_startup(struct usb_serial *serial)
3320 goto startup_error;
3321 }
3322
3323 - switch (serial->dev->descriptor.idProduct) {
3324 + switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
3325 case QUATECH_DSU100:
3326 case QUATECH_QSU100:
3327 case QUATECH_ESU100A:
3328 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3329 index b83ec378d04f..7e5469a80fe3 100644
3330 --- a/drivers/target/iscsi/iscsi_target.c
3331 +++ b/drivers/target/iscsi/iscsi_target.c
3332 @@ -2468,6 +2468,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
3333 {
3334 struct iscsi_cmd *cmd;
3335 struct iscsi_conn *conn_p;
3336 + bool found = false;
3337
3338 /*
3339 * Only send a Asynchronous Message on connections whos network
3340 @@ -2476,11 +2477,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
3341 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
3342 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
3343 iscsit_inc_conn_usage_count(conn_p);
3344 + found = true;
3345 break;
3346 }
3347 }
3348
3349 - if (!conn_p)
3350 + if (!found)
3351 return;
3352
3353 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
3354 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
3355 index 554d4f75a75a..9e0232cca92e 100644
3356 --- a/drivers/target/target_core_iblock.c
3357 +++ b/drivers/target/target_core_iblock.c
3358 @@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
3359
3360 if (ib_dev->ibd_bd != NULL)
3361 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
3362 - if (ib_dev->ibd_bio_set != NULL) {
3363 - bioset_integrity_free(ib_dev->ibd_bio_set);
3364 + if (ib_dev->ibd_bio_set != NULL)
3365 bioset_free(ib_dev->ibd_bio_set);
3366 - }
3367 +
3368 kfree(ib_dev);
3369 }
3370
3371 diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
3372 index 66a5aba5a0d9..b920db3388cd 100644
3373 --- a/drivers/target/target_core_rd.c
3374 +++ b/drivers/target/target_core_rd.c
3375 @@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
3376 rd_dev->sg_prot_count = 0;
3377 }
3378
3379 -static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
3380 +static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
3381 {
3382 struct rd_dev_sg_table *sg_table;
3383 u32 total_sg_needed, sg_tables;
3384 @@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
3385
3386 if (rd_dev->rd_flags & RDF_NULLIO)
3387 return 0;
3388 -
3389 - total_sg_needed = rd_dev->rd_page_count / prot_length;
3390 + /*
3391 + * prot_length=8byte dif data
3392 + * tot sg needed = rd_page_count * (PGSZ/block_size) *
3393 + * (prot_length/block_size) + pad
3394 + * PGSZ canceled each other.
3395 + */
3396 + total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
3397
3398 sg_tables = (total_sg_needed / max_sg_per_table) + 1;
3399
3400 @@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
3401 if (!dev->dev_attrib.pi_prot_type)
3402 return 0;
3403
3404 - return rd_build_prot_space(rd_dev, dev->prot_length);
3405 + return rd_build_prot_space(rd_dev, dev->prot_length,
3406 + dev->dev_attrib.block_size);
3407 }
3408
3409 static void rd_free_prot(struct se_device *dev)
3410 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
3411 index 77e6531fb0a1..aa064a7bb446 100644
3412 --- a/drivers/target/target_core_sbc.c
3413 +++ b/drivers/target/target_core_sbc.c
3414 @@ -425,13 +425,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
3415 goto out;
3416 }
3417
3418 - write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
3419 + write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
3420 GFP_KERNEL);
3421 if (!write_sg) {
3422 pr_err("Unable to allocate compare_and_write sg\n");
3423 ret = TCM_OUT_OF_RESOURCES;
3424 goto out;
3425 }
3426 + sg_init_table(write_sg, cmd->t_data_nents);
3427 /*
3428 * Setup verify and write data payloads from total NumberLBAs.
3429 */
3430 diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
3431 index ae52c08dad09..e9fe60fa7df1 100644
3432 --- a/drivers/target/tcm_fc/tfc_sess.c
3433 +++ b/drivers/target/tcm_fc/tfc_sess.c
3434 @@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
3435
3436 if (tport) {
3437 tport->tpg = tpg;
3438 + tpg->tport = tport;
3439 return tport;
3440 }
3441
3442 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
3443 index 50b46881b6ca..94f9e3a38412 100644
3444 --- a/drivers/tty/hvc/hvc_console.c
3445 +++ b/drivers/tty/hvc/hvc_console.c
3446 @@ -31,6 +31,7 @@
3447 #include <linux/list.h>
3448 #include <linux/module.h>
3449 #include <linux/major.h>
3450 +#include <linux/atomic.h>
3451 #include <linux/sysrq.h>
3452 #include <linux/tty.h>
3453 #include <linux/tty_flip.h>
3454 @@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
3455 /* Picks up late kicks after list walk but before schedule() */
3456 static int hvc_kicked;
3457
3458 +/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
3459 +static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
3460 +
3461 static int hvc_init(void);
3462
3463 #ifdef CONFIG_MAGIC_SYSRQ
3464 @@ -851,7 +855,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
3465 int i;
3466
3467 /* We wait until a driver actually comes along */
3468 - if (!hvc_driver) {
3469 + if (atomic_inc_not_zero(&hvc_needs_init)) {
3470 int err = hvc_init();
3471 if (err)
3472 return ERR_PTR(err);
3473 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
3474 index ab90a0156828..4ebf1b2034e8 100644
3475 --- a/drivers/usb/core/driver.c
3476 +++ b/drivers/usb/core/driver.c
3477 @@ -990,8 +990,7 @@ EXPORT_SYMBOL_GPL(usb_deregister);
3478 * it doesn't support pre_reset/post_reset/reset_resume or
3479 * because it doesn't support suspend/resume.
3480 *
3481 - * The caller must hold @intf's device's lock, but not its pm_mutex
3482 - * and not @intf->dev.sem.
3483 + * The caller must hold @intf's device's lock, but not @intf's lock.
3484 */
3485 void usb_forced_unbind_intf(struct usb_interface *intf)
3486 {
3487 @@ -1004,16 +1003,37 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
3488 intf->needs_binding = 1;
3489 }
3490
3491 +/*
3492 + * Unbind drivers for @udev's marked interfaces. These interfaces have
3493 + * the needs_binding flag set, for example by usb_resume_interface().
3494 + *
3495 + * The caller must hold @udev's device lock.
3496 + */
3497 +static void unbind_marked_interfaces(struct usb_device *udev)
3498 +{
3499 + struct usb_host_config *config;
3500 + int i;
3501 + struct usb_interface *intf;
3502 +
3503 + config = udev->actconfig;
3504 + if (config) {
3505 + for (i = 0; i < config->desc.bNumInterfaces; ++i) {
3506 + intf = config->interface[i];
3507 + if (intf->dev.driver && intf->needs_binding)
3508 + usb_forced_unbind_intf(intf);
3509 + }
3510 + }
3511 +}
3512 +
3513 /* Delayed forced unbinding of a USB interface driver and scan
3514 * for rebinding.
3515 *
3516 - * The caller must hold @intf's device's lock, but not its pm_mutex
3517 - * and not @intf->dev.sem.
3518 + * The caller must hold @intf's device's lock, but not @intf's lock.
3519 *
3520 * Note: Rebinds will be skipped if a system sleep transition is in
3521 * progress and the PM "complete" callback hasn't occurred yet.
3522 */
3523 -void usb_rebind_intf(struct usb_interface *intf)
3524 +static void usb_rebind_intf(struct usb_interface *intf)
3525 {
3526 int rc;
3527
3528 @@ -1030,68 +1050,66 @@ void usb_rebind_intf(struct usb_interface *intf)
3529 }
3530 }
3531
3532 -#ifdef CONFIG_PM
3533 -
3534 -/* Unbind drivers for @udev's interfaces that don't support suspend/resume
3535 - * There is no check for reset_resume here because it can be determined
3536 - * only during resume whether reset_resume is needed.
3537 +/*
3538 + * Rebind drivers to @udev's marked interfaces. These interfaces have
3539 + * the needs_binding flag set.
3540 *
3541 * The caller must hold @udev's device lock.
3542 */
3543 -static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
3544 +static void rebind_marked_interfaces(struct usb_device *udev)
3545 {
3546 struct usb_host_config *config;
3547 int i;
3548 struct usb_interface *intf;
3549 - struct usb_driver *drv;
3550
3551 config = udev->actconfig;
3552 if (config) {
3553 for (i = 0; i < config->desc.bNumInterfaces; ++i) {
3554 intf = config->interface[i];
3555 -
3556 - if (intf->dev.driver) {
3557 - drv = to_usb_driver(intf->dev.driver);
3558 - if (!drv->suspend || !drv->resume)
3559 - usb_forced_unbind_intf(intf);
3560 - }
3561 + if (intf->needs_binding)
3562 + usb_rebind_intf(intf);
3563 }
3564 }
3565 }
3566
3567 -/* Unbind drivers for @udev's interfaces that failed to support reset-resume.
3568 - * These interfaces have the needs_binding flag set by usb_resume_interface().
3569 +/*
3570 + * Unbind all of @udev's marked interfaces and then rebind all of them.
3571 + * This ordering is necessary because some drivers claim several interfaces
3572 + * when they are first probed.
3573 *
3574 * The caller must hold @udev's device lock.
3575 */
3576 -static void unbind_no_reset_resume_drivers_interfaces(struct usb_device *udev)
3577 +void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev)
3578 {
3579 - struct usb_host_config *config;
3580 - int i;
3581 - struct usb_interface *intf;
3582 -
3583 - config = udev->actconfig;
3584 - if (config) {
3585 - for (i = 0; i < config->desc.bNumInterfaces; ++i) {
3586 - intf = config->interface[i];
3587 - if (intf->dev.driver && intf->needs_binding)
3588 - usb_forced_unbind_intf(intf);
3589 - }
3590 - }
3591 + unbind_marked_interfaces(udev);
3592 + rebind_marked_interfaces(udev);
3593 }
3594
3595 -static void do_rebind_interfaces(struct usb_device *udev)
3596 +#ifdef CONFIG_PM
3597 +
3598 +/* Unbind drivers for @udev's interfaces that don't support suspend/resume
3599 + * There is no check for reset_resume here because it can be determined
3600 + * only during resume whether reset_resume is needed.
3601 + *
3602 + * The caller must hold @udev's device lock.
3603 + */
3604 +static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
3605 {
3606 struct usb_host_config *config;
3607 int i;
3608 struct usb_interface *intf;
3609 + struct usb_driver *drv;
3610
3611 config = udev->actconfig;
3612 if (config) {
3613 for (i = 0; i < config->desc.bNumInterfaces; ++i) {
3614 intf = config->interface[i];
3615 - if (intf->needs_binding)
3616 - usb_rebind_intf(intf);
3617 +
3618 + if (intf->dev.driver) {
3619 + drv = to_usb_driver(intf->dev.driver);
3620 + if (!drv->suspend || !drv->resume)
3621 + usb_forced_unbind_intf(intf);
3622 + }
3623 }
3624 }
3625 }
3626 @@ -1420,7 +1438,7 @@ int usb_resume_complete(struct device *dev)
3627 * whose needs_binding flag is set
3628 */
3629 if (udev->state != USB_STATE_NOTATTACHED)
3630 - do_rebind_interfaces(udev);
3631 + rebind_marked_interfaces(udev);
3632 return 0;
3633 }
3634
3635 @@ -1442,7 +1460,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
3636 pm_runtime_disable(dev);
3637 pm_runtime_set_active(dev);
3638 pm_runtime_enable(dev);
3639 - unbind_no_reset_resume_drivers_interfaces(udev);
3640 + unbind_marked_interfaces(udev);
3641 }
3642
3643 /* Avoid PM error messages for devices disconnected while suspended
3644 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
3645 index d59d99347d54..1f02e65fe305 100644
3646 --- a/drivers/usb/core/hcd-pci.c
3647 +++ b/drivers/usb/core/hcd-pci.c
3648 @@ -75,7 +75,7 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
3649 PCI_SLOT(companion->devfn) != slot)
3650 continue;
3651 companion_hcd = pci_get_drvdata(companion);
3652 - if (!companion_hcd)
3653 + if (!companion_hcd || !companion_hcd->self.root_hub)
3654 continue;
3655 fn(pdev, hcd, companion, companion_hcd);
3656 }
3657 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3658 index 64ea21971be2..d498d03afdbd 100644
3659 --- a/drivers/usb/core/hub.c
3660 +++ b/drivers/usb/core/hub.c
3661 @@ -5340,10 +5340,11 @@ int usb_reset_device(struct usb_device *udev)
3662 else if (cintf->condition ==
3663 USB_INTERFACE_BOUND)
3664 rebind = 1;
3665 + if (rebind)
3666 + cintf->needs_binding = 1;
3667 }
3668 - if (ret == 0 && rebind)
3669 - usb_rebind_intf(cintf);
3670 }
3671 + usb_unbind_and_rebind_marked_interfaces(udev);
3672 }
3673
3674 usb_autosuspend_device(udev);
3675 diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
3676 index 823857767a16..0923add72b59 100644
3677 --- a/drivers/usb/core/usb.h
3678 +++ b/drivers/usb/core/usb.h
3679 @@ -55,7 +55,7 @@ extern int usb_match_one_id_intf(struct usb_device *dev,
3680 extern int usb_match_device(struct usb_device *dev,
3681 const struct usb_device_id *id);
3682 extern void usb_forced_unbind_intf(struct usb_interface *intf);
3683 -extern void usb_rebind_intf(struct usb_interface *intf);
3684 +extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
3685
3686 extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
3687 struct dev_state *owner);
3688 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
3689 index f8af8d44af85..546e67a2e4cd 100644
3690 --- a/drivers/usb/dwc3/core.h
3691 +++ b/drivers/usb/dwc3/core.h
3692 @@ -815,15 +815,15 @@ struct dwc3_event_depevt {
3693 * 12 - VndrDevTstRcved
3694 * @reserved15_12: Reserved, not used
3695 * @event_info: Information about this event
3696 - * @reserved31_24: Reserved, not used
3697 + * @reserved31_25: Reserved, not used
3698 */
3699 struct dwc3_event_devt {
3700 u32 one_bit:1;
3701 u32 device_event:7;
3702 u32 type:4;
3703 u32 reserved15_12:4;
3704 - u32 event_info:8;
3705 - u32 reserved31_24:8;
3706 + u32 event_info:9;
3707 + u32 reserved31_25:7;
3708 } __packed;
3709
3710 /**
3711 @@ -856,6 +856,19 @@ union dwc3_event {
3712 struct dwc3_event_gevt gevt;
3713 };
3714
3715 +/**
3716 + * struct dwc3_gadget_ep_cmd_params - representation of endpoint command
3717 + * parameters
3718 + * @param2: third parameter
3719 + * @param1: second parameter
3720 + * @param0: first parameter
3721 + */
3722 +struct dwc3_gadget_ep_cmd_params {
3723 + u32 param2;
3724 + u32 param1;
3725 + u32 param0;
3726 +};
3727 +
3728 /*
3729 * DWC3 Features to be used as Driver Data
3730 */
3731 @@ -881,11 +894,31 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
3732 #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
3733 int dwc3_gadget_init(struct dwc3 *dwc);
3734 void dwc3_gadget_exit(struct dwc3 *dwc);
3735 +int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
3736 +int dwc3_gadget_get_link_state(struct dwc3 *dwc);
3737 +int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
3738 +int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
3739 + unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
3740 +int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
3741 #else
3742 static inline int dwc3_gadget_init(struct dwc3 *dwc)
3743 { return 0; }
3744 static inline void dwc3_gadget_exit(struct dwc3 *dwc)
3745 { }
3746 +static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
3747 +{ return 0; }
3748 +static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
3749 +{ return 0; }
3750 +static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
3751 + enum dwc3_link_state state)
3752 +{ return 0; }
3753 +
3754 +static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
3755 + unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
3756 +{ return 0; }
3757 +static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
3758 + int cmd, u32 param)
3759 +{ return 0; }
3760 #endif
3761
3762 /* power management interface */
3763 diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
3764 index febe1aa7b714..a0ee75b68a80 100644
3765 --- a/drivers/usb/dwc3/gadget.h
3766 +++ b/drivers/usb/dwc3/gadget.h
3767 @@ -56,12 +56,6 @@ struct dwc3;
3768 /* DEPXFERCFG parameter 0 */
3769 #define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
3770
3771 -struct dwc3_gadget_ep_cmd_params {
3772 - u32 param2;
3773 - u32 param1;
3774 - u32 param0;
3775 -};
3776 -
3777 /* -------------------------------------------------------------------------- */
3778
3779 #define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
3780 @@ -85,9 +79,6 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
3781 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
3782 int status);
3783
3784 -int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
3785 -int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
3786 -
3787 void dwc3_ep0_interrupt(struct dwc3 *dwc,
3788 const struct dwc3_event_depevt *event);
3789 void dwc3_ep0_out_start(struct dwc3 *dwc);
3790 @@ -95,9 +86,6 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
3791 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
3792 gfp_t gfp_flags);
3793 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
3794 -int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
3795 - unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
3796 -int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param);
3797
3798 /**
3799 * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
3800 diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
3801 index 52771d4c44bc..167843de2d8a 100644
3802 --- a/drivers/usb/gadget/atmel_usba_udc.c
3803 +++ b/drivers/usb/gadget/atmel_usba_udc.c
3804 @@ -1827,12 +1827,12 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
3805 toggle_bias(0);
3806 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
3807
3808 - udc->driver = NULL;
3809 -
3810 clk_disable_unprepare(udc->hclk);
3811 clk_disable_unprepare(udc->pclk);
3812
3813 - DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
3814 + DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
3815 +
3816 + udc->driver = NULL;
3817
3818 return 0;
3819 }
3820 diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
3821 index 0f8aad78b54f..460c266b8e24 100644
3822 --- a/drivers/usb/gadget/tcm_usb_gadget.c
3823 +++ b/drivers/usb/gadget/tcm_usb_gadget.c
3824 @@ -1613,7 +1613,7 @@ static struct se_wwn *usbg_make_tport(
3825 return ERR_PTR(-ENOMEM);
3826 }
3827 tport->tport_wwpn = wwpn;
3828 - snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name);
3829 + snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
3830 return &tport->tport_wwn;
3831 }
3832
3833 diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
3834 index 9f170c53e3d9..134f354ede62 100644
3835 --- a/drivers/usb/gadget/zero.c
3836 +++ b/drivers/usb/gadget/zero.c
3837 @@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
3838 ss_opts->isoc_interval = gzero_options.isoc_interval;
3839 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
3840 ss_opts->isoc_mult = gzero_options.isoc_mult;
3841 - ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
3842 + ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
3843 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
3844
3845 func_ss = usb_get_function(func_inst_ss);
3846 diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
3847 index af28b748e87a..633dbea28f30 100644
3848 --- a/drivers/usb/host/ehci-tegra.c
3849 +++ b/drivers/usb/host/ehci-tegra.c
3850 @@ -513,8 +513,31 @@ static struct platform_driver tegra_ehci_driver = {
3851 }
3852 };
3853
3854 +static int tegra_ehci_reset(struct usb_hcd *hcd)
3855 +{
3856 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
3857 + int retval;
3858 + int txfifothresh;
3859 +
3860 + retval = ehci_setup(hcd);
3861 + if (retval)
3862 + return retval;
3863 +
3864 + /*
3865 + * We should really pull this value out of tegra_ehci_soc_config, but
3866 + * to avoid needing access to it, make use of the fact that Tegra20 is
3867 + * the only one so far that needs a value of 10, and Tegra20 is the
3868 + * only one which doesn't set has_hostpc.
3869 + */
3870 + txfifothresh = ehci->has_hostpc ? 0x10 : 10;
3871 + ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
3872 +
3873 + return 0;
3874 +}
3875 +
3876 static const struct ehci_driver_overrides tegra_overrides __initconst = {
3877 .extra_priv_size = sizeof(struct tegra_ehci_hcd),
3878 + .reset = tegra_ehci_reset,
3879 };
3880
3881 static int __init ehci_tegra_init(void)
3882 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3883 index 04f986d9234f..08a5f92d6c54 100644
3884 --- a/drivers/usb/host/xhci-pci.c
3885 +++ b/drivers/usb/host/xhci-pci.c
3886 @@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3887 */
3888 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
3889 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
3890 +
3891 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
3892 }
3893 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3894 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
3895 @@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3896 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
3897 }
3898 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
3899 - pdev->device == 0x0015 &&
3900 - pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
3901 - pdev->subsystem_device == 0xc0cd)
3902 + pdev->device == 0x0015)
3903 xhci->quirks |= XHCI_RESET_ON_RESUME;
3904 if (pdev->vendor == PCI_VENDOR_ID_VIA)
3905 xhci->quirks |= XHCI_RESET_ON_RESUME;
3906 @@ -190,6 +190,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3907 struct usb_hcd *hcd;
3908
3909 driver = (struct hc_driver *)id->driver_data;
3910 +
3911 + /* Prevent runtime suspending between USB-2 and USB-3 initialization */
3912 + pm_runtime_get_noresume(&dev->dev);
3913 +
3914 /* Register the USB 2.0 roothub.
3915 * FIXME: USB core must know to register the USB 2.0 roothub first.
3916 * This is sort of silly, because we could just set the HCD driver flags
3917 @@ -199,7 +203,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3918 retval = usb_hcd_pci_probe(dev, id);
3919
3920 if (retval)
3921 - return retval;
3922 + goto put_runtime_pm;
3923
3924 /* USB 2.0 roothub is stored in the PCI device now. */
3925 hcd = dev_get_drvdata(&dev->dev);
3926 @@ -228,12 +232,17 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3927 if (xhci->quirks & XHCI_LPM_SUPPORT)
3928 hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
3929
3930 + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
3931 + pm_runtime_put_noidle(&dev->dev);
3932 +
3933 return 0;
3934
3935 put_usb3_hcd:
3936 usb_put_hcd(xhci->shared_hcd);
3937 dealloc_usb2_hcd:
3938 usb_hcd_pci_remove(dev);
3939 +put_runtime_pm:
3940 + pm_runtime_put_noidle(&dev->dev);
3941 return retval;
3942 }
3943
3944 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3945 index 0ed64eb68e48..dff9b5ead3b3 100644
3946 --- a/drivers/usb/host/xhci-ring.c
3947 +++ b/drivers/usb/host/xhci-ring.c
3948 @@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
3949 struct xhci_generic_trb *trb;
3950 struct xhci_ep_ctx *ep_ctx;
3951 dma_addr_t addr;
3952 + u64 hw_dequeue;
3953
3954 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
3955 ep_index, stream_id);
3956 @@ -559,56 +560,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
3957 stream_id);
3958 return;
3959 }
3960 - state->new_cycle_state = 0;
3961 - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3962 - "Finding segment containing stopped TRB.");
3963 - state->new_deq_seg = find_trb_seg(cur_td->start_seg,
3964 - dev->eps[ep_index].stopped_trb,
3965 - &state->new_cycle_state);
3966 - if (!state->new_deq_seg) {
3967 - WARN_ON(1);
3968 - return;
3969 - }
3970
3971 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
3972 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3973 "Finding endpoint context");
3974 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
3975 - state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
3976 + hw_dequeue = le64_to_cpu(ep_ctx->deq);
3977 +
3978 + /* Find virtual address and segment of hardware dequeue pointer */
3979 + state->new_deq_seg = ep_ring->deq_seg;
3980 + state->new_deq_ptr = ep_ring->dequeue;
3981 + while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
3982 + != (dma_addr_t)(hw_dequeue & ~0xf)) {
3983 + next_trb(xhci, ep_ring, &state->new_deq_seg,
3984 + &state->new_deq_ptr);
3985 + if (state->new_deq_ptr == ep_ring->dequeue) {
3986 + WARN_ON(1);
3987 + return;
3988 + }
3989 + }
3990 + /*
3991 + * Find cycle state for last_trb, starting at old cycle state of
3992 + * hw_dequeue. If there is only one segment ring, find_trb_seg() will
3993 + * return immediately and cannot toggle the cycle state if this search
3994 + * wraps around, so add one more toggle manually in that case.
3995 + */
3996 + state->new_cycle_state = hw_dequeue & 0x1;
3997 + if (ep_ring->first_seg == ep_ring->first_seg->next &&
3998 + cur_td->last_trb < state->new_deq_ptr)
3999 + state->new_cycle_state ^= 0x1;
4000
4001 state->new_deq_ptr = cur_td->last_trb;
4002 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4003 "Finding segment containing last TRB in TD.");
4004 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
4005 - state->new_deq_ptr,
4006 - &state->new_cycle_state);
4007 + state->new_deq_ptr, &state->new_cycle_state);
4008 if (!state->new_deq_seg) {
4009 WARN_ON(1);
4010 return;
4011 }
4012
4013 + /* Increment to find next TRB after last_trb. Cycle if appropriate. */
4014 trb = &state->new_deq_ptr->generic;
4015 if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
4016 (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
4017 state->new_cycle_state ^= 0x1;
4018 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
4019
4020 - /*
4021 - * If there is only one segment in a ring, find_trb_seg()'s while loop
4022 - * will not run, and it will return before it has a chance to see if it
4023 - * needs to toggle the cycle bit. It can't tell if the stalled transfer
4024 - * ended just before the link TRB on a one-segment ring, or if the TD
4025 - * wrapped around the top of the ring, because it doesn't have the TD in
4026 - * question. Look for the one-segment case where stalled TRB's address
4027 - * is greater than the new dequeue pointer address.
4028 - */
4029 - if (ep_ring->first_seg == ep_ring->first_seg->next &&
4030 - state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
4031 - state->new_cycle_state ^= 0x1;
4032 + /* Don't update the ring cycle state for the producer (us). */
4033 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4034 "Cycle state = 0x%x", state->new_cycle_state);
4035
4036 - /* Don't update the ring cycle state for the producer (us). */
4037 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4038 "New dequeue segment = %p (virtual)",
4039 state->new_deq_seg);
4040 @@ -791,7 +793,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
4041 if (list_empty(&ep->cancelled_td_list)) {
4042 xhci_stop_watchdog_timer_in_irq(xhci, ep);
4043 ep->stopped_td = NULL;
4044 - ep->stopped_trb = NULL;
4045 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
4046 return;
4047 }
4048 @@ -859,11 +860,9 @@ remove_finished_td:
4049 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
4050 }
4051
4052 - /* Clear stopped_td and stopped_trb if endpoint is not halted */
4053 - if (!(ep->ep_state & EP_HALTED)) {
4054 + /* Clear stopped_td if endpoint is not halted */
4055 + if (!(ep->ep_state & EP_HALTED))
4056 ep->stopped_td = NULL;
4057 - ep->stopped_trb = NULL;
4058 - }
4059
4060 /*
4061 * Drop the lock and complete the URBs in the cancelled TD list.
4062 @@ -1908,14 +1907,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
4063 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
4064 ep->ep_state |= EP_HALTED;
4065 ep->stopped_td = td;
4066 - ep->stopped_trb = event_trb;
4067 ep->stopped_stream = stream_id;
4068
4069 xhci_queue_reset_ep(xhci, slot_id, ep_index);
4070 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
4071
4072 ep->stopped_td = NULL;
4073 - ep->stopped_trb = NULL;
4074 ep->stopped_stream = 0;
4075
4076 xhci_ring_cmd_db(xhci);
4077 @@ -1997,7 +1994,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
4078 * the ring dequeue pointer or take this TD off any lists yet.
4079 */
4080 ep->stopped_td = td;
4081 - ep->stopped_trb = event_trb;
4082 return 0;
4083 } else {
4084 if (trb_comp_code == COMP_STALL) {
4085 @@ -2009,7 +2005,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
4086 * USB class driver clear the stall later.
4087 */
4088 ep->stopped_td = td;
4089 - ep->stopped_trb = event_trb;
4090 ep->stopped_stream = ep_ring->stream_id;
4091 } else if (xhci_requires_manual_halt_cleanup(xhci,
4092 ep_ctx, trb_comp_code)) {
4093 @@ -2626,7 +2621,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
4094 * successful event after a short transfer.
4095 * Ignore it.
4096 */
4097 - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
4098 + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
4099 ep_ring->last_td_was_short) {
4100 ep_ring->last_td_was_short = false;
4101 ret = 0;
4102 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4103 index 924a6ccdb622..cca289667cf6 100644
4104 --- a/drivers/usb/host/xhci.c
4105 +++ b/drivers/usb/host/xhci.c
4106 @@ -404,16 +404,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
4107
4108 #else
4109
4110 -static int xhci_try_enable_msi(struct usb_hcd *hcd)
4111 +static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
4112 {
4113 return 0;
4114 }
4115
4116 -static void xhci_cleanup_msix(struct xhci_hcd *xhci)
4117 +static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
4118 {
4119 }
4120
4121 -static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
4122 +static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
4123 {
4124 }
4125
4126 @@ -2933,7 +2933,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
4127 xhci_ring_cmd_db(xhci);
4128 }
4129 virt_ep->stopped_td = NULL;
4130 - virt_ep->stopped_trb = NULL;
4131 virt_ep->stopped_stream = 0;
4132 spin_unlock_irqrestore(&xhci->lock, flags);
4133
4134 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4135 index 58ed9d088e63..8faef64371c6 100644
4136 --- a/drivers/usb/host/xhci.h
4137 +++ b/drivers/usb/host/xhci.h
4138 @@ -864,8 +864,6 @@ struct xhci_virt_ep {
4139 #define EP_GETTING_NO_STREAMS (1 << 5)
4140 /* ---- Related to URB cancellation ---- */
4141 struct list_head cancelled_td_list;
4142 - /* The TRB that was last reported in a stopped endpoint ring */
4143 - union xhci_trb *stopped_trb;
4144 struct xhci_td *stopped_td;
4145 unsigned int stopped_stream;
4146 /* Watchdog timer for stop endpoint command to cancel URBs */
4147 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
4148 index 239ad0b1ceb6..07576907e2c6 100644
4149 --- a/drivers/usb/musb/musb_core.c
4150 +++ b/drivers/usb/musb/musb_core.c
4151 @@ -438,7 +438,6 @@ void musb_hnp_stop(struct musb *musb)
4152 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
4153 u8 devctl)
4154 {
4155 - struct usb_otg *otg = musb->xceiv->otg;
4156 irqreturn_t handled = IRQ_NONE;
4157
4158 dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
4159 @@ -656,7 +655,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
4160 break;
4161 case OTG_STATE_B_PERIPHERAL:
4162 musb_g_suspend(musb);
4163 - musb->is_active = otg->gadget->b_hnp_enable;
4164 + musb->is_active = musb->g.b_hnp_enable;
4165 if (musb->is_active) {
4166 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
4167 dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
4168 @@ -672,7 +671,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
4169 break;
4170 case OTG_STATE_A_HOST:
4171 musb->xceiv->state = OTG_STATE_A_SUSPEND;
4172 - musb->is_active = otg->host->b_hnp_enable;
4173 + musb->is_active = musb->hcd->self.b_hnp_enable;
4174 break;
4175 case OTG_STATE_B_HOST:
4176 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
4177 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
4178 index 8aa59a2c5eb2..8e90d327e52b 100644
4179 --- a/drivers/usb/musb/omap2430.c
4180 +++ b/drivers/usb/musb/omap2430.c
4181 @@ -416,6 +416,7 @@ static int omap2430_musb_init(struct musb *musb)
4182 omap_musb_set_mailbox(glue);
4183
4184 phy_init(musb->phy);
4185 + phy_power_on(musb->phy);
4186
4187 pm_runtime_put_noidle(musb->controller);
4188 return 0;
4189 @@ -478,6 +479,7 @@ static int omap2430_musb_exit(struct musb *musb)
4190 del_timer_sync(&musb_idle_timer);
4191
4192 omap2430_low_level_exit(musb);
4193 + phy_power_off(musb->phy);
4194 phy_exit(musb->phy);
4195
4196 return 0;
4197 diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
4198 index d75196ad5f2f..35b6083b7999 100644
4199 --- a/drivers/usb/phy/phy-am335x-control.c
4200 +++ b/drivers/usb/phy/phy-am335x-control.c
4201 @@ -3,6 +3,7 @@
4202 #include <linux/err.h>
4203 #include <linux/of.h>
4204 #include <linux/io.h>
4205 +#include <linux/delay.h>
4206 #include "am35x-phy-control.h"
4207
4208 struct am335x_control_usb {
4209 @@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
4210 }
4211
4212 writel(val, usb_ctrl->phy_reg + reg);
4213 +
4214 + /*
4215 + * Give the PHY ~1ms to complete the power up operation.
4216 + * Tests have shown unstable behaviour if other USB PHY related
4217 + * registers are written too shortly after such a transition.
4218 + */
4219 + if (on)
4220 + mdelay(1);
4221 }
4222
4223 static const struct phy_control ctrl_am335x = {
4224 diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
4225 index 217339dd7a90..17ea3f271bd8 100644
4226 --- a/drivers/usb/phy/phy-ulpi.c
4227 +++ b/drivers/usb/phy/phy-ulpi.c
4228 @@ -47,6 +47,8 @@ struct ulpi_info {
4229 static struct ulpi_info ulpi_ids[] = {
4230 ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
4231 ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
4232 + ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"),
4233 + ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
4234 };
4235
4236 static int ulpi_set_otg_flags(struct usb_phy *phy)
4237 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
4238 index 2e22fc22c382..b3d5a35c0d4b 100644
4239 --- a/drivers/usb/serial/pl2303.c
4240 +++ b/drivers/usb/serial/pl2303.c
4241 @@ -83,6 +83,9 @@ static const struct usb_device_id id_table[] = {
4242 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
4243 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
4244 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
4245 + { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
4246 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
4247 + { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
4248 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
4249 { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
4250 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
4251 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
4252 index c38b8c00c06f..42bc082896ac 100644
4253 --- a/drivers/usb/serial/pl2303.h
4254 +++ b/drivers/usb/serial/pl2303.h
4255 @@ -121,8 +121,11 @@
4256 #define SUPERIAL_VENDOR_ID 0x5372
4257 #define SUPERIAL_PRODUCT_ID 0x2303
4258
4259 -/* Hewlett-Packard LD220-HP POS Pole Display */
4260 +/* Hewlett-Packard POS Pole Displays */
4261 #define HP_VENDOR_ID 0x03f0
4262 +#define HP_LD960_PRODUCT_ID 0x0b39
4263 +#define HP_LCM220_PRODUCT_ID 0x3139
4264 +#define HP_LCM960_PRODUCT_ID 0x3239
4265 #define HP_LD220_PRODUCT_ID 0x3524
4266
4267 /* Cressi Edy (diving computer) PC interface */
4268 diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
4269 index cd961622f9c1..b74e5f5ddac8 100644
4270 --- a/drivers/video/atmel_lcdfb.c
4271 +++ b/drivers/video/atmel_lcdfb.c
4272 @@ -1298,6 +1298,12 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
4273 goto unregister_irqs;
4274 }
4275
4276 + ret = atmel_lcdfb_set_par(info);
4277 + if (ret < 0) {
4278 + dev_err(dev, "set par failed: %d\n", ret);
4279 + goto unregister_irqs;
4280 + }
4281 +
4282 dev_set_drvdata(dev, info);
4283
4284 /*
4285 diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
4286 index 40788c925d1c..73705aff53cb 100644
4287 --- a/drivers/w1/w1_netlink.c
4288 +++ b/drivers/w1/w1_netlink.c
4289 @@ -54,28 +54,29 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
4290 struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
4291 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
4292 int avail;
4293 + u64 *data;
4294
4295 /* update kernel slave list */
4296 w1_slave_found(dev, rn);
4297
4298 avail = dev->priv_size - cmd->len;
4299
4300 - if (avail > 8) {
4301 - u64 *data = (void *)(cmd + 1) + cmd->len;
4302 + if (avail < 8) {
4303 + msg->ack++;
4304 + cn_netlink_send(msg, 0, GFP_KERNEL);
4305
4306 - *data = rn;
4307 - cmd->len += 8;
4308 - hdr->len += 8;
4309 - msg->len += 8;
4310 - return;
4311 + msg->len = sizeof(struct w1_netlink_msg) +
4312 + sizeof(struct w1_netlink_cmd);
4313 + hdr->len = sizeof(struct w1_netlink_cmd);
4314 + cmd->len = 0;
4315 }
4316
4317 - msg->ack++;
4318 - cn_netlink_send(msg, 0, GFP_KERNEL);
4319 + data = (void *)(cmd + 1) + cmd->len;
4320
4321 - msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
4322 - hdr->len = sizeof(struct w1_netlink_cmd);
4323 - cmd->len = 0;
4324 + *data = rn;
4325 + cmd->len += 8;
4326 + hdr->len += 8;
4327 + msg->len += 8;
4328 }
4329
4330 static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
4331 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4332 index d3a534fdc5ff..3a603a8d9f96 100644
4333 --- a/fs/ext4/ext4.h
4334 +++ b/fs/ext4/ext4.h
4335 @@ -2462,23 +2462,6 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
4336 up_write(&EXT4_I(inode)->i_data_sem);
4337 }
4338
4339 -/*
4340 - * Update i_disksize after writeback has been started. Races with truncate
4341 - * are avoided by checking i_size under i_data_sem.
4342 - */
4343 -static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
4344 -{
4345 - loff_t i_size;
4346 -
4347 - down_write(&EXT4_I(inode)->i_data_sem);
4348 - i_size = i_size_read(inode);
4349 - if (newsize > i_size)
4350 - newsize = i_size;
4351 - if (newsize > EXT4_I(inode)->i_disksize)
4352 - EXT4_I(inode)->i_disksize = newsize;
4353 - up_write(&EXT4_I(inode)->i_data_sem);
4354 -}
4355 -
4356 struct ext4_group_info {
4357 unsigned long bb_state;
4358 struct rb_root bb_free_root;
4359 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
4360 index 1a5073959f32..2a8b2e15dcc4 100644
4361 --- a/fs/ext4/file.c
4362 +++ b/fs/ext4/file.c
4363 @@ -82,7 +82,7 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
4364 size_t count = iov_length(iov, nr_segs);
4365 loff_t final_size = pos + count;
4366
4367 - if (pos >= inode->i_size)
4368 + if (pos >= i_size_read(inode))
4369 return 0;
4370
4371 if ((pos & blockmask) || (final_size & blockmask))
4372 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4373 index 24bfd7ff3049..4e8903d0432e 100644
4374 --- a/fs/ext4/inode.c
4375 +++ b/fs/ext4/inode.c
4376 @@ -515,6 +515,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
4377 "logical block %lu\n", inode->i_ino, flags, map->m_len,
4378 (unsigned long) map->m_lblk);
4379
4380 + /* We can handle the block number less than EXT_MAX_BLOCKS */
4381 + if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
4382 + return -EIO;
4383 +
4384 /* Lookup extent status tree firstly */
4385 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
4386 ext4_es_lru_add(inode);
4387 @@ -2232,13 +2236,23 @@ static int mpage_map_and_submit_extent(handle_t *handle,
4388 return err;
4389 } while (map->m_len);
4390
4391 - /* Update on-disk size after IO is submitted */
4392 + /*
4393 + * Update on-disk size after IO is submitted. Races with
4394 + * truncate are avoided by checking i_size under i_data_sem.
4395 + */
4396 disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
4397 if (disksize > EXT4_I(inode)->i_disksize) {
4398 int err2;
4399 -
4400 - ext4_wb_update_i_disksize(inode, disksize);
4401 + loff_t i_size;
4402 +
4403 + down_write(&EXT4_I(inode)->i_data_sem);
4404 + i_size = i_size_read(inode);
4405 + if (disksize > i_size)
4406 + disksize = i_size;
4407 + if (disksize > EXT4_I(inode)->i_disksize)
4408 + EXT4_I(inode)->i_disksize = disksize;
4409 err2 = ext4_mark_inode_dirty(handle, inode);
4410 + up_write(&EXT4_I(inode)->i_data_sem);
4411 if (err2)
4412 ext4_error(inode->i_sb,
4413 "Failed to mark inode %lu dirty",
4414 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
4415 index ab95508e3d40..c18d95b50540 100644
4416 --- a/fs/ext4/page-io.c
4417 +++ b/fs/ext4/page-io.c
4418 @@ -308,13 +308,14 @@ static void ext4_end_bio(struct bio *bio, int error)
4419 if (error) {
4420 struct inode *inode = io_end->inode;
4421
4422 - ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
4423 + ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
4424 "(offset %llu size %ld starting block %llu)",
4425 - inode->i_ino,
4426 + error, inode->i_ino,
4427 (unsigned long long) io_end->offset,
4428 (long) io_end->size,
4429 (unsigned long long)
4430 bi_sector >> (inode->i_blkbits - 9));
4431 + mapping_set_error(inode->i_mapping, error);
4432 }
4433
4434 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
4435 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4436 index e175e94116ac..55e611c1513c 100644
4437 --- a/fs/ext4/xattr.c
4438 +++ b/fs/ext4/xattr.c
4439 @@ -517,8 +517,8 @@ static void ext4_xattr_update_super_block(handle_t *handle,
4440 }
4441
4442 /*
4443 - * Release the xattr block BH: If the reference count is > 1, decrement
4444 - * it; otherwise free the block.
4445 + * Release the xattr block BH: If the reference count is > 1, decrement it;
4446 + * otherwise free the block.
4447 */
4448 static void
4449 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
4450 @@ -538,16 +538,31 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
4451 if (ce)
4452 mb_cache_entry_free(ce);
4453 get_bh(bh);
4454 + unlock_buffer(bh);
4455 ext4_free_blocks(handle, inode, bh, 0, 1,
4456 EXT4_FREE_BLOCKS_METADATA |
4457 EXT4_FREE_BLOCKS_FORGET);
4458 - unlock_buffer(bh);
4459 } else {
4460 le32_add_cpu(&BHDR(bh)->h_refcount, -1);
4461 if (ce)
4462 mb_cache_entry_release(ce);
4463 + /*
4464 + * Beware of this ugliness: Releasing of xattr block references
4465 + * from different inodes can race and so we have to protect
4466 + * from a race where someone else frees the block (and releases
4467 + * its journal_head) before we are done dirtying the buffer. In
4468 + * nojournal mode this race is harmless and we actually cannot
4469 + * call ext4_handle_dirty_xattr_block() with locked buffer as
4470 + * that function can call sync_dirty_buffer() so for that case
4471 + * we handle the dirtying after unlocking the buffer.
4472 + */
4473 + if (ext4_handle_valid(handle))
4474 + error = ext4_handle_dirty_xattr_block(handle, inode,
4475 + bh);
4476 unlock_buffer(bh);
4477 - error = ext4_handle_dirty_xattr_block(handle, inode, bh);
4478 + if (!ext4_handle_valid(handle))
4479 + error = ext4_handle_dirty_xattr_block(handle, inode,
4480 + bh);
4481 if (IS_SYNC(inode))
4482 ext4_handle_sync(handle);
4483 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
4484 diff --git a/fs/namespace.c b/fs/namespace.c
4485 index 2ffc5a2905d4..65233a5f390a 100644
4486 --- a/fs/namespace.c
4487 +++ b/fs/namespace.c
4488 @@ -885,7 +885,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
4489 goto out_free;
4490 }
4491
4492 - mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
4493 + mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
4494 /* Don't allow unprivileged users to change mount flags */
4495 if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
4496 mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
4497 @@ -1661,9 +1661,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
4498 if (err)
4499 goto out;
4500 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
4501 + lock_mount_hash();
4502 if (err)
4503 goto out_cleanup_ids;
4504 - lock_mount_hash();
4505 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
4506 set_mnt_shared(p);
4507 } else {
4508 @@ -1690,6 +1690,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
4509 return 0;
4510
4511 out_cleanup_ids:
4512 + while (!hlist_empty(&tree_list)) {
4513 + child = hlist_entry(tree_list.first, struct mount, mnt_hash);
4514 + umount_tree(child, 0);
4515 + }
4516 + unlock_mount_hash();
4517 cleanup_group_ids(source_mnt, NULL);
4518 out:
4519 return err;
4520 @@ -2044,7 +2049,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
4521 struct mount *parent;
4522 int err;
4523
4524 - mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
4525 + mnt_flags &= ~MNT_INTERNAL_FLAGS;
4526
4527 mp = lock_mount(path);
4528 if (IS_ERR(mp))
4529 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4530 index 450bfedbe2f4..d5d06e868841 100644
4531 --- a/fs/nfs/nfs4proc.c
4532 +++ b/fs/nfs/nfs4proc.c
4533 @@ -1068,6 +1068,7 @@ static void nfs4_opendata_free(struct kref *kref)
4534 dput(p->dentry);
4535 nfs_sb_deactive(sb);
4536 nfs_fattr_free_names(&p->f_attr);
4537 + kfree(p->f_attr.mdsthreshold);
4538 kfree(p);
4539 }
4540
4541 @@ -2244,10 +2245,12 @@ static int _nfs4_do_open(struct inode *dir,
4542 }
4543 }
4544
4545 - if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
4546 - opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
4547 - if (!opendata->f_attr.mdsthreshold)
4548 - goto err_free_label;
4549 + if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
4550 + if (!opendata->f_attr.mdsthreshold) {
4551 + opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
4552 + if (!opendata->f_attr.mdsthreshold)
4553 + goto err_free_label;
4554 + }
4555 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
4556 }
4557 if (dentry->d_inode != NULL)
4558 @@ -2275,11 +2278,10 @@ static int _nfs4_do_open(struct inode *dir,
4559 if (opendata->file_created)
4560 *opened |= FILE_CREATED;
4561
4562 - if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
4563 + if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
4564 *ctx_th = opendata->f_attr.mdsthreshold;
4565 - else
4566 - kfree(opendata->f_attr.mdsthreshold);
4567 - opendata->f_attr.mdsthreshold = NULL;
4568 + opendata->f_attr.mdsthreshold = NULL;
4569 + }
4570
4571 nfs4_label_free(olabel);
4572
4573 @@ -2289,7 +2291,6 @@ static int _nfs4_do_open(struct inode *dir,
4574 err_free_label:
4575 nfs4_label_free(olabel);
4576 err_opendata_put:
4577 - kfree(opendata->f_attr.mdsthreshold);
4578 nfs4_opendata_put(opendata);
4579 err_put_state_owner:
4580 nfs4_put_state_owner(sp);
4581 diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
4582 index 7f05cd140de3..3eaa6e30a2dc 100644
4583 --- a/fs/nfsd/nfs4callback.c
4584 +++ b/fs/nfsd/nfs4callback.c
4585 @@ -637,9 +637,11 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
4586
4587 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
4588 {
4589 + int maxtime = max_cb_time(clp->net);
4590 struct rpc_timeout timeparms = {
4591 - .to_initval = max_cb_time(clp->net),
4592 + .to_initval = maxtime,
4593 .to_retries = 0,
4594 + .to_maxval = maxtime,
4595 };
4596 struct rpc_create_args args = {
4597 .net = clp->net,
4598 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
4599 index 82189b208af3..9a914e892bb1 100644
4600 --- a/fs/nfsd/nfs4proc.c
4601 +++ b/fs/nfsd/nfs4proc.c
4602 @@ -1359,6 +1359,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
4603 /* If op is non-idempotent */
4604 if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
4605 plen = opdesc->op_rsize_bop(rqstp, op);
4606 + /*
4607 + * If there's still another operation, make sure
4608 + * we'll have space to at least encode an error:
4609 + */
4610 + if (resp->opcnt < args->opcnt)
4611 + plen += COMPOUND_ERR_SLACK_SPACE;
4612 op->status = nfsd4_check_resp_size(resp, plen);
4613 }
4614
4615 @@ -1523,7 +1529,8 @@ static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *o
4616
4617 static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
4618 {
4619 - return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
4620 + return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
4621 + sizeof(__be32);
4622 }
4623
4624 static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
4625 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4626 index d5d070fbeb35..7415eac36501 100644
4627 --- a/fs/nfsd/nfs4state.c
4628 +++ b/fs/nfsd/nfs4state.c
4629 @@ -5062,7 +5062,6 @@ nfs4_state_destroy_net(struct net *net)
4630 int i;
4631 struct nfs4_client *clp = NULL;
4632 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4633 - struct rb_node *node, *tmp;
4634
4635 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4636 while (!list_empty(&nn->conf_id_hashtbl[i])) {
4637 @@ -5071,13 +5070,11 @@ nfs4_state_destroy_net(struct net *net)
4638 }
4639 }
4640
4641 - node = rb_first(&nn->unconf_name_tree);
4642 - while (node != NULL) {
4643 - tmp = node;
4644 - node = rb_next(tmp);
4645 - clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
4646 - rb_erase(tmp, &nn->unconf_name_tree);
4647 - destroy_client(clp);
4648 + for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4649 + while (!list_empty(&nn->unconf_id_hashtbl[i])) {
4650 + clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4651 + destroy_client(clp);
4652 + }
4653 }
4654
4655 kfree(nn->sessionid_hashtbl);
4656 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
4657 index 63f2395c57ed..16e8fa71eb84 100644
4658 --- a/fs/nfsd/nfs4xdr.c
4659 +++ b/fs/nfsd/nfs4xdr.c
4660 @@ -2483,6 +2483,8 @@ out_acl:
4661 goto out;
4662 }
4663 if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
4664 + if ((buflen -= 16) < 0)
4665 + goto out_resource;
4666 WRITE32(3);
4667 WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
4668 WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
4669 @@ -2499,8 +2501,10 @@ out:
4670 security_release_secctx(context, contextlen);
4671 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
4672 kfree(acl);
4673 - if (tempfh)
4674 + if (tempfh) {
4675 fh_put(tempfh);
4676 + kfree(tempfh);
4677 + }
4678 return status;
4679 out_nfserr:
4680 status = nfserrno(err);
4681 @@ -3471,6 +3475,9 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
4682 struct nfsd4_test_stateid_id *stateid, *next;
4683 __be32 *p;
4684
4685 + if (nfserr)
4686 + return nfserr;
4687 +
4688 RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
4689 *p++ = htonl(test_stateid->ts_num_ids);
4690
4691 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
4692 index 7f555179bf81..f34d9de802ab 100644
4693 --- a/fs/nfsd/nfsctl.c
4694 +++ b/fs/nfsd/nfsctl.c
4695 @@ -699,6 +699,11 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net)
4696 if (err != 0 || fd < 0)
4697 return -EINVAL;
4698
4699 + if (svc_alien_sock(net, fd)) {
4700 + printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__);
4701 + return -EINVAL;
4702 + }
4703 +
4704 err = nfsd_create_serv(net);
4705 if (err != 0)
4706 return err;
4707 diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
4708 index 30f34ab02137..479eb681c27c 100644
4709 --- a/fs/nfsd/nfsd.h
4710 +++ b/fs/nfsd/nfsd.h
4711 @@ -282,7 +282,7 @@ void nfsd_lockd_shutdown(void);
4712 * reason.
4713 */
4714 #define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
4715 -#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
4716 +#define COMPOUND_ERR_SLACK_SPACE 16 /* OP_SETATTR */
4717
4718 #define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
4719
4720 diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
4721 index b17d93214d01..9c769a47ac5a 100644
4722 --- a/fs/nfsd/nfsxdr.c
4723 +++ b/fs/nfsd/nfsxdr.c
4724 @@ -152,7 +152,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
4725 type = (stat->mode & S_IFMT);
4726
4727 *p++ = htonl(nfs_ftypes[type >> 12]);
4728 - *p++ = htonl((u32) (stat->mode & S_IALLUGO));
4729 + *p++ = htonl((u32) stat->mode);
4730 *p++ = htonl((u32) stat->nlink);
4731 *p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
4732 *p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
4733 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
4734 index 6d7be3f80356..eea5ad188984 100644
4735 --- a/fs/nfsd/vfs.c
4736 +++ b/fs/nfsd/vfs.c
4737 @@ -404,6 +404,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
4738 umode_t ftype = 0;
4739 __be32 err;
4740 int host_err;
4741 + bool get_write_count;
4742 int size_change = 0;
4743
4744 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
4745 @@ -411,10 +412,18 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
4746 if (iap->ia_valid & ATTR_SIZE)
4747 ftype = S_IFREG;
4748
4749 + /* Callers that do fh_verify should do the fh_want_write: */
4750 + get_write_count = !fhp->fh_dentry;
4751 +
4752 /* Get inode */
4753 err = fh_verify(rqstp, fhp, ftype, accmode);
4754 if (err)
4755 goto out;
4756 + if (get_write_count) {
4757 + host_err = fh_want_write(fhp);
4758 + if (host_err)
4759 + return nfserrno(host_err);
4760 + }
4761
4762 dentry = fhp->fh_dentry;
4763 inode = dentry->d_inode;
4764 diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
4765 index 5b704c63a103..1edcb141f639 100644
4766 --- a/fs/ocfs2/buffer_head_io.c
4767 +++ b/fs/ocfs2/buffer_head_io.c
4768 @@ -90,7 +90,6 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
4769 * information for this bh as it's not marked locally
4770 * uptodate. */
4771 ret = -EIO;
4772 - put_bh(bh);
4773 mlog_errno(ret);
4774 }
4775
4776 @@ -420,7 +419,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
4777
4778 if (!buffer_uptodate(bh)) {
4779 ret = -EIO;
4780 - put_bh(bh);
4781 mlog_errno(ret);
4782 }
4783
4784 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
4785 index 7035af09cc03..fe29f7978f81 100644
4786 --- a/fs/ocfs2/dlm/dlmrecovery.c
4787 +++ b/fs/ocfs2/dlm/dlmrecovery.c
4788 @@ -537,7 +537,10 @@ master_here:
4789 /* success! see if any other nodes need recovery */
4790 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
4791 dlm->name, dlm->reco.dead_node, dlm->node_num);
4792 - dlm_reset_recovery(dlm);
4793 + spin_lock(&dlm->spinlock);
4794 + __dlm_reset_recovery(dlm);
4795 + dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
4796 + spin_unlock(&dlm->spinlock);
4797 }
4798 dlm_end_recovery(dlm);
4799
4800 @@ -695,6 +698,14 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
4801 if (all_nodes_done) {
4802 int ret;
4803
4804 +			/* Set this flag on the recovery master so that a new
4805 +			 * recovery for another dead node cannot start before
4806 +			 * this recovery has finished; otherwise recovery may
4807 +			 * hang. */
4808 + spin_lock(&dlm->spinlock);
4809 + dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
4810 + spin_unlock(&dlm->spinlock);
4811 +
4812 /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
4813 * just send a finalize message to everyone and
4814 * clean up */
4815 @@ -1750,13 +1761,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
4816 struct dlm_migratable_lockres *mres)
4817 {
4818 struct dlm_migratable_lock *ml;
4819 - struct list_head *queue;
4820 + struct list_head *queue, *iter;
4821 struct list_head *tmpq = NULL;
4822 struct dlm_lock *newlock = NULL;
4823 struct dlm_lockstatus *lksb = NULL;
4824 int ret = 0;
4825 int i, j, bad;
4826 - struct dlm_lock *lock = NULL;
4827 + struct dlm_lock *lock;
4828 u8 from = O2NM_MAX_NODES;
4829 unsigned int added = 0;
4830 __be64 c;
4831 @@ -1791,14 +1802,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
4832 /* MIGRATION ONLY! */
4833 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
4834
4835 + lock = NULL;
4836 spin_lock(&res->spinlock);
4837 for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
4838 tmpq = dlm_list_idx_to_ptr(res, j);
4839 - list_for_each_entry(lock, tmpq, list) {
4840 - if (lock->ml.cookie != ml->cookie)
4841 - lock = NULL;
4842 - else
4843 + list_for_each(iter, tmpq) {
4844 + lock = list_entry(iter,
4845 + struct dlm_lock, list);
4846 + if (lock->ml.cookie == ml->cookie)
4847 break;
4848 + lock = NULL;
4849 }
4850 if (lock)
4851 break;
4852 @@ -2882,8 +2895,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
4853 BUG();
4854 }
4855 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
4856 + __dlm_reset_recovery(dlm);
4857 spin_unlock(&dlm->spinlock);
4858 - dlm_reset_recovery(dlm);
4859 dlm_kick_recovery_thread(dlm);
4860 break;
4861 default:
4862 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
4863 index 3683643f3f0e..feed025fe064 100644
4864 --- a/fs/ocfs2/namei.c
4865 +++ b/fs/ocfs2/namei.c
4866 @@ -450,7 +450,6 @@ leave:
4867
4868 brelse(new_fe_bh);
4869 brelse(parent_fe_bh);
4870 - kfree(si.name);
4871 kfree(si.value);
4872
4873 ocfs2_free_dir_lookup_result(&lookup);
4874 @@ -1855,7 +1854,6 @@ bail:
4875
4876 brelse(new_fe_bh);
4877 brelse(parent_fe_bh);
4878 - kfree(si.name);
4879 kfree(si.value);
4880 ocfs2_free_dir_lookup_result(&lookup);
4881 if (inode_ac)
4882 diff --git a/fs/pnode.c b/fs/pnode.c
4883 index 88396df725b4..a364a704333b 100644
4884 --- a/fs/pnode.c
4885 +++ b/fs/pnode.c
4886 @@ -164,46 +164,94 @@ static struct mount *propagation_next(struct mount *m,
4887 }
4888 }
4889
4890 -/*
4891 - * return the source mount to be used for cloning
4892 - *
4893 - * @dest the current destination mount
4894 - * @last_dest the last seen destination mount
4895 - * @last_src the last seen source mount
4896 - * @type return CL_SLAVE if the new mount has to be
4897 - * cloned as a slave.
4898 - */
4899 -static struct mount *get_source(struct mount *dest,
4900 - struct mount *last_dest,
4901 - struct mount *last_src,
4902 - int *type)
4903 +static struct mount *next_group(struct mount *m, struct mount *origin)
4904 {
4905 - struct mount *p_last_src = NULL;
4906 - struct mount *p_last_dest = NULL;
4907 -
4908 - while (last_dest != dest->mnt_master) {
4909 - p_last_dest = last_dest;
4910 - p_last_src = last_src;
4911 - last_dest = last_dest->mnt_master;
4912 - last_src = last_src->mnt_master;
4913 + while (1) {
4914 + while (1) {
4915 + struct mount *next;
4916 + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
4917 + return first_slave(m);
4918 + next = next_peer(m);
4919 + if (m->mnt_group_id == origin->mnt_group_id) {
4920 + if (next == origin)
4921 + return NULL;
4922 + } else if (m->mnt_slave.next != &next->mnt_slave)
4923 + break;
4924 + m = next;
4925 + }
4926 + /* m is the last peer */
4927 + while (1) {
4928 + struct mount *master = m->mnt_master;
4929 + if (m->mnt_slave.next != &master->mnt_slave_list)
4930 + return next_slave(m);
4931 + m = next_peer(master);
4932 + if (master->mnt_group_id == origin->mnt_group_id)
4933 + break;
4934 + if (master->mnt_slave.next == &m->mnt_slave)
4935 + break;
4936 + m = master;
4937 + }
4938 + if (m == origin)
4939 + return NULL;
4940 }
4941 +}
4942
4943 - if (p_last_dest) {
4944 - do {
4945 - p_last_dest = next_peer(p_last_dest);
4946 - } while (IS_MNT_NEW(p_last_dest));
4947 - /* is that a peer of the earlier? */
4948 - if (dest == p_last_dest) {
4949 - *type = CL_MAKE_SHARED;
4950 - return p_last_src;
4951 +/* all accesses are serialized by namespace_sem */
4952 +static struct user_namespace *user_ns;
4953 +static struct mount *last_dest, *last_source, *dest_master;
4954 +static struct mountpoint *mp;
4955 +static struct hlist_head *list;
4956 +
4957 +static int propagate_one(struct mount *m)
4958 +{
4959 + struct mount *child;
4960 + int type;
4961 + /* skip ones added by this propagate_mnt() */
4962 + if (IS_MNT_NEW(m))
4963 + return 0;
4964 + /* skip if mountpoint isn't covered by it */
4965 + if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
4966 + return 0;
4967 + if (m->mnt_group_id == last_dest->mnt_group_id) {
4968 + type = CL_MAKE_SHARED;
4969 + } else {
4970 + struct mount *n, *p;
4971 + for (n = m; ; n = p) {
4972 + p = n->mnt_master;
4973 + if (p == dest_master || IS_MNT_MARKED(p)) {
4974 + while (last_dest->mnt_master != p) {
4975 + last_source = last_source->mnt_master;
4976 + last_dest = last_source->mnt_parent;
4977 + }
4978 + if (n->mnt_group_id != last_dest->mnt_group_id) {
4979 + last_source = last_source->mnt_master;
4980 + last_dest = last_source->mnt_parent;
4981 + }
4982 + break;
4983 + }
4984 }
4985 + type = CL_SLAVE;
4986 + /* beginning of peer group among the slaves? */
4987 + if (IS_MNT_SHARED(m))
4988 + type |= CL_MAKE_SHARED;
4989 }
4990 - /* slave of the earlier, then */
4991 - *type = CL_SLAVE;
4992 - /* beginning of peer group among the slaves? */
4993 - if (IS_MNT_SHARED(dest))
4994 - *type |= CL_MAKE_SHARED;
4995 - return last_src;
4996 +
4997 + /* Notice when we are propagating across user namespaces */
4998 + if (m->mnt_ns->user_ns != user_ns)
4999 + type |= CL_UNPRIVILEGED;
5000 + child = copy_tree(last_source, last_source->mnt.mnt_root, type);
5001 + if (IS_ERR(child))
5002 + return PTR_ERR(child);
5003 + mnt_set_mountpoint(m, mp, child);
5004 + last_dest = m;
5005 + last_source = child;
5006 + if (m->mnt_master != dest_master) {
5007 + read_seqlock_excl(&mount_lock);
5008 + SET_MNT_MARK(m->mnt_master);
5009 + read_sequnlock_excl(&mount_lock);
5010 + }
5011 + hlist_add_head(&child->mnt_hash, list);
5012 + return 0;
5013 }
5014
5015 /*
5016 @@ -222,56 +270,48 @@ static struct mount *get_source(struct mount *dest,
5017 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
5018 struct mount *source_mnt, struct hlist_head *tree_list)
5019 {
5020 - struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
5021 - struct mount *m, *child;
5022 + struct mount *m, *n;
5023 int ret = 0;
5024 - struct mount *prev_dest_mnt = dest_mnt;
5025 - struct mount *prev_src_mnt = source_mnt;
5026 - HLIST_HEAD(tmp_list);
5027 -
5028 - for (m = propagation_next(dest_mnt, dest_mnt); m;
5029 - m = propagation_next(m, dest_mnt)) {
5030 - int type;
5031 - struct mount *source;
5032 -
5033 - if (IS_MNT_NEW(m))
5034 - continue;
5035 -
5036 - source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
5037 -
5038 - /* Notice when we are propagating across user namespaces */
5039 - if (m->mnt_ns->user_ns != user_ns)
5040 - type |= CL_UNPRIVILEGED;
5041 -
5042 - child = copy_tree(source, source->mnt.mnt_root, type);
5043 - if (IS_ERR(child)) {
5044 - ret = PTR_ERR(child);
5045 - tmp_list = *tree_list;
5046 - tmp_list.first->pprev = &tmp_list.first;
5047 - INIT_HLIST_HEAD(tree_list);
5048 +
5049 + /*
5050 + * we don't want to bother passing tons of arguments to
5051 + * propagate_one(); everything is serialized by namespace_sem,
5052 + * so globals will do just fine.
5053 + */
5054 + user_ns = current->nsproxy->mnt_ns->user_ns;
5055 + last_dest = dest_mnt;
5056 + last_source = source_mnt;
5057 + mp = dest_mp;
5058 + list = tree_list;
5059 + dest_master = dest_mnt->mnt_master;
5060 +
5061 + /* all peers of dest_mnt, except dest_mnt itself */
5062 + for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
5063 + ret = propagate_one(n);
5064 + if (ret)
5065 goto out;
5066 - }
5067 + }
5068
5069 - if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
5070 - mnt_set_mountpoint(m, dest_mp, child);
5071 - hlist_add_head(&child->mnt_hash, tree_list);
5072 - } else {
5073 - /*
5074 - * This can happen if the parent mount was bind mounted
5075 - * on some subdirectory of a shared/slave mount.
5076 - */
5077 - hlist_add_head(&child->mnt_hash, &tmp_list);
5078 - }
5079 - prev_dest_mnt = m;
5080 - prev_src_mnt = child;
5081 + /* all slave groups */
5082 + for (m = next_group(dest_mnt, dest_mnt); m;
5083 + m = next_group(m, dest_mnt)) {
5084 + /* everything in that slave group */
5085 + n = m;
5086 + do {
5087 + ret = propagate_one(n);
5088 + if (ret)
5089 + goto out;
5090 + n = next_peer(n);
5091 + } while (n != m);
5092 }
5093 out:
5094 - lock_mount_hash();
5095 - while (!hlist_empty(&tmp_list)) {
5096 - child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
5097 - umount_tree(child, 0);
5098 + read_seqlock_excl(&mount_lock);
5099 + hlist_for_each_entry(n, tree_list, mnt_hash) {
5100 + m = n->mnt_parent;
5101 + if (m->mnt_master != dest_mnt->mnt_master)
5102 + CLEAR_MNT_MARK(m->mnt_master);
5103 }
5104 - unlock_mount_hash();
5105 + read_sequnlock_excl(&mount_lock);
5106 return ret;
5107 }
5108
5109 diff --git a/fs/pnode.h b/fs/pnode.h
5110 index fc28a27fa892..4a246358b031 100644
5111 --- a/fs/pnode.h
5112 +++ b/fs/pnode.h
5113 @@ -16,6 +16,9 @@
5114 #define IS_MNT_NEW(m) (!(m)->mnt_ns)
5115 #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
5116 #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
5117 +#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
5118 +#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
5119 +#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
5120
5121 #define CL_EXPIRE 0x01
5122 #define CL_SLAVE 0x02
5123 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
5124 index 1fd2051109a3..af677353a3f5 100644
5125 --- a/fs/reiserfs/dir.c
5126 +++ b/fs/reiserfs/dir.c
5127 @@ -125,6 +125,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
5128 int d_reclen;
5129 char *d_name;
5130 ino_t d_ino;
5131 + loff_t cur_pos = deh_offset(deh);
5132
5133 if (!de_visible(deh))
5134 /* it is hidden entry */
5135 @@ -196,8 +197,9 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
5136 if (local_buf != small_buf) {
5137 kfree(local_buf);
5138 }
5139 - // next entry should be looked for with such offset
5140 - next_pos = deh_offset(deh) + 1;
5141 +
5142 + /* deh_offset(deh) may be invalid now. */
5143 + next_pos = cur_pos + 1;
5144
5145 if (item_moved(&tmp_ih, &path_to_entry)) {
5146 set_cpu_key_k_offset(&pos_key,
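The hunk above caches deh_offset(deh) into cur_pos before the entry is handed back to the caller, because the entry head may no longer be valid by the time next_pos is computed (as the new comment notes). A minimal user-space sketch of that cache-before-invalidation pattern, with illustrative types standing in for the ReiserFS structures:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in; this is not the ReiserFS directory entry head. */
struct entry_head { long offset; };

/* Models an operation (like copying the entry out) after which the old
 * entry_head pointer must not be trusted any more. */
static struct entry_head *emit_and_relocate(struct entry_head *deh)
{
	struct entry_head *moved = malloc(sizeof(*moved));

	if (!moved)
		exit(1);
	moved->offset = deh->offset;
	free(deh);			/* old pointer is now stale */
	return moved;
}

int main(void)
{
	struct entry_head *deh = malloc(sizeof(*deh));
	long cur_pos;

	if (!deh)
		return 1;
	deh->offset = 41;

	cur_pos = deh->offset;		/* cache before it can go stale */
	deh = emit_and_relocate(deh);

	/* Safe: derived from the cached value, not the stale pointer. */
	printf("next_pos = %ld\n", cur_pos + 1);
	free(deh);
	return 0;
}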
5147 diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
5148 index d3909effd725..d96deb443f18 100644
5149 --- a/include/asm-generic/word-at-a-time.h
5150 +++ b/include/asm-generic/word-at-a-time.h
5151 @@ -50,11 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
5152 }
5153
5154 #ifndef zero_bytemask
5155 -#ifdef CONFIG_64BIT
5156 -#define zero_bytemask(mask) (~0ul << fls64(mask))
5157 -#else
5158 -#define zero_bytemask(mask) (~0ul << fls(mask))
5159 -#endif /* CONFIG_64BIT */
5160 -#endif /* zero_bytemask */
5161 +#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
5162 +#endif
5163
5164 #endif /* _ASM_WORD_AT_A_TIME_H */
5165 diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
5166 index 8c1603b10665..433528ab5161 100644
5167 --- a/include/dt-bindings/clock/tegra124-car.h
5168 +++ b/include/dt-bindings/clock/tegra124-car.h
5169 @@ -29,7 +29,7 @@
5170 /* 10 (register bit affects spdif_in and spdif_out) */
5171 #define TEGRA124_CLK_I2S1 11
5172 #define TEGRA124_CLK_I2C1 12
5173 -#define TEGRA124_CLK_NDFLASH 13
5174 +/* 13 */
5175 #define TEGRA124_CLK_SDMMC1 14
5176 #define TEGRA124_CLK_SDMMC4 15
5177 /* 16 */
5178 @@ -83,7 +83,7 @@
5179
5180 /* 64 */
5181 #define TEGRA124_CLK_UARTD 65
5182 -#define TEGRA124_CLK_UARTE 66
5183 +/* 66 */
5184 #define TEGRA124_CLK_I2C3 67
5185 #define TEGRA124_CLK_SBC4 68
5186 #define TEGRA124_CLK_SDMMC3 69
5187 @@ -97,7 +97,7 @@
5188 #define TEGRA124_CLK_TRACE 77
5189 #define TEGRA124_CLK_SOC_THERM 78
5190 #define TEGRA124_CLK_DTV 79
5191 -#define TEGRA124_CLK_NDSPEED 80
5192 +/* 80 */
5193 #define TEGRA124_CLK_I2CSLOW 81
5194 #define TEGRA124_CLK_DSIB 82
5195 #define TEGRA124_CLK_TSEC 83
5196 diff --git a/include/linux/bio.h b/include/linux/bio.h
5197 index 5a4d39b4686b..5aa372a7380c 100644
5198 --- a/include/linux/bio.h
5199 +++ b/include/linux/bio.h
5200 @@ -216,9 +216,9 @@ static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
5201 }
5202
5203 #define for_each_bvec(bvl, bio_vec, iter, start) \
5204 - for ((iter) = start; \
5205 - (bvl) = bvec_iter_bvec((bio_vec), (iter)), \
5206 - (iter).bi_size; \
5207 + for (iter = (start); \
5208 + (iter).bi_size && \
5209 + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
5210 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
5211
5212
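The hunk above reorders the for_each_bvec condition so that bi_size is tested before bvec_iter_bvec() is evaluated, with the comma operator keeping the assignment inside the condition. A minimal user-space sketch of the same test-then-fetch iteration pattern, using illustrative names rather than the block-layer types:

#include <stdio.h>

struct cursor { unsigned idx, remaining; };

/* Fetch-then-test, like the old macro: the element is read even when
 * remaining is already 0, i.e. one slot past the end of the data. */
#define FOR_EACH_OLD(val, arr, c)					\
	for ((c).idx = 0;						\
	     (val) = (arr)[(c).idx], (c).remaining;			\
	     (c).idx++, (c).remaining--)

/* Test-then-fetch, like the fixed macro: the termination check comes
 * first, and the comma operator turns the assignment into an
 * always-true second operand of &&. */
#define FOR_EACH_NEW(val, arr, c)					\
	for ((c).idx = 0;						\
	     (c).remaining &&						\
		(((val) = (arr)[(c).idx]), 1);				\
	     (c).idx++, (c).remaining--)

int main(void)
{
	int data[3] = { 1, 2, 3 };
	struct cursor c = { 0, 3 };
	int v;

	FOR_EACH_NEW(v, data, c)	/* prints 1 2 3, no read past the end */
		printf("%d ", v);
	printf("\n");
	return 0;
}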
5213 diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
5214 index 344883dce584..6088058a3e00 100644
5215 --- a/include/linux/hyperv.h
5216 +++ b/include/linux/hyperv.h
5217 @@ -464,15 +464,17 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
5218 * 0 . 13 (Windows Server 2008)
5219 * 1 . 1 (Windows 7)
5220 * 2 . 4 (Windows 8)
5221 + * 3 . 0 (Windows 8 R2)
5222 */
5223
5224 #define VERSION_WS2008 ((0 << 16) | (13))
5225 #define VERSION_WIN7 ((1 << 16) | (1))
5226 #define VERSION_WIN8 ((2 << 16) | (4))
5227 +#define VERSION_WIN8_1 ((3 << 16) | (0))
5228
5229 #define VERSION_INVAL -1
5230
5231 -#define VERSION_CURRENT VERSION_WIN8
5232 +#define VERSION_CURRENT VERSION_WIN8_1
5233
5234 /* Make maximum size of pipe payload of 16K */
5235 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
5236 diff --git a/include/linux/mount.h b/include/linux/mount.h
5237 index 371d346fa270..839bac270904 100644
5238 --- a/include/linux/mount.h
5239 +++ b/include/linux/mount.h
5240 @@ -44,6 +44,8 @@ struct mnt_namespace;
5241 #define MNT_SHARED_MASK (MNT_UNBINDABLE)
5242 #define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
5243
5244 +#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
5245 + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
5246
5247 #define MNT_INTERNAL 0x4000
5248
5249 @@ -51,6 +53,7 @@ struct mnt_namespace;
5250 #define MNT_LOCKED 0x800000
5251 #define MNT_DOOMED 0x1000000
5252 #define MNT_SYNC_UMOUNT 0x2000000
5253 +#define MNT_MARKED 0x4000000
5254
5255 struct vfsmount {
5256 struct dentry *mnt_root; /* root of the mounted tree */
5257 diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
5258 index 62fd1b756e99..947009ed5996 100644
5259 --- a/include/linux/sunrpc/svcsock.h
5260 +++ b/include/linux/sunrpc/svcsock.h
5261 @@ -56,6 +56,7 @@ int svc_recv(struct svc_rqst *, long);
5262 int svc_send(struct svc_rqst *);
5263 void svc_drop(struct svc_rqst *);
5264 void svc_sock_update_bufs(struct svc_serv *serv);
5265 +bool svc_alien_sock(struct net *net, int fd);
5266 int svc_addsock(struct svc_serv *serv, const int fd,
5267 char *name_return, const size_t len);
5268 void svc_init_xprt_sock(void);
5269 diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
5270 index f29e3a27c2cc..0e3ff30647d5 100644
5271 --- a/include/rdma/ib_cm.h
5272 +++ b/include/rdma/ib_cm.h
5273 @@ -601,5 +601,4 @@ struct ib_cm_sidr_rep_param {
5274 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
5275 struct ib_cm_sidr_rep_param *param);
5276
5277 -int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac);
5278 #endif /* IB_CM_H */
5279 diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
5280 index 335e8a7cad39..c140620dad92 100644
5281 --- a/include/uapi/linux/libc-compat.h
5282 +++ b/include/uapi/linux/libc-compat.h
5283 @@ -85,6 +85,12 @@
5284
5285 #endif /* _NETINET_IN_H */
5286
5287 +/* Definitions for xattr.h */
5288 +#if defined(_SYS_XATTR_H)
5289 +#define __UAPI_DEF_XATTR 0
5290 +#else
5291 +#define __UAPI_DEF_XATTR 1
5292 +#endif
5293
5294 /* If we did not see any headers from any supported C libraries,
5295 * or we are being included in the kernel, then define everything
5296 @@ -98,6 +104,9 @@
5297 #define __UAPI_DEF_IPV6_MREQ 1
5298 #define __UAPI_DEF_IPPROTO_V6 1
5299
5300 +/* Definitions for xattr.h */
5301 +#define __UAPI_DEF_XATTR 1
5302 +
5303 #endif /* __GLIBC__ */
5304
5305 #endif /* _UAPI_LIBC_COMPAT_H */
5306 diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
5307 index 6ae7bbe988cc..fe94bb90582e 100644
5308 --- a/include/uapi/linux/videodev2.h
5309 +++ b/include/uapi/linux/videodev2.h
5310 @@ -1059,14 +1059,14 @@ struct v4l2_bt_timings {
5311
5312 /* A few useful defines to calculate the total blanking and frame sizes */
5313 #define V4L2_DV_BT_BLANKING_WIDTH(bt) \
5314 - (bt->hfrontporch + bt->hsync + bt->hbackporch)
5315 + ((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)
5316 #define V4L2_DV_BT_FRAME_WIDTH(bt) \
5317 - (bt->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
5318 + ((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
5319 #define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
5320 - (bt->vfrontporch + bt->vsync + bt->vbackporch + \
5321 - bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch)
5322 + ((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
5323 + (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
5324 #define V4L2_DV_BT_FRAME_HEIGHT(bt) \
5325 - (bt->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
5326 + ((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
5327
5328 /** struct v4l2_dv_timings - DV timings
5329 * @type: the type of the timings
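The hunk above parenthesizes the bt argument inside the V4L2_DV_BT_* helper macros so they stay correct when given a non-trivial pointer expression. A minimal user-space sketch of why that matters, with an illustrative struct rather than the real v4l2_bt_timings definition:

#include <stdio.h>

/* Illustrative struct; it only mimics the fields the macros touch. */
struct bt_timings { unsigned hfrontporch, hsync, hbackporch; };

/* Unparenthesized argument, as before the fix (defined only to show the
 * expansion problem; never expanded below). */
#define BLANKING_WIDTH_OLD(bt) (bt->hfrontporch + bt->hsync + bt->hbackporch)

/* Parenthesized argument, as after the fix. */
#define BLANKING_WIDTH_NEW(bt) \
	((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)

int main(void)
{
	struct bt_timings t[2] = { { 8, 4, 8 }, { 16, 8, 16 } };
	struct bt_timings *p = t;

	/* BLANKING_WIDTH_OLD(p + 1) would expand to
	 * (p + 1->hfrontporch + ...), which does not even compile;
	 * the parenthesized form evaluates (p + 1)->... as intended. */
	printf("%u\n", BLANKING_WIDTH_NEW(p + 1));	/* prints 40 */
	return 0;
}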
5330 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
5331 index 40bbc04b6f81..c38355c1f3c9 100644
5332 --- a/include/uapi/linux/xattr.h
5333 +++ b/include/uapi/linux/xattr.h
5334 @@ -7,11 +7,18 @@
5335 Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
5336 Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
5337 */
5338 +
5339 +#include <linux/libc-compat.h>
5340 +
5341 #ifndef _UAPI_LINUX_XATTR_H
5342 #define _UAPI_LINUX_XATTR_H
5343
5344 +#ifdef __UAPI_DEF_XATTR
5345 +#define __USE_KERNEL_XATTR_DEFS
5346 +
5347 #define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
5348 #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
5349 +#endif
5350
5351 /* Namespaces */
5352 #define XATTR_OS2_PREFIX "os2."
5353 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
5354 index 49e13e1f8fe6..aae21e842918 100644
5355 --- a/kernel/sysctl.c
5356 +++ b/kernel/sysctl.c
5357 @@ -144,6 +144,11 @@ static int min_percpu_pagelist_fract = 8;
5358 static int ngroups_max = NGROUPS_MAX;
5359 static const int cap_last_cap = CAP_LAST_CAP;
5360
5361 +/* This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
5362 +#ifdef CONFIG_DETECT_HUNG_TASK
5363 +static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
5364 +#endif
5365 +
5366 #ifdef CONFIG_INOTIFY_USER
5367 #include <linux/inotify.h>
5368 #endif
5369 @@ -995,6 +1000,7 @@ static struct ctl_table kern_table[] = {
5370 .maxlen = sizeof(unsigned long),
5371 .mode = 0644,
5372 .proc_handler = proc_dohung_task_timeout_secs,
5373 + .extra2 = &hung_task_timeout_max,
5374 },
5375 {
5376 .procname = "hung_task_warnings",
5377 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5378 index c01cb9fedb18..2de3c845f03a 100644
5379 --- a/mm/hugetlb.c
5380 +++ b/mm/hugetlb.c
5381 @@ -1509,6 +1509,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
5382 while (min_count < persistent_huge_pages(h)) {
5383 if (!free_pool_huge_page(h, nodes_allowed, 0))
5384 break;
5385 + cond_resched_lock(&hugetlb_lock);
5386 }
5387 while (count < persistent_huge_pages(h)) {
5388 if (!adjust_pool_surplus(h, nodes_allowed, 1))
5389 diff --git a/mm/internal.h b/mm/internal.h
5390 index 29e1e761f9eb..3e910000fda4 100644
5391 --- a/mm/internal.h
5392 +++ b/mm/internal.h
5393 @@ -370,5 +370,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
5394 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
5395 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
5396 #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
5397 +#define ALLOC_FAIR 0x100 /* fair zone allocation */
5398
5399 #endif /* __MM_INTERNAL_H */
5400 diff --git a/mm/mlock.c b/mm/mlock.c
5401 index 4e1a68162285..b1eb53634005 100644
5402 --- a/mm/mlock.c
5403 +++ b/mm/mlock.c
5404 @@ -79,6 +79,7 @@ void clear_page_mlock(struct page *page)
5405 */
5406 void mlock_vma_page(struct page *page)
5407 {
5408 + /* Serialize with page migration */
5409 BUG_ON(!PageLocked(page));
5410
5411 if (!TestSetPageMlocked(page)) {
5412 @@ -174,6 +175,7 @@ unsigned int munlock_vma_page(struct page *page)
5413 unsigned int nr_pages;
5414 struct zone *zone = page_zone(page);
5415
5416 + /* For try_to_munlock() and to serialize with page migration */
5417 BUG_ON(!PageLocked(page));
5418
5419 /*
5420 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5421 index 3bac76ae4b30..7387a671234e 100644
5422 --- a/mm/page_alloc.c
5423 +++ b/mm/page_alloc.c
5424 @@ -1238,15 +1238,6 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
5425 }
5426 local_irq_restore(flags);
5427 }
5428 -static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5429 -{
5430 - return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
5431 -}
5432 -#else
5433 -static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5434 -{
5435 - return false;
5436 -}
5437 #endif
5438
5439 /*
5440 @@ -1583,12 +1574,7 @@ again:
5441 get_pageblock_migratetype(page));
5442 }
5443
5444 - /*
5445 - * NOTE: GFP_THISNODE allocations do not partake in the kswapd
5446 - * aging protocol, so they can't be fair.
5447 - */
5448 - if (!gfp_thisnode_allocation(gfp_flags))
5449 - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5450 + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5451
5452 __count_zone_vm_events(PGALLOC, zone, 1 << order);
5453 zone_statistics(preferred_zone, zone, gfp_flags);
5454 @@ -1954,23 +1940,12 @@ zonelist_scan:
5455 * zone size to ensure fair page aging. The zone a
5456 * page was allocated in should have no effect on the
5457 * time the page has in memory before being reclaimed.
5458 - *
5459 - * Try to stay in local zones in the fastpath. If
5460 - * that fails, the slowpath is entered, which will do
5461 - * another pass starting with the local zones, but
5462 - * ultimately fall back to remote zones that do not
5463 - * partake in the fairness round-robin cycle of this
5464 - * zonelist.
5465 - *
5466 - * NOTE: GFP_THISNODE allocations do not partake in
5467 - * the kswapd aging protocol, so they can't be fair.
5468 */
5469 - if ((alloc_flags & ALLOC_WMARK_LOW) &&
5470 - !gfp_thisnode_allocation(gfp_mask)) {
5471 - if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
5472 - continue;
5473 + if (alloc_flags & ALLOC_FAIR) {
5474 if (!zone_local(preferred_zone, zone))
5475 continue;
5476 + if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
5477 + continue;
5478 }
5479 /*
5480 * When allocating a page cache page for writing, we
5481 @@ -2408,32 +2383,40 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
5482 return page;
5483 }
5484
5485 -static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
5486 - struct zonelist *zonelist,
5487 - enum zone_type high_zoneidx,
5488 - struct zone *preferred_zone)
5489 +static void reset_alloc_batches(struct zonelist *zonelist,
5490 + enum zone_type high_zoneidx,
5491 + struct zone *preferred_zone)
5492 {
5493 struct zoneref *z;
5494 struct zone *zone;
5495
5496 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
5497 - if (!(gfp_mask & __GFP_NO_KSWAPD))
5498 - wakeup_kswapd(zone, order, zone_idx(preferred_zone));
5499 /*
5500 * Only reset the batches of zones that were actually
5501 - * considered in the fast path, we don't want to
5502 - * thrash fairness information for zones that are not
5503 + * considered in the fairness pass, we don't want to
5504 + * trash fairness information for zones that are not
5505 * actually part of this zonelist's round-robin cycle.
5506 */
5507 if (!zone_local(preferred_zone, zone))
5508 continue;
5509 mod_zone_page_state(zone, NR_ALLOC_BATCH,
5510 - high_wmark_pages(zone) -
5511 - low_wmark_pages(zone) -
5512 - zone_page_state(zone, NR_ALLOC_BATCH));
5513 + high_wmark_pages(zone) - low_wmark_pages(zone) -
5514 + atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5515 }
5516 }
5517
5518 +static void wake_all_kswapds(unsigned int order,
5519 + struct zonelist *zonelist,
5520 + enum zone_type high_zoneidx,
5521 + struct zone *preferred_zone)
5522 +{
5523 + struct zoneref *z;
5524 + struct zone *zone;
5525 +
5526 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
5527 + wakeup_kswapd(zone, order, zone_idx(preferred_zone));
5528 +}
5529 +
5530 static inline int
5531 gfp_to_alloc_flags(gfp_t gfp_mask)
5532 {
5533 @@ -2522,12 +2505,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5534 * allowed per node queues are empty and that nodes are
5535 * over allocated.
5536 */
5537 - if (gfp_thisnode_allocation(gfp_mask))
5538 + if (IS_ENABLED(CONFIG_NUMA) &&
5539 + (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
5540 goto nopage;
5541
5542 restart:
5543 - prepare_slowpath(gfp_mask, order, zonelist,
5544 - high_zoneidx, preferred_zone);
5545 + if (!(gfp_mask & __GFP_NO_KSWAPD))
5546 + wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone);
5547
5548 /*
5549 * OK, we're below the kswapd watermark and have kicked background
5550 @@ -2711,7 +2695,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
5551 struct page *page = NULL;
5552 int migratetype = allocflags_to_migratetype(gfp_mask);
5553 unsigned int cpuset_mems_cookie;
5554 - int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
5555 + int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
5556 struct mem_cgroup *memcg = NULL;
5557
5558 gfp_mask &= gfp_allowed_mask;
5559 @@ -2752,12 +2736,29 @@ retry_cpuset:
5560 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
5561 alloc_flags |= ALLOC_CMA;
5562 #endif
5563 +retry:
5564 /* First allocation attempt */
5565 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
5566 zonelist, high_zoneidx, alloc_flags,
5567 preferred_zone, migratetype);
5568 if (unlikely(!page)) {
5569 /*
5570 + * The first pass makes sure allocations are spread
5571 + * fairly within the local node. However, the local
5572 + * node might have free pages left after the fairness
5573 + * batches are exhausted, and remote zones haven't
5574 + * even been considered yet. Try once more without
5575 + * fairness, and include remote zones now, before
5576 + * entering the slowpath and waking kswapd: prefer
5577 + * spilling to a remote zone over swapping locally.
5578 + */
5579 + if (alloc_flags & ALLOC_FAIR) {
5580 + reset_alloc_batches(zonelist, high_zoneidx,
5581 + preferred_zone);
5582 + alloc_flags &= ~ALLOC_FAIR;
5583 + goto retry;
5584 + }
5585 + /*
5586 * Runtime PM, block IO and its error handling path
5587 * can deadlock because I/O on the device might not
5588 * complete.
5589 diff --git a/mm/rmap.c b/mm/rmap.c
5590 index 8fc049f9a5a6..d3cbac508c2f 100644
5591 --- a/mm/rmap.c
5592 +++ b/mm/rmap.c
5593 @@ -1322,9 +1322,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
5594 BUG_ON(!page || PageAnon(page));
5595
5596 if (locked_vma) {
5597 - mlock_vma_page(page); /* no-op if already mlocked */
5598 - if (page == check_page)
5599 + if (page == check_page) {
5600 + /* we know we have check_page locked */
5601 + mlock_vma_page(page);
5602 ret = SWAP_MLOCK;
5603 + } else if (trylock_page(page)) {
5604 + /*
5605 + * If we can lock the page, perform mlock.
5606 +			 * Otherwise leave the page alone; it will be
5607 +			 * encountered again later.
5608 + */
5609 + mlock_vma_page(page);
5610 + unlock_page(page);
5611 + }
5612 continue; /* don't unmap */
5613 }
5614
5615 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5616 index b6e59f0a9475..d06cb8752dcd 100644
5617 --- a/net/sunrpc/svcsock.c
5618 +++ b/net/sunrpc/svcsock.c
5619 @@ -1397,6 +1397,22 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
5620 return svsk;
5621 }
5622
5623 +bool svc_alien_sock(struct net *net, int fd)
5624 +{
5625 + int err;
5626 + struct socket *sock = sockfd_lookup(fd, &err);
5627 + bool ret = false;
5628 +
5629 + if (!sock)
5630 + goto out;
5631 + if (sock_net(sock->sk) != net)
5632 + ret = true;
5633 + sockfd_put(sock);
5634 +out:
5635 + return ret;
5636 +}
5637 +EXPORT_SYMBOL_GPL(svc_alien_sock);
5638 +
5639 /**
5640 * svc_addsock - add a listener socket to an RPC service
5641 * @serv: pointer to RPC service to which to add a new listener
5642 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5643 index 8d0a84436674..41106813a9aa 100644
5644 --- a/sound/pci/hda/patch_realtek.c
5645 +++ b/sound/pci/hda/patch_realtek.c
5646 @@ -993,6 +993,7 @@ static int alc_codec_rename_from_preset(struct hda_codec *codec)
5647
5648 static const struct snd_pci_quirk beep_white_list[] = {
5649 SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
5650 + SND_PCI_QUIRK(0x1043, 0x115d, "ASUS", 1),
5651 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
5652 SND_PCI_QUIRK(0x1043, 0x8376, "EeePC", 1),
5653 SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
5654 @@ -2786,6 +2787,89 @@ static void alc269_shutup(struct hda_codec *codec)
5655 snd_hda_shutup_pins(codec);
5656 }
5657
5658 +static void alc283_restore_default_value(struct hda_codec *codec)
5659 +{
5660 + int val;
5661 +
5662 + /* Power Down Control */
5663 + alc_write_coef_idx(codec, 0x03, 0x0002);
5664 + /* FIFO and filter clock */
5665 + alc_write_coef_idx(codec, 0x05, 0x0700);
5666 + /* DMIC control */
5667 + alc_write_coef_idx(codec, 0x07, 0x0200);
5668 + /* Analog clock */
5669 + val = alc_read_coef_idx(codec, 0x06);
5670 + alc_write_coef_idx(codec, 0x06, (val & ~0x00f0) | 0x0);
5671 + /* JD */
5672 + val = alc_read_coef_idx(codec, 0x08);
5673 + alc_write_coef_idx(codec, 0x08, (val & ~0xfffc) | 0x0c2c);
5674 + /* JD offset1 */
5675 + alc_write_coef_idx(codec, 0x0a, 0xcccc);
5676 + /* JD offset2 */
5677 + alc_write_coef_idx(codec, 0x0b, 0xcccc);
5678 + /* LDO1/2/3, DAC/ADC */
5679 + alc_write_coef_idx(codec, 0x0e, 0x6fc0);
5680 + /* JD */
5681 + val = alc_read_coef_idx(codec, 0x0f);
5682 + alc_write_coef_idx(codec, 0x0f, (val & ~0xf800) | 0x1000);
5683 + /* Capless */
5684 + val = alc_read_coef_idx(codec, 0x10);
5685 + alc_write_coef_idx(codec, 0x10, (val & ~0xfc00) | 0x0c00);
5686 + /* Class D test 4 */
5687 + alc_write_coef_idx(codec, 0x3a, 0x0);
5688 + /* IO power down directly */
5689 + val = alc_read_coef_idx(codec, 0x0c);
5690 + alc_write_coef_idx(codec, 0x0c, (val & ~0xfe00) | 0x0);
5691 + /* ANC */
5692 + alc_write_coef_idx(codec, 0x22, 0xa0c0);
5693 + /* AGC MUX */
5694 + val = alc_read_coefex_idx(codec, 0x53, 0x01);
5695 + alc_write_coefex_idx(codec, 0x53, 0x01, (val & ~0x000f) | 0x0008);
5696 + /* DAC simple content protection */
5697 + val = alc_read_coef_idx(codec, 0x1d);
5698 + alc_write_coef_idx(codec, 0x1d, (val & ~0x00e0) | 0x0);
5699 + /* ADC simple content protection */
5700 + val = alc_read_coef_idx(codec, 0x1f);
5701 + alc_write_coef_idx(codec, 0x1f, (val & ~0x00e0) | 0x0);
5702 + /* DAC ADC Zero Detection */
5703 + alc_write_coef_idx(codec, 0x21, 0x8804);
5704 + /* PLL */
5705 + alc_write_coef_idx(codec, 0x2e, 0x2902);
5706 + /* capless control 2 */
5707 + alc_write_coef_idx(codec, 0x33, 0xa080);
5708 + /* capless control 3 */
5709 + alc_write_coef_idx(codec, 0x34, 0x3400);
5710 + /* capless control 4 */
5711 + alc_write_coef_idx(codec, 0x35, 0x2f3e);
5712 + /* capless control 5 */
5713 + alc_write_coef_idx(codec, 0x36, 0x0);
5714 + /* class D test 2 */
5715 + val = alc_read_coef_idx(codec, 0x38);
5716 + alc_write_coef_idx(codec, 0x38, (val & ~0x0fff) | 0x0900);
5717 + /* class D test 3 */
5718 + alc_write_coef_idx(codec, 0x39, 0x110a);
5719 + /* class D test 5 */
5720 + val = alc_read_coef_idx(codec, 0x3b);
5721 + alc_write_coef_idx(codec, 0x3b, (val & ~0x00f8) | 0x00d8);
5722 + /* class D test 6 */
5723 + alc_write_coef_idx(codec, 0x3c, 0x0014);
5724 + /* classD OCP */
5725 + alc_write_coef_idx(codec, 0x3d, 0xc2ba);
5726 + /* classD pure DC test */
5727 + val = alc_read_coef_idx(codec, 0x42);
5728 + alc_write_coef_idx(codec, 0x42, (val & ~0x0f80) | 0x0);
5729 + /* test mode */
5730 + alc_write_coef_idx(codec, 0x49, 0x0);
5731 + /* Class D DC enable */
5732 + val = alc_read_coef_idx(codec, 0x40);
5733 + alc_write_coef_idx(codec, 0x40, (val & ~0xf800) | 0x9800);
5734 + /* DC offset */
5735 + val = alc_read_coef_idx(codec, 0x42);
5736 + alc_write_coef_idx(codec, 0x42, (val & ~0xf000) | 0x2000);
5737 + /* Class D amp control */
5738 + alc_write_coef_idx(codec, 0x37, 0xfc06);
5739 +}
5740 +
5741 static void alc283_init(struct hda_codec *codec)
5742 {
5743 struct alc_spec *spec = codec->spec;
5744 @@ -2793,6 +2877,8 @@ static void alc283_init(struct hda_codec *codec)
5745 bool hp_pin_sense;
5746 int val;
5747
5748 + alc283_restore_default_value(codec);
5749 +
5750 if (!hp_pin)
5751 return;
5752 hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
5753 @@ -3126,8 +3212,9 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
5754
5755 if (spec->mute_led_polarity)
5756 enabled = !enabled;
5757 - pinval = AC_PINCTL_IN_EN |
5758 - (enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80);
5759 + pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
5760 + pinval &= ~AC_PINCTL_VREFEN;
5761 + pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
5762 if (spec->mute_led_nid)
5763 snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
5764 }
5765 @@ -4319,6 +4406,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5766 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
5767 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5768 SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5769 + SND_PCI_QUIRK(0x1028, 0x062c, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5770 + SND_PCI_QUIRK(0x1028, 0x062e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5771 + SND_PCI_QUIRK(0x1028, 0x0632, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5772 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
5773 SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5774 SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5775 @@ -4618,6 +4708,7 @@ static int patch_alc269(struct hda_codec *codec)
5776 spec->codec_variant = ALC269_TYPE_ALC285;
5777 break;
5778 case 0x10ec0286:
5779 + case 0x10ec0288:
5780 spec->codec_variant = ALC269_TYPE_ALC286;
5781 break;
5782 case 0x10ec0255:
5783 @@ -5184,6 +5275,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5784 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5785 SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
5786 SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
5787 + SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5788 + SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5789 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
5790 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
5791 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
5792 @@ -5424,6 +5517,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
5793 { .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
5794 { .id = 0x10ec0285, .name = "ALC285", .patch = patch_alc269 },
5795 { .id = 0x10ec0286, .name = "ALC286", .patch = patch_alc269 },
5796 + { .id = 0x10ec0288, .name = "ALC288", .patch = patch_alc269 },
5797 { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
5798 { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
5799 { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 },
5800 diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
5801 index 28ec872e54c0..b6e278fe8f67 100644
5802 --- a/sound/pci/ice1712/ice1712.c
5803 +++ b/sound/pci/ice1712/ice1712.c
5804 @@ -685,9 +685,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pointer(struct snd_pcm_substream *
5805 if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
5806 return 0;
5807 ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
5808 + ptr = bytes_to_frames(substream->runtime, ptr);
5809 if (ptr == runtime->buffer_size)
5810 ptr = 0;
5811 - return bytes_to_frames(substream->runtime, ptr);
5812 + return ptr;
5813 }
5814
5815 static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
5816 @@ -704,9 +705,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substrea
5817 addr = ICE1712_DSC_ADDR0;
5818 ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
5819 ice->playback_con_virt_addr[substream->number];
5820 + ptr = bytes_to_frames(substream->runtime, ptr);
5821 if (ptr == substream->runtime->buffer_size)
5822 ptr = 0;
5823 - return bytes_to_frames(substream->runtime, ptr);
5824 + return ptr;
5825 }
5826
5827 static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
5828 @@ -717,9 +719,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *s
5829 if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
5830 return 0;
5831 ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
5832 + ptr = bytes_to_frames(substream->runtime, ptr);
5833 if (ptr == substream->runtime->buffer_size)
5834 ptr = 0;
5835 - return bytes_to_frames(substream->runtime, ptr);
5836 + return ptr;
5837 }
5838
5839 static const struct snd_pcm_hardware snd_ice1712_playback = {
5840 @@ -1113,9 +1116,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pro_pointer(struct snd_pcm_substre
5841 if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
5842 return 0;
5843 ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
5844 + ptr = bytes_to_frames(substream->runtime, ptr);
5845 if (ptr == substream->runtime->buffer_size)
5846 ptr = 0;
5847 - return bytes_to_frames(substream->runtime, ptr);
5848 + return ptr;
5849 }
5850
5851 static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
5852 @@ -1126,9 +1130,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substrea
5853 if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
5854 return 0;
5855 ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
5856 + ptr = bytes_to_frames(substream->runtime, ptr);
5857 if (ptr == substream->runtime->buffer_size)
5858 ptr = 0;
5859 - return bytes_to_frames(substream->runtime, ptr);
5860 + return ptr;
5861 }
5862
5863 static const struct snd_pcm_hardware snd_ice1712_playback_pro = {
5864 diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
5865 index 6e9ea8379a91..7a272fa90b39 100644
5866 --- a/sound/soc/codecs/cs42l51.c
5867 +++ b/sound/soc/codecs/cs42l51.c
5868 @@ -124,9 +124,8 @@ static int cs42l51_set_chan_mix(struct snd_kcontrol *kcontrol,
5869
5870 static const DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -5150, 50, 0);
5871 static const DECLARE_TLV_DB_SCALE(tone_tlv, -1050, 150, 0);
5872 -/* This is a lie. after -102 db, it stays at -102 */
5873 -/* maybe a range would be better */
5874 -static const DECLARE_TLV_DB_SCALE(aout_tlv, -11550, 50, 0);
5875 +
5876 +static const DECLARE_TLV_DB_SCALE(aout_tlv, -10200, 50, 0);
5877
5878 static const DECLARE_TLV_DB_SCALE(boost_tlv, 1600, 1600, 0);
5879 static const char *chan_mix[] = {
5880 @@ -141,7 +140,7 @@ static const struct soc_enum cs42l51_chan_mix =
5881 static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
5882 SOC_DOUBLE_R_SX_TLV("PCM Playback Volume",
5883 CS42L51_PCMA_VOL, CS42L51_PCMB_VOL,
5884 - 6, 0x19, 0x7F, adc_pcm_tlv),
5885 + 0, 0x19, 0x7F, adc_pcm_tlv),
5886 SOC_DOUBLE_R("PCM Playback Switch",
5887 CS42L51_PCMA_VOL, CS42L51_PCMB_VOL, 7, 1, 1),
5888 SOC_DOUBLE_R_SX_TLV("Analog Playback Volume",
5889 @@ -149,7 +148,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
5890 0, 0x34, 0xE4, aout_tlv),
5891 SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
5892 CS42L51_ADCA_VOL, CS42L51_ADCB_VOL,
5893 - 6, 0x19, 0x7F, adc_pcm_tlv),
5894 + 0, 0x19, 0x7F, adc_pcm_tlv),
5895 SOC_DOUBLE_R("ADC Mixer Switch",
5896 CS42L51_ADCA_VOL, CS42L51_ADCB_VOL, 7, 1, 1),
5897 SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
5898 diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
5899 index 0bac6d5a4ac8..1102ced9b20e 100644
5900 --- a/sound/soc/codecs/cs42l52.c
5901 +++ b/sound/soc/codecs/cs42l52.c
5902 @@ -347,7 +347,7 @@ static const char * const right_swap_text[] = {
5903 static const unsigned int swap_values[] = { 0, 1, 3 };
5904
5905 static const struct soc_enum adca_swap_enum =
5906 - SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 2, 1,
5907 + SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 2, 3,
5908 ARRAY_SIZE(left_swap_text),
5909 left_swap_text,
5910 swap_values);
5911 @@ -356,7 +356,7 @@ static const struct snd_kcontrol_new adca_mixer =
5912 SOC_DAPM_ENUM("Route", adca_swap_enum);
5913
5914 static const struct soc_enum pcma_swap_enum =
5915 - SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 6, 1,
5916 + SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 6, 3,
5917 ARRAY_SIZE(left_swap_text),
5918 left_swap_text,
5919 swap_values);
5920 @@ -365,7 +365,7 @@ static const struct snd_kcontrol_new pcma_mixer =
5921 SOC_DAPM_ENUM("Route", pcma_swap_enum);
5922
5923 static const struct soc_enum adcb_swap_enum =
5924 - SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 0, 1,
5925 + SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 0, 3,
5926 ARRAY_SIZE(right_swap_text),
5927 right_swap_text,
5928 swap_values);
5929 @@ -374,7 +374,7 @@ static const struct snd_kcontrol_new adcb_mixer =
5930 SOC_DAPM_ENUM("Route", adcb_swap_enum);
5931
5932 static const struct soc_enum pcmb_swap_enum =
5933 - SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 4, 1,
5934 + SOC_VALUE_ENUM_SINGLE(CS42L52_ADC_PCM_MIXER, 4, 3,
5935 ARRAY_SIZE(right_swap_text),
5936 right_swap_text,
5937 swap_values);
5938 diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
5939 index 549d5d6a3fef..7b95f7cbc515 100644
5940 --- a/sound/soc/codecs/cs42l73.c
5941 +++ b/sound/soc/codecs/cs42l73.c
5942 @@ -319,7 +319,7 @@ static const char * const cs42l73_mono_mix_texts[] = {
5943 static const unsigned int cs42l73_mono_mix_values[] = { 0, 1, 2 };
5944
5945 static const struct soc_enum spk_asp_enum =
5946 - SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 6, 1,
5947 + SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 6, 3,
5948 ARRAY_SIZE(cs42l73_mono_mix_texts),
5949 cs42l73_mono_mix_texts,
5950 cs42l73_mono_mix_values);
5951 @@ -337,7 +337,7 @@ static const struct snd_kcontrol_new spk_xsp_mixer =
5952 SOC_DAPM_ENUM("Route", spk_xsp_enum);
5953
5954 static const struct soc_enum esl_asp_enum =
5955 - SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 2, 5,
5956 + SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 2, 3,
5957 ARRAY_SIZE(cs42l73_mono_mix_texts),
5958 cs42l73_mono_mix_texts,
5959 cs42l73_mono_mix_values);
5960 @@ -346,7 +346,7 @@ static const struct snd_kcontrol_new esl_asp_mixer =
5961 SOC_DAPM_ENUM("Route", esl_asp_enum);
5962
5963 static const struct soc_enum esl_xsp_enum =
5964 - SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 0, 7,
5965 + SOC_VALUE_ENUM_SINGLE(CS42L73_MMIXCTL, 0, 3,
5966 ARRAY_SIZE(cs42l73_mono_mix_texts),
5967 cs42l73_mono_mix_texts,
5968 cs42l73_mono_mix_values);
5969 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5970 index 28522bd03b8e..47e1ce771e65 100644
5971 --- a/sound/soc/soc-pcm.c
5972 +++ b/sound/soc/soc-pcm.c
5973 @@ -1989,7 +1989,6 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
5974
5975 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
5976 if (paths < 0) {
5977 - dpcm_path_put(&list);
5978 dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
5979 fe->dai_link->name, "playback");
5980 mutex_unlock(&card->mutex);
5981 @@ -2019,7 +2018,6 @@ capture:
5982
5983 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
5984 if (paths < 0) {
5985 - dpcm_path_put(&list);
5986 dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
5987 fe->dai_link->name, "capture");
5988 mutex_unlock(&card->mutex);
5989 @@ -2084,7 +2082,6 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
5990 fe->dpcm[stream].runtime = fe_substream->runtime;
5991
5992 if (dpcm_path_get(fe, stream, &list) <= 0) {
5993 - dpcm_path_put(&list);
5994 dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
5995 fe->dai_link->name, stream ? "capture" : "playback");
5996 }