Magellan Linux

Contents of /trunk/kernel-mcore/patches-3.0-r2/0104-3.0.5-all-fixes.patch

Parent Directory | Revision Log


Revision 1560 - (show annotations) (download)
Thu Nov 10 14:21:33 2011 UTC (12 years, 5 months ago) by niro
File size: 398927 byte(s)
3.0-mcore-r2, updated to linux-3.0.8
1 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
2 index 9adc278..91c84cb 100644
3 --- a/arch/arm/Kconfig
4 +++ b/arch/arm/Kconfig
5 @@ -1298,6 +1298,20 @@ source "drivers/pci/Kconfig"
6
7 source "drivers/pcmcia/Kconfig"
8
9 +config ARM_ERRATA_764369
10 + bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
11 + depends on CPU_V7 && SMP
12 + help
13 + This option enables the workaround for erratum 764369
14 + affecting Cortex-A9 MPCore with two or more processors (all
15 + current revisions). Under certain timing circumstances, a data
16 + cache line maintenance operation by MVA targeting an Inner
17 + Shareable memory region may fail to proceed up to either the
18 + Point of Coherency or to the Point of Unification of the
19 + system. This workaround adds a DSB instruction before the
20 + relevant cache maintenance functions and sets a specific bit
21 + in the diagnostic control register of the SCU.
22 +
23 endmenu
24
25 menu "Kernel Features"
26 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
27 index 8c73900..253cc86 100644
28 --- a/arch/arm/include/asm/futex.h
29 +++ b/arch/arm/include/asm/futex.h
30 @@ -25,17 +25,17 @@
31
32 #ifdef CONFIG_SMP
33
34 -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
35 +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
36 smp_mb(); \
37 __asm__ __volatile__( \
38 - "1: ldrex %1, [%2]\n" \
39 + "1: ldrex %1, [%3]\n" \
40 " " insn "\n" \
41 - "2: strex %1, %0, [%2]\n" \
42 - " teq %1, #0\n" \
43 + "2: strex %2, %0, [%3]\n" \
44 + " teq %2, #0\n" \
45 " bne 1b\n" \
46 " mov %0, #0\n" \
47 - __futex_atomic_ex_table("%4") \
48 - : "=&r" (ret), "=&r" (oldval) \
49 + __futex_atomic_ex_table("%5") \
50 + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
51 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
52 : "cc", "memory")
53
54 @@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
55 #include <linux/preempt.h>
56 #include <asm/domain.h>
57
58 -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
59 +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
60 __asm__ __volatile__( \
61 - "1: " T(ldr) " %1, [%2]\n" \
62 + "1: " T(ldr) " %1, [%3]\n" \
63 " " insn "\n" \
64 - "2: " T(str) " %0, [%2]\n" \
65 + "2: " T(str) " %0, [%3]\n" \
66 " mov %0, #0\n" \
67 - __futex_atomic_ex_table("%4") \
68 - : "=&r" (ret), "=&r" (oldval) \
69 + __futex_atomic_ex_table("%5") \
70 + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
71 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
72 : "cc", "memory")
73
74 @@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
75 int cmp = (encoded_op >> 24) & 15;
76 int oparg = (encoded_op << 8) >> 20;
77 int cmparg = (encoded_op << 20) >> 20;
78 - int oldval = 0, ret;
79 + int oldval = 0, ret, tmp;
80
81 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
82 oparg = 1 << oparg;
83 @@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84
85 switch (op) {
86 case FUTEX_OP_SET:
87 - __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
88 + __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
89 break;
90 case FUTEX_OP_ADD:
91 - __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
92 + __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
93 break;
94 case FUTEX_OP_OR:
95 - __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
96 + __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
97 break;
98 case FUTEX_OP_ANDN:
99 - __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
100 + __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
101 break;
102 case FUTEX_OP_XOR:
103 - __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
104 + __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
105 break;
106 default:
107 ret = -ENOSYS;
108 diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
109 index 16bd480..bfa706f 100644
110 --- a/arch/arm/include/asm/hardware/cache-l2x0.h
111 +++ b/arch/arm/include/asm/hardware/cache-l2x0.h
112 @@ -64,7 +64,7 @@
113 #define L2X0_AUX_CTRL_MASK 0xc0000fff
114 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
115 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
116 -#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
117 +#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
118 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
119 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
120 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
121 diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
122 index a1e757c..cb7dd40 100644
123 --- a/arch/arm/kernel/smp_scu.c
124 +++ b/arch/arm/kernel/smp_scu.c
125 @@ -13,6 +13,7 @@
126
127 #include <asm/smp_scu.h>
128 #include <asm/cacheflush.h>
129 +#include <asm/cputype.h>
130
131 #define SCU_CTRL 0x00
132 #define SCU_CONFIG 0x04
133 @@ -36,6 +37,15 @@ void __init scu_enable(void __iomem *scu_base)
134 {
135 u32 scu_ctrl;
136
137 +#ifdef CONFIG_ARM_ERRATA_764369
138 + /* Cortex-A9 only */
139 + if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
140 + scu_ctrl = __raw_readl(scu_base + 0x30);
141 + if (!(scu_ctrl & 1))
142 + __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
143 + }
144 +#endif
145 +
146 scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
147 /* already enabled? */
148 if (scu_ctrl & 1)
149 diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
150 index a7b41bf..e83cc86 100644
151 --- a/arch/arm/mach-davinci/board-da850-evm.c
152 +++ b/arch/arm/mach-davinci/board-da850-evm.c
153 @@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
154 },
155 };
156
157 +#ifdef CONFIG_MTD
158 +static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
159 +{
160 + char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
161 + size_t retlen;
162 +
163 + if (!strcmp(mtd->name, "MAC-Address")) {
164 + mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
165 + if (retlen == ETH_ALEN)
166 + pr_info("Read MAC addr from SPI Flash: %pM\n",
167 + mac_addr);
168 + }
169 +}
170 +
171 +static struct mtd_notifier da850evm_spi_notifier = {
172 + .add = da850_evm_m25p80_notify_add,
173 +};
174 +
175 +static void da850_evm_setup_mac_addr(void)
176 +{
177 + register_mtd_user(&da850evm_spi_notifier);
178 +}
179 +#else
180 +static void da850_evm_setup_mac_addr(void) { }
181 +#endif
182 +
183 static struct mtd_partition da850_evm_norflash_partition[] = {
184 {
185 .name = "bootloaders + env",
186 @@ -1237,6 +1263,8 @@ static __init void da850_evm_init(void)
187 if (ret)
188 pr_warning("da850_evm_init: spi 1 registration failed: %d\n",
189 ret);
190 +
191 + da850_evm_setup_mac_addr();
192 }
193
194 #ifdef CONFIG_SERIAL_8250_CONSOLE
195 diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
196 index fb5e72b..5f1e045 100644
197 --- a/arch/arm/mach-davinci/sleep.S
198 +++ b/arch/arm/mach-davinci/sleep.S
199 @@ -217,7 +217,11 @@ ddr2clk_stop_done:
200 ENDPROC(davinci_ddr_psc_config)
201
202 CACHE_FLUSH:
203 - .word arm926_flush_kern_cache_all
204 +#ifdef CONFIG_CPU_V6
205 + .word v6_flush_kern_cache_all
206 +#else
207 + .word arm926_flush_kern_cache_all
208 +#endif
209
210 ENTRY(davinci_cpu_suspend_sz)
211 .word . - davinci_cpu_suspend
212 diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
213 index 5ed51b8..cf7e598 100644
214 --- a/arch/arm/mach-dove/common.c
215 +++ b/arch/arm/mach-dove/common.c
216 @@ -160,7 +160,7 @@ void __init dove_spi0_init(void)
217
218 void __init dove_spi1_init(void)
219 {
220 - orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk());
221 + orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
222 }
223
224 /*****************************************************************************
225 diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
226 index 2fbbdd5..fcf0ae9 100644
227 --- a/arch/arm/mach-integrator/integrator_ap.c
228 +++ b/arch/arm/mach-integrator/integrator_ap.c
229 @@ -337,15 +337,15 @@ static unsigned long timer_reload;
230 static void integrator_clocksource_init(u32 khz)
231 {
232 void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
233 - u32 ctrl = TIMER_CTRL_ENABLE;
234 + u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
235
236 if (khz >= 1500) {
237 khz /= 16;
238 - ctrl = TIMER_CTRL_DIV16;
239 + ctrl |= TIMER_CTRL_DIV16;
240 }
241
242 - writel(ctrl, base + TIMER_CTRL);
243 writel(0xffff, base + TIMER_LOAD);
244 + writel(ctrl, base + TIMER_CTRL);
245
246 clocksource_mmio_init(base + TIMER_VALUE, "timer2",
247 khz * 1000, 200, 16, clocksource_mmio_readl_down);
248 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
249 index d32f02b..3593119 100644
250 --- a/arch/arm/mm/cache-v7.S
251 +++ b/arch/arm/mm/cache-v7.S
252 @@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
253 dcache_line_size r2, r3
254 sub r3, r2, #1
255 bic r12, r0, r3
256 +#ifdef CONFIG_ARM_ERRATA_764369
257 + ALT_SMP(W(dsb))
258 + ALT_UP(W(nop))
259 +#endif
260 1:
261 USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
262 add r12, r12, r2
263 @@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
264 add r1, r0, r1
265 sub r3, r2, #1
266 bic r0, r0, r3
267 +#ifdef CONFIG_ARM_ERRATA_764369
268 + ALT_SMP(W(dsb))
269 + ALT_UP(W(nop))
270 +#endif
271 1:
272 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
273 add r0, r0, r2
274 @@ -247,6 +255,10 @@ v7_dma_inv_range:
275 sub r3, r2, #1
276 tst r0, r3
277 bic r0, r0, r3
278 +#ifdef CONFIG_ARM_ERRATA_764369
279 + ALT_SMP(W(dsb))
280 + ALT_UP(W(nop))
281 +#endif
282 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
283
284 tst r1, r3
285 @@ -270,6 +282,10 @@ v7_dma_clean_range:
286 dcache_line_size r2, r3
287 sub r3, r2, #1
288 bic r0, r0, r3
289 +#ifdef CONFIG_ARM_ERRATA_764369
290 + ALT_SMP(W(dsb))
291 + ALT_UP(W(nop))
292 +#endif
293 1:
294 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
295 add r0, r0, r2
296 @@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
297 dcache_line_size r2, r3
298 sub r3, r2, #1
299 bic r0, r0, r3
300 +#ifdef CONFIG_ARM_ERRATA_764369
301 + ALT_SMP(W(dsb))
302 + ALT_UP(W(nop))
303 +#endif
304 1:
305 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
306 add r0, r0, r2
307 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
308 index 82a093c..f96d2c7 100644
309 --- a/arch/arm/mm/dma-mapping.c
310 +++ b/arch/arm/mm/dma-mapping.c
311 @@ -322,6 +322,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
312
313 if (addr)
314 *handle = pfn_to_dma(dev, page_to_pfn(page));
315 + else
316 + __dma_free_buffer(page, size);
317
318 return addr;
319 }
320 diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
321 index 82620af..ebbce33 100644
322 --- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
323 +++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
324 @@ -66,7 +66,6 @@ typedef u64 iomux_v3_cfg_t;
325 #define MUX_MODE_MASK ((iomux_v3_cfg_t)0x1f << MUX_MODE_SHIFT)
326 #define MUX_PAD_CTRL_SHIFT 41
327 #define MUX_PAD_CTRL_MASK ((iomux_v3_cfg_t)0x1ffff << MUX_PAD_CTRL_SHIFT)
328 -#define NO_PAD_CTRL ((iomux_v3_cfg_t)1 << (MUX_PAD_CTRL_SHIFT + 16))
329 #define MUX_SEL_INPUT_SHIFT 58
330 #define MUX_SEL_INPUT_MASK ((iomux_v3_cfg_t)0xf << MUX_SEL_INPUT_SHIFT)
331
332 @@ -85,6 +84,7 @@ typedef u64 iomux_v3_cfg_t;
333 * Use to set PAD control
334 */
335
336 +#define NO_PAD_CTRL (1 << 16)
337 #define PAD_CTL_DVS (1 << 13)
338 #define PAD_CTL_HYS (1 << 8)
339
340 diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
341 index b3fd081..cdd765b 100644
342 --- a/arch/powerpc/sysdev/fsl_rio.c
343 +++ b/arch/powerpc/sysdev/fsl_rio.c
344 @@ -54,6 +54,7 @@
345 #define ODSR_CLEAR 0x1c00
346 #define LTLEECSR_ENABLE_ALL 0xFFC000FC
347 #define ESCSR_CLEAR 0x07120204
348 +#define IECSR_CLEAR 0x80000000
349
350 #define RIO_PORT1_EDCSR 0x0640
351 #define RIO_PORT2_EDCSR 0x0680
352 @@ -1089,11 +1090,11 @@ static void port_error_handler(struct rio_mport *port, int offset)
353
354 if (offset == 0) {
355 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
356 - out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
357 + out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR);
358 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
359 } else {
360 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
361 - out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
362 + out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR);
363 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
364 }
365 }
366 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
367 index 253986b..2e79419 100644
368 --- a/arch/sparc/Kconfig
369 +++ b/arch/sparc/Kconfig
370 @@ -53,6 +53,7 @@ config SPARC64
371 select HAVE_PERF_EVENTS
372 select PERF_USE_VMALLOC
373 select IRQ_PREFLOW_FASTEOI
374 + select HAVE_C_RECORDMCOUNT
375
376 config ARCH_DEFCONFIG
377 string
378 diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h
379 index a1607d1..69914d7 100644
380 --- a/arch/sparc/include/asm/sigcontext.h
381 +++ b/arch/sparc/include/asm/sigcontext.h
382 @@ -45,6 +45,19 @@ typedef struct {
383 int si_mask;
384 } __siginfo32_t;
385
386 +#define __SIGC_MAXWIN 7
387 +
388 +typedef struct {
389 + unsigned long locals[8];
390 + unsigned long ins[8];
391 +} __siginfo_reg_window;
392 +
393 +typedef struct {
394 + int wsaved;
395 + __siginfo_reg_window reg_window[__SIGC_MAXWIN];
396 + unsigned long rwbuf_stkptrs[__SIGC_MAXWIN];
397 +} __siginfo_rwin_t;
398 +
399 #ifdef CONFIG_SPARC64
400 typedef struct {
401 unsigned int si_float_regs [64];
402 @@ -73,6 +86,7 @@ struct sigcontext {
403 unsigned long ss_size;
404 } sigc_stack;
405 unsigned long sigc_mask;
406 + __siginfo_rwin_t * sigc_rwin_save;
407 };
408
409 #else
410 diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
411 index 5f5b8bf..bcc98fc 100644
412 --- a/arch/sparc/include/asm/spinlock_32.h
413 +++ b/arch/sparc/include/asm/spinlock_32.h
414 @@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
415 *(volatile __u32 *)&lp->lock = ~0U;
416 }
417
418 +static void inline arch_write_unlock(arch_rwlock_t *lock)
419 +{
420 + __asm__ __volatile__(
421 +" st %%g0, [%0]"
422 + : /* no outputs */
423 + : "r" (lock)
424 + : "memory");
425 +}
426 +
427 static inline int arch_write_trylock(arch_rwlock_t *rw)
428 {
429 unsigned int val;
430 @@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
431 res; \
432 })
433
434 -#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
435 -
436 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
437 #define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
438 #define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
439 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
440 index 073936a..9689176 100644
441 --- a/arch/sparc/include/asm/spinlock_64.h
442 +++ b/arch/sparc/include/asm/spinlock_64.h
443 @@ -210,14 +210,8 @@ static int inline arch_write_trylock(arch_rwlock_t *lock)
444 return result;
445 }
446
447 -#define arch_read_lock(p) arch_read_lock(p)
448 #define arch_read_lock_flags(p, f) arch_read_lock(p)
449 -#define arch_read_trylock(p) arch_read_trylock(p)
450 -#define arch_read_unlock(p) arch_read_unlock(p)
451 -#define arch_write_lock(p) arch_write_lock(p)
452 #define arch_write_lock_flags(p, f) arch_write_lock(p)
453 -#define arch_write_unlock(p) arch_write_unlock(p)
454 -#define arch_write_trylock(p) arch_write_trylock(p)
455
456 #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
457 #define arch_write_can_lock(rw) (!(rw)->lock)
458 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
459 index b90b4a1..cb85458 100644
460 --- a/arch/sparc/kernel/Makefile
461 +++ b/arch/sparc/kernel/Makefile
462 @@ -32,6 +32,7 @@ obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o
463
464 obj-y += process_$(BITS).o
465 obj-y += signal_$(BITS).o
466 +obj-y += sigutil_$(BITS).o
467 obj-$(CONFIG_SPARC32) += ioport.o
468 obj-y += setup_$(BITS).o
469 obj-y += idprom.o
470 diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
471 index 100b9c2..4285112 100644
472 --- a/arch/sparc/kernel/irq.h
473 +++ b/arch/sparc/kernel/irq.h
474 @@ -88,7 +88,7 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
475 #define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
476
477 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
478 -#define SUN4D_IPI_IRQ 14
479 +#define SUN4D_IPI_IRQ 13
480
481 extern void sun4d_ipi_interrupt(void);
482
483 diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
484 index 948601a..6418ba6 100644
485 --- a/arch/sparc/kernel/pcic.c
486 +++ b/arch/sparc/kernel/pcic.c
487 @@ -352,8 +352,8 @@ int __init pcic_probe(void)
488 strcpy(pbm->prom_name, namebuf);
489
490 {
491 - extern volatile int t_nmi[1];
492 - extern int pcic_nmi_trap_patch[1];
493 + extern volatile int t_nmi[4];
494 + extern int pcic_nmi_trap_patch[4];
495
496 t_nmi[0] = pcic_nmi_trap_patch[0];
497 t_nmi[1] = pcic_nmi_trap_patch[1];
498 diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
499 index 3e9daea..3c5bb78 100644
500 --- a/arch/sparc/kernel/setup_64.c
501 +++ b/arch/sparc/kernel/setup_64.c
502 @@ -440,8 +440,14 @@ static void __init init_sparc64_elf_hwcap(void)
503 cap |= AV_SPARC_VIS;
504 if (tlb_type == cheetah || tlb_type == cheetah_plus)
505 cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
506 - if (tlb_type == cheetah_plus)
507 - cap |= AV_SPARC_POPC;
508 + if (tlb_type == cheetah_plus) {
509 + unsigned long impl, ver;
510 +
511 + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
512 + impl = ((ver >> 32) & 0xffff);
513 + if (impl == PANTHER_IMPL)
514 + cap |= AV_SPARC_POPC;
515 + }
516 if (tlb_type == hypervisor) {
517 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
518 cap |= AV_SPARC_ASI_BLK_INIT;
519 diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
520 index 75fad42..5d92488 100644
521 --- a/arch/sparc/kernel/signal32.c
522 +++ b/arch/sparc/kernel/signal32.c
523 @@ -29,6 +29,8 @@
524 #include <asm/visasm.h>
525 #include <asm/compat_signal.h>
526
527 +#include "sigutil.h"
528 +
529 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
530
531 /* This magic should be in g_upper[0] for all upper parts
532 @@ -44,14 +46,14 @@ typedef struct {
533 struct signal_frame32 {
534 struct sparc_stackf32 ss;
535 __siginfo32_t info;
536 - /* __siginfo_fpu32_t * */ u32 fpu_save;
537 + /* __siginfo_fpu_t * */ u32 fpu_save;
538 unsigned int insns[2];
539 unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
540 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
541 /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
542 siginfo_extra_v8plus_t v8plus;
543 - __siginfo_fpu_t fpu_state;
544 -};
545 + /* __siginfo_rwin_t * */u32 rwin_save;
546 +} __attribute__((aligned(8)));
547
548 typedef struct compat_siginfo{
549 int si_signo;
550 @@ -110,18 +112,14 @@ struct rt_signal_frame32 {
551 compat_siginfo_t info;
552 struct pt_regs32 regs;
553 compat_sigset_t mask;
554 - /* __siginfo_fpu32_t * */ u32 fpu_save;
555 + /* __siginfo_fpu_t * */ u32 fpu_save;
556 unsigned int insns[2];
557 stack_t32 stack;
558 unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
559 /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
560 siginfo_extra_v8plus_t v8plus;
561 - __siginfo_fpu_t fpu_state;
562 -};
563 -
564 -/* Align macros */
565 -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
566 -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
567 + /* __siginfo_rwin_t * */u32 rwin_save;
568 +} __attribute__((aligned(8)));
569
570 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
571 {
572 @@ -192,30 +190,13 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
573 return 0;
574 }
575
576 -static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
577 -{
578 - unsigned long *fpregs = current_thread_info()->fpregs;
579 - unsigned long fprs;
580 - int err;
581 -
582 - err = __get_user(fprs, &fpu->si_fprs);
583 - fprs_write(0);
584 - regs->tstate &= ~TSTATE_PEF;
585 - if (fprs & FPRS_DL)
586 - err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
587 - if (fprs & FPRS_DU)
588 - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
589 - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
590 - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
591 - current_thread_info()->fpsaved[0] |= fprs;
592 - return err;
593 -}
594 -
595 void do_sigreturn32(struct pt_regs *regs)
596 {
597 struct signal_frame32 __user *sf;
598 + compat_uptr_t fpu_save;
599 + compat_uptr_t rwin_save;
600 unsigned int psr;
601 - unsigned pc, npc, fpu_save;
602 + unsigned pc, npc;
603 sigset_t set;
604 unsigned seta[_COMPAT_NSIG_WORDS];
605 int err, i;
606 @@ -273,8 +254,13 @@ void do_sigreturn32(struct pt_regs *regs)
607 pt_regs_clear_syscall(regs);
608
609 err |= __get_user(fpu_save, &sf->fpu_save);
610 - if (fpu_save)
611 - err |= restore_fpu_state32(regs, &sf->fpu_state);
612 + if (!err && fpu_save)
613 + err |= restore_fpu_state(regs, compat_ptr(fpu_save));
614 + err |= __get_user(rwin_save, &sf->rwin_save);
615 + if (!err && rwin_save) {
616 + if (restore_rwin_state(compat_ptr(rwin_save)))
617 + goto segv;
618 + }
619 err |= __get_user(seta[0], &sf->info.si_mask);
620 err |= copy_from_user(seta+1, &sf->extramask,
621 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
622 @@ -300,7 +286,9 @@ segv:
623 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
624 {
625 struct rt_signal_frame32 __user *sf;
626 - unsigned int psr, pc, npc, fpu_save, u_ss_sp;
627 + unsigned int psr, pc, npc, u_ss_sp;
628 + compat_uptr_t fpu_save;
629 + compat_uptr_t rwin_save;
630 mm_segment_t old_fs;
631 sigset_t set;
632 compat_sigset_t seta;
633 @@ -359,8 +347,8 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
634 pt_regs_clear_syscall(regs);
635
636 err |= __get_user(fpu_save, &sf->fpu_save);
637 - if (fpu_save)
638 - err |= restore_fpu_state32(regs, &sf->fpu_state);
639 + if (!err && fpu_save)
640 + err |= restore_fpu_state(regs, compat_ptr(fpu_save));
641 err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
642 err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
643 st.ss_sp = compat_ptr(u_ss_sp);
644 @@ -376,6 +364,12 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
645 do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
646 set_fs(old_fs);
647
648 + err |= __get_user(rwin_save, &sf->rwin_save);
649 + if (!err && rwin_save) {
650 + if (restore_rwin_state(compat_ptr(rwin_save)))
651 + goto segv;
652 + }
653 +
654 switch (_NSIG_WORDS) {
655 case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
656 case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
657 @@ -433,26 +427,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
658 return (void __user *) sp;
659 }
660
661 -static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
662 -{
663 - unsigned long *fpregs = current_thread_info()->fpregs;
664 - unsigned long fprs;
665 - int err = 0;
666 -
667 - fprs = current_thread_info()->fpsaved[0];
668 - if (fprs & FPRS_DL)
669 - err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
670 - (sizeof(unsigned int) * 32));
671 - if (fprs & FPRS_DU)
672 - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
673 - (sizeof(unsigned int) * 32));
674 - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
675 - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
676 - err |= __put_user(fprs, &fpu->si_fprs);
677 -
678 - return err;
679 -}
680 -
681 /* The I-cache flush instruction only works in the primary ASI, which
682 * right now is the nucleus, aka. kernel space.
683 *
684 @@ -515,18 +489,23 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
685 int signo, sigset_t *oldset)
686 {
687 struct signal_frame32 __user *sf;
688 + int i, err, wsaved;
689 + void __user *tail;
690 int sigframe_size;
691 u32 psr;
692 - int i, err;
693 unsigned int seta[_COMPAT_NSIG_WORDS];
694
695 /* 1. Make sure everything is clean */
696 synchronize_user_stack();
697 save_and_clear_fpu();
698
699 - sigframe_size = SF_ALIGNEDSZ;
700 - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
701 - sigframe_size -= sizeof(__siginfo_fpu_t);
702 + wsaved = get_thread_wsaved();
703 +
704 + sigframe_size = sizeof(*sf);
705 + if (current_thread_info()->fpsaved[0] & FPRS_FEF)
706 + sigframe_size += sizeof(__siginfo_fpu_t);
707 + if (wsaved)
708 + sigframe_size += sizeof(__siginfo_rwin_t);
709
710 sf = (struct signal_frame32 __user *)
711 get_sigframe(&ka->sa, regs, sigframe_size);
712 @@ -534,8 +513,7 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
713 if (invalid_frame_pointer(sf, sigframe_size))
714 goto sigill;
715
716 - if (get_thread_wsaved() != 0)
717 - goto sigill;
718 + tail = (sf + 1);
719
720 /* 2. Save the current process state */
721 if (test_thread_flag(TIF_32BIT)) {
722 @@ -560,11 +538,22 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
723 &sf->v8plus.asi);
724
725 if (psr & PSR_EF) {
726 - err |= save_fpu_state32(regs, &sf->fpu_state);
727 - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
728 + __siginfo_fpu_t __user *fp = tail;
729 + tail += sizeof(*fp);
730 + err |= save_fpu_state(regs, fp);
731 + err |= __put_user((u64)fp, &sf->fpu_save);
732 } else {
733 err |= __put_user(0, &sf->fpu_save);
734 }
735 + if (wsaved) {
736 + __siginfo_rwin_t __user *rwp = tail;
737 + tail += sizeof(*rwp);
738 + err |= save_rwin_state(wsaved, rwp);
739 + err |= __put_user((u64)rwp, &sf->rwin_save);
740 + set_thread_wsaved(0);
741 + } else {
742 + err |= __put_user(0, &sf->rwin_save);
743 + }
744
745 switch (_NSIG_WORDS) {
746 case 4: seta[7] = (oldset->sig[3] >> 32);
747 @@ -580,10 +569,21 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
748 err |= __copy_to_user(sf->extramask, seta + 1,
749 (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
750
751 - err |= copy_in_user((u32 __user *)sf,
752 - (u32 __user *)(regs->u_regs[UREG_FP]),
753 - sizeof(struct reg_window32));
754 -
755 + if (!wsaved) {
756 + err |= copy_in_user((u32 __user *)sf,
757 + (u32 __user *)(regs->u_regs[UREG_FP]),
758 + sizeof(struct reg_window32));
759 + } else {
760 + struct reg_window *rp;
761 +
762 + rp = &current_thread_info()->reg_window[wsaved - 1];
763 + for (i = 0; i < 8; i++)
764 + err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
765 + for (i = 0; i < 6; i++)
766 + err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
767 + err |= __put_user(rp->ins[6], &sf->ss.fp);
768 + err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
769 + }
770 if (err)
771 goto sigsegv;
772
773 @@ -613,7 +613,6 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
774 err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
775 if (err)
776 goto sigsegv;
777 -
778 flush_signal_insns(address);
779 }
780 return 0;
781 @@ -632,18 +631,23 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
782 siginfo_t *info)
783 {
784 struct rt_signal_frame32 __user *sf;
785 + int i, err, wsaved;
786 + void __user *tail;
787 int sigframe_size;
788 u32 psr;
789 - int i, err;
790 compat_sigset_t seta;
791
792 /* 1. Make sure everything is clean */
793 synchronize_user_stack();
794 save_and_clear_fpu();
795
796 - sigframe_size = RT_ALIGNEDSZ;
797 - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
798 - sigframe_size -= sizeof(__siginfo_fpu_t);
799 + wsaved = get_thread_wsaved();
800 +
801 + sigframe_size = sizeof(*sf);
802 + if (current_thread_info()->fpsaved[0] & FPRS_FEF)
803 + sigframe_size += sizeof(__siginfo_fpu_t);
804 + if (wsaved)
805 + sigframe_size += sizeof(__siginfo_rwin_t);
806
807 sf = (struct rt_signal_frame32 __user *)
808 get_sigframe(&ka->sa, regs, sigframe_size);
809 @@ -651,8 +655,7 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
810 if (invalid_frame_pointer(sf, sigframe_size))
811 goto sigill;
812
813 - if (get_thread_wsaved() != 0)
814 - goto sigill;
815 + tail = (sf + 1);
816
817 /* 2. Save the current process state */
818 if (test_thread_flag(TIF_32BIT)) {
819 @@ -677,11 +680,22 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
820 &sf->v8plus.asi);
821
822 if (psr & PSR_EF) {
823 - err |= save_fpu_state32(regs, &sf->fpu_state);
824 - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
825 + __siginfo_fpu_t __user *fp = tail;
826 + tail += sizeof(*fp);
827 + err |= save_fpu_state(regs, fp);
828 + err |= __put_user((u64)fp, &sf->fpu_save);
829 } else {
830 err |= __put_user(0, &sf->fpu_save);
831 }
832 + if (wsaved) {
833 + __siginfo_rwin_t __user *rwp = tail;
834 + tail += sizeof(*rwp);
835 + err |= save_rwin_state(wsaved, rwp);
836 + err |= __put_user((u64)rwp, &sf->rwin_save);
837 + set_thread_wsaved(0);
838 + } else {
839 + err |= __put_user(0, &sf->rwin_save);
840 + }
841
842 /* Update the siginfo structure. */
843 err |= copy_siginfo_to_user32(&sf->info, info);
844 @@ -703,9 +717,21 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
845 }
846 err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
847
848 - err |= copy_in_user((u32 __user *)sf,
849 - (u32 __user *)(regs->u_regs[UREG_FP]),
850 - sizeof(struct reg_window32));
851 + if (!wsaved) {
852 + err |= copy_in_user((u32 __user *)sf,
853 + (u32 __user *)(regs->u_regs[UREG_FP]),
854 + sizeof(struct reg_window32));
855 + } else {
856 + struct reg_window *rp;
857 +
858 + rp = &current_thread_info()->reg_window[wsaved - 1];
859 + for (i = 0; i < 8; i++)
860 + err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
861 + for (i = 0; i < 6; i++)
862 + err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
863 + err |= __put_user(rp->ins[6], &sf->ss.fp);
864 + err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
865 + }
866 if (err)
867 goto sigsegv;
868
869 diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
870 index 5e5c5fd..04ede8f 100644
871 --- a/arch/sparc/kernel/signal_32.c
872 +++ b/arch/sparc/kernel/signal_32.c
873 @@ -26,6 +26,8 @@
874 #include <asm/pgtable.h>
875 #include <asm/cacheflush.h> /* flush_sig_insns */
876
877 +#include "sigutil.h"
878 +
879 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
880
881 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
882 @@ -39,8 +41,8 @@ struct signal_frame {
883 unsigned long insns[2] __attribute__ ((aligned (8)));
884 unsigned int extramask[_NSIG_WORDS - 1];
885 unsigned int extra_size; /* Should be 0 */
886 - __siginfo_fpu_t fpu_state;
887 -};
888 + __siginfo_rwin_t __user *rwin_save;
889 +} __attribute__((aligned(8)));
890
891 struct rt_signal_frame {
892 struct sparc_stackf ss;
893 @@ -51,8 +53,8 @@ struct rt_signal_frame {
894 unsigned int insns[2];
895 stack_t stack;
896 unsigned int extra_size; /* Should be 0 */
897 - __siginfo_fpu_t fpu_state;
898 -};
899 + __siginfo_rwin_t __user *rwin_save;
900 +} __attribute__((aligned(8)));
901
902 /* Align macros */
903 #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
904 @@ -79,43 +81,13 @@ asmlinkage int sys_sigsuspend(old_sigset_t set)
905 return _sigpause_common(set);
906 }
907
908 -static inline int
909 -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
910 -{
911 - int err;
912 -#ifdef CONFIG_SMP
913 - if (test_tsk_thread_flag(current, TIF_USEDFPU))
914 - regs->psr &= ~PSR_EF;
915 -#else
916 - if (current == last_task_used_math) {
917 - last_task_used_math = NULL;
918 - regs->psr &= ~PSR_EF;
919 - }
920 -#endif
921 - set_used_math();
922 - clear_tsk_thread_flag(current, TIF_USEDFPU);
923 -
924 - if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
925 - return -EFAULT;
926 -
927 - err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
928 - (sizeof(unsigned long) * 32));
929 - err |= __get_user(current->thread.fsr, &fpu->si_fsr);
930 - err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
931 - if (current->thread.fpqdepth != 0)
932 - err |= __copy_from_user(&current->thread.fpqueue[0],
933 - &fpu->si_fpqueue[0],
934 - ((sizeof(unsigned long) +
935 - (sizeof(unsigned long *)))*16));
936 - return err;
937 -}
938 -
939 asmlinkage void do_sigreturn(struct pt_regs *regs)
940 {
941 struct signal_frame __user *sf;
942 unsigned long up_psr, pc, npc;
943 sigset_t set;
944 __siginfo_fpu_t __user *fpu_save;
945 + __siginfo_rwin_t __user *rwin_save;
946 int err;
947
948 /* Always make any pending restarted system calls return -EINTR */
949 @@ -150,9 +122,11 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
950 pt_regs_clear_syscall(regs);
951
952 err |= __get_user(fpu_save, &sf->fpu_save);
953 -
954 if (fpu_save)
955 err |= restore_fpu_state(regs, fpu_save);
956 + err |= __get_user(rwin_save, &sf->rwin_save);
957 + if (rwin_save)
958 + err |= restore_rwin_state(rwin_save);
959
960 /* This is pretty much atomic, no amount locking would prevent
961 * the races which exist anyways.
962 @@ -180,6 +154,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
963 struct rt_signal_frame __user *sf;
964 unsigned int psr, pc, npc;
965 __siginfo_fpu_t __user *fpu_save;
966 + __siginfo_rwin_t __user *rwin_save;
967 mm_segment_t old_fs;
968 sigset_t set;
969 stack_t st;
970 @@ -207,8 +182,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
971 pt_regs_clear_syscall(regs);
972
973 err |= __get_user(fpu_save, &sf->fpu_save);
974 -
975 - if (fpu_save)
976 + if (!err && fpu_save)
977 err |= restore_fpu_state(regs, fpu_save);
978 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
979
980 @@ -228,6 +202,12 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
981 do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
982 set_fs(old_fs);
983
984 + err |= __get_user(rwin_save, &sf->rwin_save);
985 + if (!err && rwin_save) {
986 + if (restore_rwin_state(rwin_save))
987 + goto segv;
988 + }
989 +
990 sigdelsetmask(&set, ~_BLOCKABLE);
991 spin_lock_irq(&current->sighand->siglock);
992 current->blocked = set;
993 @@ -280,53 +260,23 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
994 return (void __user *) sp;
995 }
996
997 -static inline int
998 -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
999 -{
1000 - int err = 0;
1001 -#ifdef CONFIG_SMP
1002 - if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
1003 - put_psr(get_psr() | PSR_EF);
1004 - fpsave(&current->thread.float_regs[0], &current->thread.fsr,
1005 - &current->thread.fpqueue[0], &current->thread.fpqdepth);
1006 - regs->psr &= ~(PSR_EF);
1007 - clear_tsk_thread_flag(current, TIF_USEDFPU);
1008 - }
1009 -#else
1010 - if (current == last_task_used_math) {
1011 - put_psr(get_psr() | PSR_EF);
1012 - fpsave(&current->thread.float_regs[0], &current->thread.fsr,
1013 - &current->thread.fpqueue[0], &current->thread.fpqdepth);
1014 - last_task_used_math = NULL;
1015 - regs->psr &= ~(PSR_EF);
1016 - }
1017 -#endif
1018 - err |= __copy_to_user(&fpu->si_float_regs[0],
1019 - &current->thread.float_regs[0],
1020 - (sizeof(unsigned long) * 32));
1021 - err |= __put_user(current->thread.fsr, &fpu->si_fsr);
1022 - err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
1023 - if (current->thread.fpqdepth != 0)
1024 - err |= __copy_to_user(&fpu->si_fpqueue[0],
1025 - &current->thread.fpqueue[0],
1026 - ((sizeof(unsigned long) +
1027 - (sizeof(unsigned long *)))*16));
1028 - clear_used_math();
1029 - return err;
1030 -}
1031 -
1032 static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
1033 int signo, sigset_t *oldset)
1034 {
1035 struct signal_frame __user *sf;
1036 - int sigframe_size, err;
1037 + int sigframe_size, err, wsaved;
1038 + void __user *tail;
1039
1040 /* 1. Make sure everything is clean */
1041 synchronize_user_stack();
1042
1043 - sigframe_size = SF_ALIGNEDSZ;
1044 - if (!used_math())
1045 - sigframe_size -= sizeof(__siginfo_fpu_t);
1046 + wsaved = current_thread_info()->w_saved;
1047 +
1048 + sigframe_size = sizeof(*sf);
1049 + if (used_math())
1050 + sigframe_size += sizeof(__siginfo_fpu_t);
1051 + if (wsaved)
1052 + sigframe_size += sizeof(__siginfo_rwin_t);
1053
1054 sf = (struct signal_frame __user *)
1055 get_sigframe(&ka->sa, regs, sigframe_size);
1056 @@ -334,8 +284,7 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
1057 if (invalid_frame_pointer(sf, sigframe_size))
1058 goto sigill_and_return;
1059
1060 - if (current_thread_info()->w_saved != 0)
1061 - goto sigill_and_return;
1062 + tail = sf + 1;
1063
1064 /* 2. Save the current process state */
1065 err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));
1066 @@ -343,17 +292,34 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
1067 err |= __put_user(0, &sf->extra_size);
1068
1069 if (used_math()) {
1070 - err |= save_fpu_state(regs, &sf->fpu_state);
1071 - err |= __put_user(&sf->fpu_state, &sf->fpu_save);
1072 + __siginfo_fpu_t __user *fp = tail;
1073 + tail += sizeof(*fp);
1074 + err |= save_fpu_state(regs, fp);
1075 + err |= __put_user(fp, &sf->fpu_save);
1076 } else {
1077 err |= __put_user(0, &sf->fpu_save);
1078 }
1079 + if (wsaved) {
1080 + __siginfo_rwin_t __user *rwp = tail;
1081 + tail += sizeof(*rwp);
1082 + err |= save_rwin_state(wsaved, rwp);
1083 + err |= __put_user(rwp, &sf->rwin_save);
1084 + } else {
1085 + err |= __put_user(0, &sf->rwin_save);
1086 + }
1087
1088 err |= __put_user(oldset->sig[0], &sf->info.si_mask);
1089 err |= __copy_to_user(sf->extramask, &oldset->sig[1],
1090 (_NSIG_WORDS - 1) * sizeof(unsigned int));
1091 - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
1092 - sizeof(struct reg_window32));
1093 + if (!wsaved) {
1094 + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
1095 + sizeof(struct reg_window32));
1096 + } else {
1097 + struct reg_window32 *rp;
1098 +
1099 + rp = &current_thread_info()->reg_window[wsaved - 1];
1100 + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
1101 + }
1102 if (err)
1103 goto sigsegv;
1104
1105 @@ -399,21 +365,24 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
1106 int signo, sigset_t *oldset, siginfo_t *info)
1107 {
1108 struct rt_signal_frame __user *sf;
1109 - int sigframe_size;
1110 + int sigframe_size, wsaved;
1111 + void __user *tail;
1112 unsigned int psr;
1113 int err;
1114
1115 synchronize_user_stack();
1116 - sigframe_size = RT_ALIGNEDSZ;
1117 - if (!used_math())
1118 - sigframe_size -= sizeof(__siginfo_fpu_t);
1119 + wsaved = current_thread_info()->w_saved;
1120 + sigframe_size = sizeof(*sf);
1121 + if (used_math())
1122 + sigframe_size += sizeof(__siginfo_fpu_t);
1123 + if (wsaved)
1124 + sigframe_size += sizeof(__siginfo_rwin_t);
1125 sf = (struct rt_signal_frame __user *)
1126 get_sigframe(&ka->sa, regs, sigframe_size);
1127 if (invalid_frame_pointer(sf, sigframe_size))
1128 goto sigill;
1129 - if (current_thread_info()->w_saved != 0)
1130 - goto sigill;
1131
1132 + tail = sf + 1;
1133 err = __put_user(regs->pc, &sf->regs.pc);
1134 err |= __put_user(regs->npc, &sf->regs.npc);
1135 err |= __put_user(regs->y, &sf->regs.y);
1136 @@ -425,11 +394,21 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
1137 err |= __put_user(0, &sf->extra_size);
1138
1139 if (psr & PSR_EF) {
1140 - err |= save_fpu_state(regs, &sf->fpu_state);
1141 - err |= __put_user(&sf->fpu_state, &sf->fpu_save);
1142 + __siginfo_fpu_t *fp = tail;
1143 + tail += sizeof(*fp);
1144 + err |= save_fpu_state(regs, fp);
1145 + err |= __put_user(fp, &sf->fpu_save);
1146 } else {
1147 err |= __put_user(0, &sf->fpu_save);
1148 }
1149 + if (wsaved) {
1150 + __siginfo_rwin_t *rwp = tail;
1151 + tail += sizeof(*rwp);
1152 + err |= save_rwin_state(wsaved, rwp);
1153 + err |= __put_user(rwp, &sf->rwin_save);
1154 + } else {
1155 + err |= __put_user(0, &sf->rwin_save);
1156 + }
1157 err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));
1158
1159 /* Setup sigaltstack */
1160 @@ -437,8 +416,15 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
1161 err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
1162 err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
1163
1164 - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
1165 - sizeof(struct reg_window32));
1166 + if (!wsaved) {
1167 + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
1168 + sizeof(struct reg_window32));
1169 + } else {
1170 + struct reg_window32 *rp;
1171 +
1172 + rp = &current_thread_info()->reg_window[wsaved - 1];
1173 + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
1174 + }
1175
1176 err |= copy_siginfo_to_user(&sf->info, info);
1177
1178 diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
1179 index 006fe45..47509df 100644
1180 --- a/arch/sparc/kernel/signal_64.c
1181 +++ b/arch/sparc/kernel/signal_64.c
1182 @@ -34,6 +34,7 @@
1183
1184 #include "entry.h"
1185 #include "systbls.h"
1186 +#include "sigutil.h"
1187
1188 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
1189
1190 @@ -236,7 +237,7 @@ struct rt_signal_frame {
1191 __siginfo_fpu_t __user *fpu_save;
1192 stack_t stack;
1193 sigset_t mask;
1194 - __siginfo_fpu_t fpu_state;
1195 + __siginfo_rwin_t *rwin_save;
1196 };
1197
1198 static long _sigpause_common(old_sigset_t set)
1199 @@ -266,33 +267,12 @@ asmlinkage long sys_sigsuspend(old_sigset_t set)
1200 return _sigpause_common(set);
1201 }
1202
1203 -static inline int
1204 -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1205 -{
1206 - unsigned long *fpregs = current_thread_info()->fpregs;
1207 - unsigned long fprs;
1208 - int err;
1209 -
1210 - err = __get_user(fprs, &fpu->si_fprs);
1211 - fprs_write(0);
1212 - regs->tstate &= ~TSTATE_PEF;
1213 - if (fprs & FPRS_DL)
1214 - err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
1215 - (sizeof(unsigned int) * 32));
1216 - if (fprs & FPRS_DU)
1217 - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
1218 - (sizeof(unsigned int) * 32));
1219 - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
1220 - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
1221 - current_thread_info()->fpsaved[0] |= fprs;
1222 - return err;
1223 -}
1224 -
1225 void do_rt_sigreturn(struct pt_regs *regs)
1226 {
1227 struct rt_signal_frame __user *sf;
1228 unsigned long tpc, tnpc, tstate;
1229 __siginfo_fpu_t __user *fpu_save;
1230 + __siginfo_rwin_t __user *rwin_save;
1231 sigset_t set;
1232 int err;
1233
1234 @@ -325,8 +305,8 @@ void do_rt_sigreturn(struct pt_regs *regs)
1235 regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
1236
1237 err |= __get_user(fpu_save, &sf->fpu_save);
1238 - if (fpu_save)
1239 - err |= restore_fpu_state(regs, &sf->fpu_state);
1240 + if (!err && fpu_save)
1241 + err |= restore_fpu_state(regs, fpu_save);
1242
1243 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
1244 err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
1245 @@ -334,6 +314,12 @@ void do_rt_sigreturn(struct pt_regs *regs)
1246 if (err)
1247 goto segv;
1248
1249 + err |= __get_user(rwin_save, &sf->rwin_save);
1250 + if (!err && rwin_save) {
1251 + if (restore_rwin_state(rwin_save))
1252 + goto segv;
1253 + }
1254 +
1255 regs->tpc = tpc;
1256 regs->tnpc = tnpc;
1257
1258 @@ -351,34 +337,13 @@ segv:
1259 }
1260
1261 /* Checks if the fp is valid */
1262 -static int invalid_frame_pointer(void __user *fp, int fplen)
1263 +static int invalid_frame_pointer(void __user *fp)
1264 {
1265 if (((unsigned long) fp) & 15)
1266 return 1;
1267 return 0;
1268 }
1269
1270 -static inline int
1271 -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1272 -{
1273 - unsigned long *fpregs = current_thread_info()->fpregs;
1274 - unsigned long fprs;
1275 - int err = 0;
1276 -
1277 - fprs = current_thread_info()->fpsaved[0];
1278 - if (fprs & FPRS_DL)
1279 - err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
1280 - (sizeof(unsigned int) * 32));
1281 - if (fprs & FPRS_DU)
1282 - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
1283 - (sizeof(unsigned int) * 32));
1284 - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
1285 - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
1286 - err |= __put_user(fprs, &fpu->si_fprs);
1287 -
1288 - return err;
1289 -}
1290 -
1291 static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
1292 {
1293 unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
1294 @@ -414,34 +379,48 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
1295 int signo, sigset_t *oldset, siginfo_t *info)
1296 {
1297 struct rt_signal_frame __user *sf;
1298 - int sigframe_size, err;
1299 + int wsaved, err, sf_size;
1300 + void __user *tail;
1301
1302 /* 1. Make sure everything is clean */
1303 synchronize_user_stack();
1304 save_and_clear_fpu();
1305
1306 - sigframe_size = sizeof(struct rt_signal_frame);
1307 - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
1308 - sigframe_size -= sizeof(__siginfo_fpu_t);
1309 + wsaved = get_thread_wsaved();
1310
1311 + sf_size = sizeof(struct rt_signal_frame);
1312 + if (current_thread_info()->fpsaved[0] & FPRS_FEF)
1313 + sf_size += sizeof(__siginfo_fpu_t);
1314 + if (wsaved)
1315 + sf_size += sizeof(__siginfo_rwin_t);
1316 sf = (struct rt_signal_frame __user *)
1317 - get_sigframe(ka, regs, sigframe_size);
1318 -
1319 - if (invalid_frame_pointer (sf, sigframe_size))
1320 - goto sigill;
1321 + get_sigframe(ka, regs, sf_size);
1322
1323 - if (get_thread_wsaved() != 0)
1324 + if (invalid_frame_pointer (sf))
1325 goto sigill;
1326
1327 + tail = (sf + 1);
1328 +
1329 /* 2. Save the current process state */
1330 err = copy_to_user(&sf->regs, regs, sizeof (*regs));
1331
1332 if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
1333 - err |= save_fpu_state(regs, &sf->fpu_state);
1334 - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
1335 + __siginfo_fpu_t __user *fpu_save = tail;
1336 + tail += sizeof(__siginfo_fpu_t);
1337 + err |= save_fpu_state(regs, fpu_save);
1338 + err |= __put_user((u64)fpu_save, &sf->fpu_save);
1339 } else {
1340 err |= __put_user(0, &sf->fpu_save);
1341 }
1342 + if (wsaved) {
1343 + __siginfo_rwin_t __user *rwin_save = tail;
1344 + tail += sizeof(__siginfo_rwin_t);
1345 + err |= save_rwin_state(wsaved, rwin_save);
1346 + err |= __put_user((u64)rwin_save, &sf->rwin_save);
1347 + set_thread_wsaved(0);
1348 + } else {
1349 + err |= __put_user(0, &sf->rwin_save);
1350 + }
1351
1352 /* Setup sigaltstack */
1353 err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
1354 @@ -450,10 +429,17 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
1355
1356 err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
1357
1358 - err |= copy_in_user((u64 __user *)sf,
1359 - (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
1360 - sizeof(struct reg_window));
1361 + if (!wsaved) {
1362 + err |= copy_in_user((u64 __user *)sf,
1363 + (u64 __user *)(regs->u_regs[UREG_FP] +
1364 + STACK_BIAS),
1365 + sizeof(struct reg_window));
1366 + } else {
1367 + struct reg_window *rp;
1368
1369 + rp = &current_thread_info()->reg_window[wsaved - 1];
1370 + err |= copy_to_user(sf, rp, sizeof(struct reg_window));
1371 + }
1372 if (info)
1373 err |= copy_siginfo_to_user(&sf->info, info);
1374 else {
1375 diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h
1376 new file mode 100644
1377 index 0000000..d223aa4
1378 --- /dev/null
1379 +++ b/arch/sparc/kernel/sigutil.h
1380 @@ -0,0 +1,9 @@
1381 +#ifndef _SIGUTIL_H
1382 +#define _SIGUTIL_H
1383 +
1384 +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu);
1385 +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu);
1386 +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin);
1387 +int restore_rwin_state(__siginfo_rwin_t __user *rp);
1388 +
1389 +#endif /* _SIGUTIL_H */
1390 diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
1391 new file mode 100644
1392 index 0000000..35c7897
1393 --- /dev/null
1394 +++ b/arch/sparc/kernel/sigutil_32.c
1395 @@ -0,0 +1,120 @@
1396 +#include <linux/kernel.h>
1397 +#include <linux/types.h>
1398 +#include <linux/thread_info.h>
1399 +#include <linux/uaccess.h>
1400 +#include <linux/sched.h>
1401 +
1402 +#include <asm/sigcontext.h>
1403 +#include <asm/fpumacro.h>
1404 +#include <asm/ptrace.h>
1405 +
1406 +#include "sigutil.h"
1407 +
1408 +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1409 +{
1410 + int err = 0;
1411 +#ifdef CONFIG_SMP
1412 + if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
1413 + put_psr(get_psr() | PSR_EF);
1414 + fpsave(&current->thread.float_regs[0], &current->thread.fsr,
1415 + &current->thread.fpqueue[0], &current->thread.fpqdepth);
1416 + regs->psr &= ~(PSR_EF);
1417 + clear_tsk_thread_flag(current, TIF_USEDFPU);
1418 + }
1419 +#else
1420 + if (current == last_task_used_math) {
1421 + put_psr(get_psr() | PSR_EF);
1422 + fpsave(&current->thread.float_regs[0], &current->thread.fsr,
1423 + &current->thread.fpqueue[0], &current->thread.fpqdepth);
1424 + last_task_used_math = NULL;
1425 + regs->psr &= ~(PSR_EF);
1426 + }
1427 +#endif
1428 + err |= __copy_to_user(&fpu->si_float_regs[0],
1429 + &current->thread.float_regs[0],
1430 + (sizeof(unsigned long) * 32));
1431 + err |= __put_user(current->thread.fsr, &fpu->si_fsr);
1432 + err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
1433 + if (current->thread.fpqdepth != 0)
1434 + err |= __copy_to_user(&fpu->si_fpqueue[0],
1435 + &current->thread.fpqueue[0],
1436 + ((sizeof(unsigned long) +
1437 + (sizeof(unsigned long *)))*16));
1438 + clear_used_math();
1439 + return err;
1440 +}
1441 +
1442 +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1443 +{
1444 + int err;
1445 +#ifdef CONFIG_SMP
1446 + if (test_tsk_thread_flag(current, TIF_USEDFPU))
1447 + regs->psr &= ~PSR_EF;
1448 +#else
1449 + if (current == last_task_used_math) {
1450 + last_task_used_math = NULL;
1451 + regs->psr &= ~PSR_EF;
1452 + }
1453 +#endif
1454 + set_used_math();
1455 + clear_tsk_thread_flag(current, TIF_USEDFPU);
1456 +
1457 + if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
1458 + return -EFAULT;
1459 +
1460 + err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0],
1461 + (sizeof(unsigned long) * 32));
1462 + err |= __get_user(current->thread.fsr, &fpu->si_fsr);
1463 + err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
1464 + if (current->thread.fpqdepth != 0)
1465 + err |= __copy_from_user(&current->thread.fpqueue[0],
1466 + &fpu->si_fpqueue[0],
1467 + ((sizeof(unsigned long) +
1468 + (sizeof(unsigned long *)))*16));
1469 + return err;
1470 +}
1471 +
1472 +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
1473 +{
1474 + int i, err = __put_user(wsaved, &rwin->wsaved);
1475 +
1476 + for (i = 0; i < wsaved; i++) {
1477 + struct reg_window32 *rp;
1478 + unsigned long fp;
1479 +
1480 + rp = &current_thread_info()->reg_window[i];
1481 + fp = current_thread_info()->rwbuf_stkptrs[i];
1482 + err |= copy_to_user(&rwin->reg_window[i], rp,
1483 + sizeof(struct reg_window32));
1484 + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
1485 + }
1486 + return err;
1487 +}
1488 +
1489 +int restore_rwin_state(__siginfo_rwin_t __user *rp)
1490 +{
1491 + struct thread_info *t = current_thread_info();
1492 + int i, wsaved, err;
1493 +
1494 + __get_user(wsaved, &rp->wsaved);
1495 + if (wsaved > NSWINS)
1496 + return -EFAULT;
1497 +
1498 + err = 0;
1499 + for (i = 0; i < wsaved; i++) {
1500 + err |= copy_from_user(&t->reg_window[i],
1501 + &rp->reg_window[i],
1502 + sizeof(struct reg_window32));
1503 + err |= __get_user(t->rwbuf_stkptrs[i],
1504 + &rp->rwbuf_stkptrs[i]);
1505 + }
1506 + if (err)
1507 + return err;
1508 +
1509 + t->w_saved = wsaved;
1510 + synchronize_user_stack();
1511 + if (t->w_saved)
1512 + return -EFAULT;
1513 + return 0;
1514 +
1515 +}
1516 diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
1517 new file mode 100644
1518 index 0000000..6edc4e5
1519 --- /dev/null
1520 +++ b/arch/sparc/kernel/sigutil_64.c
1521 @@ -0,0 +1,93 @@
1522 +#include <linux/kernel.h>
1523 +#include <linux/types.h>
1524 +#include <linux/thread_info.h>
1525 +#include <linux/uaccess.h>
1526 +
1527 +#include <asm/sigcontext.h>
1528 +#include <asm/fpumacro.h>
1529 +#include <asm/ptrace.h>
1530 +
1531 +#include "sigutil.h"
1532 +
1533 +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1534 +{
1535 + unsigned long *fpregs = current_thread_info()->fpregs;
1536 + unsigned long fprs;
1537 + int err = 0;
1538 +
1539 + fprs = current_thread_info()->fpsaved[0];
1540 + if (fprs & FPRS_DL)
1541 + err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
1542 + (sizeof(unsigned int) * 32));
1543 + if (fprs & FPRS_DU)
1544 + err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
1545 + (sizeof(unsigned int) * 32));
1546 + err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
1547 + err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
1548 + err |= __put_user(fprs, &fpu->si_fprs);
1549 +
1550 + return err;
1551 +}
1552 +
1553 +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
1554 +{
1555 + unsigned long *fpregs = current_thread_info()->fpregs;
1556 + unsigned long fprs;
1557 + int err;
1558 +
1559 + err = __get_user(fprs, &fpu->si_fprs);
1560 + fprs_write(0);
1561 + regs->tstate &= ~TSTATE_PEF;
1562 + if (fprs & FPRS_DL)
1563 + err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
1564 + (sizeof(unsigned int) * 32));
1565 + if (fprs & FPRS_DU)
1566 + err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
1567 + (sizeof(unsigned int) * 32));
1568 + err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
1569 + err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
1570 + current_thread_info()->fpsaved[0] |= fprs;
1571 + return err;
1572 +}
1573 +
1574 +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
1575 +{
1576 + int i, err = __put_user(wsaved, &rwin->wsaved);
1577 +
1578 + for (i = 0; i < wsaved; i++) {
1579 + struct reg_window *rp = &current_thread_info()->reg_window[i];
1580 + unsigned long fp = current_thread_info()->rwbuf_stkptrs[i];
1581 +
1582 + err |= copy_to_user(&rwin->reg_window[i], rp,
1583 + sizeof(struct reg_window));
1584 + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
1585 + }
1586 + return err;
1587 +}
1588 +
1589 +int restore_rwin_state(__siginfo_rwin_t __user *rp)
1590 +{
1591 + struct thread_info *t = current_thread_info();
1592 + int i, wsaved, err;
1593 +
1594 + __get_user(wsaved, &rp->wsaved);
1595 + if (wsaved > NSWINS)
1596 + return -EFAULT;
1597 +
1598 + err = 0;
1599 + for (i = 0; i < wsaved; i++) {
1600 + err |= copy_from_user(&t->reg_window[i],
1601 + &rp->reg_window[i],
1602 + sizeof(struct reg_window));
1603 + err |= __get_user(t->rwbuf_stkptrs[i],
1604 + &rp->rwbuf_stkptrs[i]);
1605 + }
1606 + if (err)
1607 + return err;
1608 +
1609 + set_thread_wsaved(wsaved);
1610 + synchronize_user_stack();
1611 + if (get_thread_wsaved())
1612 + return -EFAULT;
1613 + return 0;
1614 +}
1615 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
1616 index 7c3a95e..d3d9d50 100644
1617 --- a/arch/x86/kernel/amd_iommu.c
1618 +++ b/arch/x86/kernel/amd_iommu.c
1619 @@ -531,7 +531,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
1620 * Writes the command to the IOMMUs command buffer and informs the
1621 * hardware about the new command.
1622 */
1623 -static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1624 +static int iommu_queue_command_sync(struct amd_iommu *iommu,
1625 + struct iommu_cmd *cmd,
1626 + bool sync)
1627 {
1628 u32 left, tail, head, next_tail;
1629 unsigned long flags;
1630 @@ -565,13 +567,18 @@ again:
1631 copy_cmd_to_buffer(iommu, cmd, tail);
1632
1633 /* We need to sync now to make sure all commands are processed */
1634 - iommu->need_sync = true;
1635 + iommu->need_sync = sync;
1636
1637 spin_unlock_irqrestore(&iommu->lock, flags);
1638
1639 return 0;
1640 }
1641
1642 +static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
1643 +{
1644 + return iommu_queue_command_sync(iommu, cmd, true);
1645 +}
1646 +
1647 /*
1648 * This function queues a completion wait command into the command
1649 * buffer of an IOMMU
1650 @@ -587,7 +594,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
1651
1652 build_completion_wait(&cmd, (u64)&sem);
1653
1654 - ret = iommu_queue_command(iommu, &cmd);
1655 + ret = iommu_queue_command_sync(iommu, &cmd, false);
1656 if (ret)
1657 return ret;
1658
1659 @@ -773,14 +780,9 @@ static void domain_flush_complete(struct protection_domain *domain)
1660 static void domain_flush_devices(struct protection_domain *domain)
1661 {
1662 struct iommu_dev_data *dev_data;
1663 - unsigned long flags;
1664 -
1665 - spin_lock_irqsave(&domain->lock, flags);
1666
1667 list_for_each_entry(dev_data, &domain->dev_list, list)
1668 device_flush_dte(dev_data->dev);
1669 -
1670 - spin_unlock_irqrestore(&domain->lock, flags);
1671 }
1672
1673 /****************************************************************************
1674 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
1675 index 3a0338b..bf6d692 100644
1676 --- a/arch/x86/kernel/cpu/perf_event.c
1677 +++ b/arch/x86/kernel/cpu/perf_event.c
1678 @@ -1856,6 +1856,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1679
1680 perf_callchain_store(entry, regs->ip);
1681
1682 + if (!current->mm)
1683 + return;
1684 +
1685 if (perf_callchain_user32(regs, entry))
1686 return;
1687
1688 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
1689 index 41178c8..dd208a8 100644
1690 --- a/arch/x86/kernel/cpu/perf_event_intel.c
1691 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
1692 @@ -1495,6 +1495,7 @@ static __init int intel_pmu_init(void)
1693 break;
1694
1695 case 42: /* SandyBridge */
1696 + case 45: /* SandyBridge, "Romely-EP" */
1697 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1698 sizeof(hw_cache_event_ids));
1699
1700 diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
1701 index 60aeeb5..acea42e 100644
1702 --- a/arch/x86/xen/setup.c
1703 +++ b/arch/x86/xen/setup.c
1704 @@ -185,6 +185,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
1705 PFN_UP(start_pci), PFN_DOWN(last));
1706 return identity;
1707 }
1708 +
1709 +static unsigned long __init xen_get_max_pages(void)
1710 +{
1711 + unsigned long max_pages = MAX_DOMAIN_PAGES;
1712 + domid_t domid = DOMID_SELF;
1713 + int ret;
1714 +
1715 + ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
1716 + if (ret > 0)
1717 + max_pages = ret;
1718 + return min(max_pages, MAX_DOMAIN_PAGES);
1719 +}
1720 +
1721 /**
1722 * machine_specific_memory_setup - Hook for machine specific memory setup.
1723 **/
1724 @@ -293,6 +306,14 @@ char * __init xen_memory_setup(void)
1725
1726 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
1727
1728 + extra_limit = xen_get_max_pages();
1729 + if (max_pfn + extra_pages > extra_limit) {
1730 + if (extra_limit > max_pfn)
1731 + extra_pages = extra_limit - max_pfn;
1732 + else
1733 + extra_pages = 0;
1734 + }
1735 +
1736 extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
1737
1738 /*
1739 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
1740 index e79dbb9..d4fc6d4 100644
1741 --- a/arch/x86/xen/smp.c
1742 +++ b/arch/x86/xen/smp.c
1743 @@ -32,6 +32,7 @@
1744 #include <xen/page.h>
1745 #include <xen/events.h>
1746
1747 +#include <xen/hvc-console.h>
1748 #include "xen-ops.h"
1749 #include "mmu.h"
1750
1751 @@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
1752 unsigned cpu;
1753 unsigned int i;
1754
1755 + if (skip_ioapic_setup) {
1756 + char *m = (max_cpus == 0) ?
1757 + "The nosmp parameter is incompatible with Xen; " \
1758 + "use Xen dom0_max_vcpus=1 parameter" :
1759 + "The noapic parameter is incompatible with Xen";
1760 +
1761 + xen_raw_printk(m);
1762 + panic(m);
1763 + }
1764 xen_init_lock_cpu(0);
1765
1766 smp_store_cpu_info(0);
1767 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
1768 index 22a2093..b040b0e 100644
1769 --- a/arch/x86/xen/xen-asm_32.S
1770 +++ b/arch/x86/xen/xen-asm_32.S
1771 @@ -113,11 +113,13 @@ xen_iret_start_crit:
1772
1773 /*
1774 * If there's something pending, mask events again so we can
1775 - * jump back into xen_hypervisor_callback
1776 + * jump back into xen_hypervisor_callback. Otherwise do not
1777 + * touch XEN_vcpu_info_mask.
1778 */
1779 - sete XEN_vcpu_info_mask(%eax)
1780 + jne 1f
1781 + movb $1, XEN_vcpu_info_mask(%eax)
1782
1783 - popl %eax
1784 +1: popl %eax
1785
1786 /*
1787 * From this point on the registers are restored and the stack
1788 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
1789 index bcaf16e..b596e54 100644
1790 --- a/block/blk-cgroup.c
1791 +++ b/block/blk-cgroup.c
1792 @@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
1793 {
1794 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
1795 int ret;
1796 - unsigned long major, minor, temp;
1797 + unsigned long major, minor;
1798 int i = 0;
1799 dev_t dev;
1800 - u64 bps, iops;
1801 + u64 temp;
1802
1803 memset(s, 0, sizeof(s));
1804
1805 @@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
1806
1807 dev = MKDEV(major, minor);
1808
1809 - ret = blkio_check_dev_num(dev);
1810 + ret = strict_strtoull(s[1], 10, &temp);
1811 if (ret)
1812 - return ret;
1813 + return -EINVAL;
1814
1815 - newpn->dev = dev;
1816 + /* For rule removal, do not check for device presence. */
1817 + if (temp) {
1818 + ret = blkio_check_dev_num(dev);
1819 + if (ret)
1820 + return ret;
1821 + }
1822
1823 - if (s[1] == NULL)
1824 - return -EINVAL;
1825 + newpn->dev = dev;
1826
1827 switch (plid) {
1828 case BLKIO_POLICY_PROP:
1829 - ret = strict_strtoul(s[1], 10, &temp);
1830 - if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
1831 - temp > BLKIO_WEIGHT_MAX)
1832 + if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
1833 + temp > BLKIO_WEIGHT_MAX)
1834 return -EINVAL;
1835
1836 newpn->plid = plid;
1837 @@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
1838 switch(fileid) {
1839 case BLKIO_THROTL_read_bps_device:
1840 case BLKIO_THROTL_write_bps_device:
1841 - ret = strict_strtoull(s[1], 10, &bps);
1842 - if (ret)
1843 - return -EINVAL;
1844 -
1845 newpn->plid = plid;
1846 newpn->fileid = fileid;
1847 - newpn->val.bps = bps;
1848 + newpn->val.bps = temp;
1849 break;
1850 case BLKIO_THROTL_read_iops_device:
1851 case BLKIO_THROTL_write_iops_device:
1852 - ret = strict_strtoull(s[1], 10, &iops);
1853 - if (ret)
1854 - return -EINVAL;
1855 -
1856 - if (iops > THROTL_IOPS_MAX)
1857 + if (temp > THROTL_IOPS_MAX)
1858 return -EINVAL;
1859
1860 newpn->plid = plid;
1861 newpn->fileid = fileid;
1862 - newpn->val.iops = (unsigned int)iops;
1863 + newpn->val.iops = (unsigned int)temp;
1864 break;
1865 }
1866 break;
1867 diff --git a/block/blk-core.c b/block/blk-core.c
1868 index 1d49e1c..847d04e 100644
1869 --- a/block/blk-core.c
1870 +++ b/block/blk-core.c
1871 @@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q)
1872 EXPORT_SYMBOL(blk_put_queue);
1873
1874 /*
1875 - * Note: If a driver supplied the queue lock, it should not zap that lock
1876 - * unexpectedly as some queue cleanup components like elevator_exit() and
1877 - * blk_throtl_exit() need queue lock.
1878 + * Note: If a driver supplied the queue lock, it is disconnected
1879 + * by this function. The actual state of the lock doesn't matter
1880 + * here as the request_queue isn't accessible after this point
1881 + * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
1882 */
1883 void blk_cleanup_queue(struct request_queue *q)
1884 {
1885 @@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q)
1886 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
1887 mutex_unlock(&q->sysfs_lock);
1888
1889 - if (q->elevator)
1890 - elevator_exit(q->elevator);
1891 -
1892 - blk_throtl_exit(q);
1893 + if (q->queue_lock != &q->__queue_lock)
1894 + q->queue_lock = &q->__queue_lock;
1895
1896 blk_put_queue(q);
1897 }
1898 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
1899 index d935bd8..45c56d8 100644
1900 --- a/block/blk-sysfs.c
1901 +++ b/block/blk-sysfs.c
1902 @@ -472,6 +472,11 @@ static void blk_release_queue(struct kobject *kobj)
1903
1904 blk_sync_queue(q);
1905
1906 + if (q->elevator)
1907 + elevator_exit(q->elevator);
1908 +
1909 + blk_throtl_exit(q);
1910 +
1911 if (rl->rq_pool)
1912 mempool_destroy(rl->rq_pool);
1913
1914 diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
1915 index bc533dd..f895a24 100644
1916 --- a/drivers/acpi/acpica/acconfig.h
1917 +++ b/drivers/acpi/acpica/acconfig.h
1918 @@ -121,7 +121,7 @@
1919
1920 /* Maximum sleep allowed via Sleep() operator */
1921
1922 -#define ACPI_MAX_SLEEP 20000 /* Two seconds */
1923 +#define ACPI_MAX_SLEEP 2000 /* Two seconds */
1924
1925 /******************************************************************************
1926 *
1927 diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
1928 index c7f743c..5552125 100644
1929 --- a/drivers/acpi/acpica/aclocal.h
1930 +++ b/drivers/acpi/acpica/aclocal.h
1931 @@ -357,6 +357,7 @@ struct acpi_predefined_data {
1932 char *pathname;
1933 const union acpi_predefined_info *predefined;
1934 union acpi_operand_object *parent_package;
1935 + struct acpi_namespace_node *node;
1936 u32 flags;
1937 u8 node_flags;
1938 };
1939 diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
1940 index 9fb03fa..dc00582 100644
1941 --- a/drivers/acpi/acpica/nspredef.c
1942 +++ b/drivers/acpi/acpica/nspredef.c
1943 @@ -212,6 +212,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
1944 goto cleanup;
1945 }
1946 data->predefined = predefined;
1947 + data->node = node;
1948 data->node_flags = node->flags;
1949 data->pathname = pathname;
1950
1951 diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
1952 index 973883b..024c4f2 100644
1953 --- a/drivers/acpi/acpica/nsrepair2.c
1954 +++ b/drivers/acpi/acpica/nsrepair2.c
1955 @@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
1956 {
1957 union acpi_operand_object *return_object = *return_object_ptr;
1958 acpi_status status;
1959 + struct acpi_namespace_node *node;
1960 +
1961 + /*
1962 + * We can only sort the _TSS return package if there is no _PSS in the
1963 + * same scope. This is because if _PSS is present, the ACPI specification
1964 + * dictates that the _TSS Power Dissipation field is to be ignored, and
1965 + * therefore some BIOSs leave garbage values in the _TSS Power field(s).
1966 + * In this case, it is best to just return the _TSS package as-is.
1967 + * (May, 2011)
1968 + */
1969 + status =
1970 + acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
1971 + if (ACPI_SUCCESS(status)) {
1972 + return (AE_OK);
1973 + }
1974
1975 status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
1976 ACPI_SORT_DESCENDING,
1977 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1978 index 71afe03..cab6960 100644
1979 --- a/drivers/ata/ahci.c
1980 +++ b/drivers/ata/ahci.c
1981 @@ -267,6 +267,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1982 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
1983 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
1984 { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
1985 + { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
1986
1987 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
1988 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1989 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
1990 index ac8d7d9..d6d4f57 100644
1991 --- a/drivers/ata/pata_via.c
1992 +++ b/drivers/ata/pata_via.c
1993 @@ -124,6 +124,17 @@ static const struct via_isa_bridge {
1994 { NULL }
1995 };
1996
1997 +static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
1998 + {
1999 + .ident = "AVERATEC 3200",
2000 + .matches = {
2001 + DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
2002 + DMI_MATCH(DMI_BOARD_NAME, "3200"),
2003 + },
2004 + },
2005 + { }
2006 +};
2007 +
2008 struct via_port {
2009 u8 cached_device;
2010 };
2011 @@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
2012 mask &= ~ ATA_MASK_UDMA;
2013 }
2014 }
2015 +
2016 + if (dev->class == ATA_DEV_ATAPI &&
2017 + dmi_check_system(no_atapi_dma_dmi_table)) {
2018 + ata_dev_printk(dev, KERN_WARNING, "controller locks up on ATAPI DMA, forcing PIO\n");
2019 + mask &= ATA_MASK_PIO;
2020 + }
2021 +
2022 return mask;
2023 }
2024
2025 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
2026 index bbb03e6..06ed6b4 100644
2027 --- a/drivers/base/firmware_class.c
2028 +++ b/drivers/base/firmware_class.c
2029 @@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
2030 if (!firmware_p)
2031 return -EINVAL;
2032
2033 - if (WARN_ON(usermodehelper_is_disabled())) {
2034 - dev_err(device, "firmware: %s will not be loaded\n", name);
2035 - return -EBUSY;
2036 - }
2037 -
2038 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
2039 if (!firmware) {
2040 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
2041 @@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
2042 return 0;
2043 }
2044
2045 + if (WARN_ON(usermodehelper_is_disabled())) {
2046 + dev_err(device, "firmware: %s will not be loaded\n", name);
2047 + retval = -EBUSY;
2048 + goto out;
2049 + }
2050 +
2051 if (uevent)
2052 dev_dbg(device, "firmware: requesting %s\n", name);
2053
2054 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
2055 index 98de8f4..9955a53 100644
2056 --- a/drivers/block/floppy.c
2057 +++ b/drivers/block/floppy.c
2058 @@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
2059 use_virtual_dma = can_use_virtual_dma & 1;
2060 fdc_state[0].address = FDC1;
2061 if (fdc_state[0].address == -1) {
2062 - del_timer(&fd_timeout);
2063 + del_timer_sync(&fd_timeout);
2064 err = -ENODEV;
2065 goto out_unreg_region;
2066 }
2067 @@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
2068 fdc = 0; /* reset fdc in case of unexpected interrupt */
2069 err = floppy_grab_irq_and_dma();
2070 if (err) {
2071 - del_timer(&fd_timeout);
2072 + del_timer_sync(&fd_timeout);
2073 err = -EBUSY;
2074 goto out_unreg_region;
2075 }
2076 @@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
2077 user_reset_fdc(-1, FD_RESET_ALWAYS, false);
2078 }
2079 fdc = 0;
2080 - del_timer(&fd_timeout);
2081 + del_timer_sync(&fd_timeout);
2082 current_drive = 0;
2083 initialized = true;
2084 if (have_no_fdc) {
2085 @@ -4368,7 +4368,7 @@ out_unreg_blkdev:
2086 unregister_blkdev(FLOPPY_MAJOR, "fd");
2087 out_put_disk:
2088 while (dr--) {
2089 - del_timer(&motor_off_timer[dr]);
2090 + del_timer_sync(&motor_off_timer[dr]);
2091 if (disks[dr]->queue)
2092 blk_cleanup_queue(disks[dr]->queue);
2093 put_disk(disks[dr]);
2094 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
2095 index 7beb0e2..b85ee76 100644
2096 --- a/drivers/char/tpm/tpm.c
2097 +++ b/drivers/char/tpm/tpm.c
2098 @@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
2099 u32 count, ordinal;
2100 unsigned long stop;
2101
2102 + if (bufsiz > TPM_BUFSIZE)
2103 + bufsiz = TPM_BUFSIZE;
2104 +
2105 count = be32_to_cpu(*((__be32 *) (buf + 2)));
2106 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
2107 if (count == 0)
2108 @@ -1052,6 +1055,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
2109 {
2110 struct tpm_chip *chip = file->private_data;
2111 ssize_t ret_size;
2112 + int rc;
2113
2114 del_singleshot_timer_sync(&chip->user_read_timer);
2115 flush_work_sync(&chip->work);
2116 @@ -1062,8 +1066,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
2117 ret_size = size;
2118
2119 mutex_lock(&chip->buffer_mutex);
2120 - if (copy_to_user(buf, chip->data_buffer, ret_size))
2121 + rc = copy_to_user(buf, chip->data_buffer, ret_size);
2122 + memset(chip->data_buffer, 0, ret_size);
2123 + if (rc)
2124 ret_size = -EFAULT;
2125 +
2126 mutex_unlock(&chip->buffer_mutex);
2127 }
2128
2129 diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
2130 index 7b0603e..cdc02ac 100644
2131 --- a/drivers/cpufreq/pcc-cpufreq.c
2132 +++ b/drivers/cpufreq/pcc-cpufreq.c
2133 @@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
2134 pr = per_cpu(processors, cpu);
2135 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
2136
2137 + if (!pr)
2138 + return -ENODEV;
2139 +
2140 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
2141 if (ACPI_FAILURE(status))
2142 return -ENODEV;
2143 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
2144 index ebb8973..ee76c8e 100644
2145 --- a/drivers/firewire/ohci.c
2146 +++ b/drivers/firewire/ohci.c
2147 @@ -291,6 +291,9 @@ static const struct {
2148 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
2149 QUIRK_CYCLE_TIMER},
2150
2151 + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
2152 + QUIRK_NO_MSI},
2153 +
2154 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
2155 QUIRK_CYCLE_TIMER},
2156
2157 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
2158 index 82fad91..ca6028f 100644
2159 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
2160 +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
2161 @@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
2162 return -ENOMEM;
2163
2164 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
2165 - if (!nvbe->ttm_alloced)
2166 + if (!nvbe->ttm_alloced) {
2167 + kfree(nvbe->pages);
2168 + nvbe->pages = NULL;
2169 return -ENOMEM;
2170 + }
2171
2172 nvbe->nr_pages = 0;
2173 while (num_pages--) {
2174 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
2175 index 15bd047..c975581 100644
2176 --- a/drivers/gpu/drm/radeon/evergreen.c
2177 +++ b/drivers/gpu/drm/radeon/evergreen.c
2178 @@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
2179 void evergreen_fini(struct radeon_device *rdev);
2180 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
2181
2182 +void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
2183 +{
2184 + u16 ctl, v;
2185 + int cap, err;
2186 +
2187 + cap = pci_pcie_cap(rdev->pdev);
2188 + if (!cap)
2189 + return;
2190 +
2191 + err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
2192 + if (err)
2193 + return;
2194 +
2195 + v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
2196 +
2197 + /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
2198 + * to avoid hangs or perfomance issues
2199 + */
2200 + if ((v == 0) || (v == 6) || (v == 7)) {
2201 + ctl &= ~PCI_EXP_DEVCTL_READRQ;
2202 + ctl |= (2 << 12);
2203 + pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
2204 + }
2205 +}
2206 +
2207 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
2208 {
2209 /* enable the pflip int */
2210 @@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
2211 SOFT_RESET_PA |
2212 SOFT_RESET_SH |
2213 SOFT_RESET_VGT |
2214 + SOFT_RESET_SPI |
2215 SOFT_RESET_SX));
2216 RREG32(GRBM_SOFT_RESET);
2217 mdelay(15);
2218 @@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
2219 /* Initialize the ring buffer's read and write pointers */
2220 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2221 WREG32(CP_RB_RPTR_WR, 0);
2222 - WREG32(CP_RB_WPTR, 0);
2223 + rdev->cp.wptr = 0;
2224 + WREG32(CP_RB_WPTR, rdev->cp.wptr);
2225
2226 /* set the wb address wether it's enabled or not */
2227 WREG32(CP_RB_RPTR_ADDR,
2228 @@ -1403,7 +1430,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
2229 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2230
2231 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2232 - rdev->cp.wptr = RREG32(CP_RB_WPTR);
2233
2234 evergreen_cp_start(rdev);
2235 rdev->cp.ready = true;
2236 @@ -1865,6 +1891,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2237
2238 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2239
2240 + evergreen_fix_pci_max_read_req_size(rdev);
2241 +
2242 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
2243
2244 cc_gc_shader_pipe_config |=
2245 @@ -3142,21 +3170,23 @@ int evergreen_suspend(struct radeon_device *rdev)
2246 }
2247
2248 int evergreen_copy_blit(struct radeon_device *rdev,
2249 - uint64_t src_offset, uint64_t dst_offset,
2250 - unsigned num_pages, struct radeon_fence *fence)
2251 + uint64_t src_offset,
2252 + uint64_t dst_offset,
2253 + unsigned num_gpu_pages,
2254 + struct radeon_fence *fence)
2255 {
2256 int r;
2257
2258 mutex_lock(&rdev->r600_blit.mutex);
2259 rdev->r600_blit.vb_ib = NULL;
2260 - r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2261 + r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2262 if (r) {
2263 if (rdev->r600_blit.vb_ib)
2264 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2265 mutex_unlock(&rdev->r600_blit.mutex);
2266 return r;
2267 }
2268 - evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2269 + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2270 evergreen_blit_done_copy(rdev, fence);
2271 mutex_unlock(&rdev->r600_blit.mutex);
2272 return 0;
2273 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
2274 index 559dbd4..0b132a3 100644
2275 --- a/drivers/gpu/drm/radeon/ni.c
2276 +++ b/drivers/gpu/drm/radeon/ni.c
2277 @@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
2278 extern void evergreen_mc_program(struct radeon_device *rdev);
2279 extern void evergreen_irq_suspend(struct radeon_device *rdev);
2280 extern int evergreen_mc_init(struct radeon_device *rdev);
2281 +extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
2282
2283 #define EVERGREEN_PFP_UCODE_SIZE 1120
2284 #define EVERGREEN_PM4_UCODE_SIZE 1376
2285 @@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
2286
2287 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2288
2289 + evergreen_fix_pci_max_read_req_size(rdev);
2290 +
2291 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
2292 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
2293
2294 @@ -1158,6 +1161,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
2295 SOFT_RESET_PA |
2296 SOFT_RESET_SH |
2297 SOFT_RESET_VGT |
2298 + SOFT_RESET_SPI |
2299 SOFT_RESET_SX));
2300 RREG32(GRBM_SOFT_RESET);
2301 mdelay(15);
2302 @@ -1182,7 +1186,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
2303
2304 /* Initialize the ring buffer's read and write pointers */
2305 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
2306 - WREG32(CP_RB0_WPTR, 0);
2307 + rdev->cp.wptr = 0;
2308 + WREG32(CP_RB0_WPTR, rdev->cp.wptr);
2309
2310 /* set the wb address wether it's enabled or not */
2311 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
2312 @@ -1202,7 +1207,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
2313 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
2314
2315 rdev->cp.rptr = RREG32(CP_RB0_RPTR);
2316 - rdev->cp.wptr = RREG32(CP_RB0_WPTR);
2317
2318 /* ring1 - compute only */
2319 /* Set ring buffer size */
2320 @@ -1215,7 +1219,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
2321
2322 /* Initialize the ring buffer's read and write pointers */
2323 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
2324 - WREG32(CP_RB1_WPTR, 0);
2325 + rdev->cp1.wptr = 0;
2326 + WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
2327
2328 /* set the wb address wether it's enabled or not */
2329 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
2330 @@ -1227,7 +1232,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
2331 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
2332
2333 rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
2334 - rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
2335
2336 /* ring2 - compute only */
2337 /* Set ring buffer size */
2338 @@ -1240,7 +1244,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
2339
2340 /* Initialize the ring buffer's read and write pointers */
2341 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
2342 - WREG32(CP_RB2_WPTR, 0);
2343 + rdev->cp2.wptr = 0;
2344 + WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
2345
2346 /* set the wb address wether it's enabled or not */
2347 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
2348 @@ -1252,7 +1257,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
2349 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
2350
2351 rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
2352 - rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
2353
2354 /* start the rings */
2355 cayman_cp_start(rdev);
2356 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
2357 index f2204cb..830e1f1 100644
2358 --- a/drivers/gpu/drm/radeon/r100.c
2359 +++ b/drivers/gpu/drm/radeon/r100.c
2360 @@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
2361 int r100_copy_blit(struct radeon_device *rdev,
2362 uint64_t src_offset,
2363 uint64_t dst_offset,
2364 - unsigned num_pages,
2365 + unsigned num_gpu_pages,
2366 struct radeon_fence *fence)
2367 {
2368 uint32_t cur_pages;
2369 - uint32_t stride_bytes = PAGE_SIZE;
2370 + uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
2371 uint32_t pitch;
2372 uint32_t stride_pixels;
2373 unsigned ndw;
2374 @@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
2375 /* radeon pitch is /64 */
2376 pitch = stride_bytes / 64;
2377 stride_pixels = stride_bytes / 4;
2378 - num_loops = DIV_ROUND_UP(num_pages, 8191);
2379 + num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
2380
2381 /* Ask for enough room for blit + flush + fence */
2382 ndw = 64 + (10 * num_loops);
2383 @@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
2384 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
2385 return -EINVAL;
2386 }
2387 - while (num_pages > 0) {
2388 - cur_pages = num_pages;
2389 + while (num_gpu_pages > 0) {
2390 + cur_pages = num_gpu_pages;
2391 if (cur_pages > 8191) {
2392 cur_pages = 8191;
2393 }
2394 - num_pages -= cur_pages;
2395 + num_gpu_pages -= cur_pages;
2396
2397 /* pages are in Y direction - height
2398 page width in X direction - width */
2399 @@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
2400 /* Force read & write ptr to 0 */
2401 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
2402 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2403 - WREG32(RADEON_CP_RB_WPTR, 0);
2404 + rdev->cp.wptr = 0;
2405 + WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
2406
2407 /* set the wb address whether it's enabled or not */
2408 WREG32(R_00070C_CP_RB_RPTR_ADDR,
2409 @@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
2410 WREG32(RADEON_CP_RB_CNTL, tmp);
2411 udelay(10);
2412 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2413 - rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
2414 - /* protect against crazy HW on resume */
2415 - rdev->cp.wptr &= rdev->cp.ptr_mask;
2416 /* Set cp mode to bus mastering & enable cp*/
2417 WREG32(RADEON_CP_CSQ_MODE,
2418 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
2419 diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
2420 index f240583..a1f3ba0 100644
2421 --- a/drivers/gpu/drm/radeon/r200.c
2422 +++ b/drivers/gpu/drm/radeon/r200.c
2423 @@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
2424 int r200_copy_dma(struct radeon_device *rdev,
2425 uint64_t src_offset,
2426 uint64_t dst_offset,
2427 - unsigned num_pages,
2428 + unsigned num_gpu_pages,
2429 struct radeon_fence *fence)
2430 {
2431 uint32_t size;
2432 @@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
2433 int r = 0;
2434
2435 /* radeon pitch is /64 */
2436 - size = num_pages << PAGE_SHIFT;
2437 + size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
2438 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
2439 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
2440 if (r) {
2441 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
2442 index bc54b26..1dea9d6 100644
2443 --- a/drivers/gpu/drm/radeon/r600.c
2444 +++ b/drivers/gpu/drm/radeon/r600.c
2445 @@ -2208,7 +2208,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2446 /* Initialize the ring buffer's read and write pointers */
2447 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2448 WREG32(CP_RB_RPTR_WR, 0);
2449 - WREG32(CP_RB_WPTR, 0);
2450 + rdev->cp.wptr = 0;
2451 + WREG32(CP_RB_WPTR, rdev->cp.wptr);
2452
2453 /* set the wb address whether it's enabled or not */
2454 WREG32(CP_RB_RPTR_ADDR,
2455 @@ -2233,7 +2234,6 @@ int r600_cp_resume(struct radeon_device *rdev)
2456 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2457
2458 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2459 - rdev->cp.wptr = RREG32(CP_RB_WPTR);
2460
2461 r600_cp_start(rdev);
2462 rdev->cp.ready = true;
2463 @@ -2355,21 +2355,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2464 }
2465
2466 int r600_copy_blit(struct radeon_device *rdev,
2467 - uint64_t src_offset, uint64_t dst_offset,
2468 - unsigned num_pages, struct radeon_fence *fence)
2469 + uint64_t src_offset,
2470 + uint64_t dst_offset,
2471 + unsigned num_gpu_pages,
2472 + struct radeon_fence *fence)
2473 {
2474 int r;
2475
2476 mutex_lock(&rdev->r600_blit.mutex);
2477 rdev->r600_blit.vb_ib = NULL;
2478 - r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2479 + r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2480 if (r) {
2481 if (rdev->r600_blit.vb_ib)
2482 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2483 mutex_unlock(&rdev->r600_blit.mutex);
2484 return r;
2485 }
2486 - r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2487 + r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2488 r600_blit_done_copy(rdev, fence);
2489 mutex_unlock(&rdev->r600_blit.mutex);
2490 return 0;
2491 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2492 index ef0e0e0..0bb4ddf 100644
2493 --- a/drivers/gpu/drm/radeon/radeon.h
2494 +++ b/drivers/gpu/drm/radeon/radeon.h
2495 @@ -322,6 +322,7 @@ union radeon_gart_table {
2496
2497 #define RADEON_GPU_PAGE_SIZE 4096
2498 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
2499 +#define RADEON_GPU_PAGE_SHIFT 12
2500
2501 struct radeon_gart {
2502 dma_addr_t table_addr;
2503 @@ -914,17 +915,17 @@ struct radeon_asic {
2504 int (*copy_blit)(struct radeon_device *rdev,
2505 uint64_t src_offset,
2506 uint64_t dst_offset,
2507 - unsigned num_pages,
2508 + unsigned num_gpu_pages,
2509 struct radeon_fence *fence);
2510 int (*copy_dma)(struct radeon_device *rdev,
2511 uint64_t src_offset,
2512 uint64_t dst_offset,
2513 - unsigned num_pages,
2514 + unsigned num_gpu_pages,
2515 struct radeon_fence *fence);
2516 int (*copy)(struct radeon_device *rdev,
2517 uint64_t src_offset,
2518 uint64_t dst_offset,
2519 - unsigned num_pages,
2520 + unsigned num_gpu_pages,
2521 struct radeon_fence *fence);
2522 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
2523 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
2524 diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
2525 index 3d7a0d7..3dedaa0 100644
2526 --- a/drivers/gpu/drm/radeon/radeon_asic.h
2527 +++ b/drivers/gpu/drm/radeon/radeon_asic.h
2528 @@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
2529 int r100_copy_blit(struct radeon_device *rdev,
2530 uint64_t src_offset,
2531 uint64_t dst_offset,
2532 - unsigned num_pages,
2533 + unsigned num_gpu_pages,
2534 struct radeon_fence *fence);
2535 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2536 uint32_t tiling_flags, uint32_t pitch,
2537 @@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
2538 extern int r200_copy_dma(struct radeon_device *rdev,
2539 uint64_t src_offset,
2540 uint64_t dst_offset,
2541 - unsigned num_pages,
2542 + unsigned num_gpu_pages,
2543 struct radeon_fence *fence);
2544 void r200_set_safe_registers(struct radeon_device *rdev);
2545
2546 @@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
2547 int r600_ring_test(struct radeon_device *rdev);
2548 int r600_copy_blit(struct radeon_device *rdev,
2549 uint64_t src_offset, uint64_t dst_offset,
2550 - unsigned num_pages, struct radeon_fence *fence);
2551 + unsigned num_gpu_pages, struct radeon_fence *fence);
2552 void r600_hpd_init(struct radeon_device *rdev);
2553 void r600_hpd_fini(struct radeon_device *rdev);
2554 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
2555 @@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
2556 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
2557 int evergreen_copy_blit(struct radeon_device *rdev,
2558 uint64_t src_offset, uint64_t dst_offset,
2559 - unsigned num_pages, struct radeon_fence *fence);
2560 + unsigned num_gpu_pages, struct radeon_fence *fence);
2561 void evergreen_hpd_init(struct radeon_device *rdev);
2562 void evergreen_hpd_fini(struct radeon_device *rdev);
2563 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
2564 diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
2565 index 2d48e7a..b956cf1 100644
2566 --- a/drivers/gpu/drm/radeon/radeon_clocks.c
2567 +++ b/drivers/gpu/drm/radeon/radeon_clocks.c
2568 @@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
2569 } else {
2570 DRM_INFO("Using generic clock info\n");
2571
2572 + /* may need to be per card */
2573 + rdev->clock.max_pixel_clock = 35000;
2574 +
2575 if (rdev->flags & RADEON_IS_IGP) {
2576 p1pll->reference_freq = 1432;
2577 p2pll->reference_freq = 1432;
2578 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2579 index a74217c..cd3c86c 100644
2580 --- a/drivers/gpu/drm/radeon/radeon_combios.c
2581 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
2582 @@ -3279,6 +3279,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
2583 rdev->pdev->subsystem_device == 0x30a4)
2584 return;
2585
2586 + /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
2587 + * - it hangs on resume inside the dynclk 1 table.
2588 + */
2589 + if (rdev->family == CHIP_RS480 &&
2590 + rdev->pdev->subsystem_vendor == 0x103c &&
2591 + rdev->pdev->subsystem_device == 0x30ae)
2592 + return;
2593 +
2594 /* DYN CLK 1 */
2595 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
2596 if (table)
2597 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
2598 index 319d85d..13690f3 100644
2599 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
2600 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
2601 @@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
2602 switch (mode) {
2603 case DRM_MODE_DPMS_ON:
2604 args.ucAction = ATOM_ENABLE;
2605 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2606 + /* workaround for DVOOutputControl on some RS690 systems */
2607 + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
2608 + u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
2609 + WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
2610 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2611 + WREG32(RADEON_BIOS_3_SCRATCH, reg);
2612 + } else
2613 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2614 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2615 args.ucAction = ATOM_LCD_BLON;
2616 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2617 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
2618 index 60125dd..3e9b41b 100644
2619 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
2620 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
2621 @@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
2622 DRM_ERROR("Trying to move memory with CP turned off.\n");
2623 return -EINVAL;
2624 }
2625 - r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
2626 +
2627 + BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
2628 +
2629 + r = radeon_copy(rdev, old_start, new_start,
2630 + new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
2631 + fence);
2632 /* FIXME: handle copy error */
2633 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
2634 evict, no_wait_reserve, no_wait_gpu, new_mem);
2635 diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
2636 index 257957c..4f7c3fc 100644
2637 --- a/drivers/hwmon/ds620.c
2638 +++ b/drivers/hwmon/ds620.c
2639 @@ -72,7 +72,7 @@ struct ds620_data {
2640 char valid; /* !=0 if following fields are valid */
2641 unsigned long last_updated; /* In jiffies */
2642
2643 - u16 temp[3]; /* Register values, word */
2644 + s16 temp[3]; /* Register values, word */
2645 };
2646
2647 /*
2648 diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
2649 index d94a24f..dd2d7b9 100644
2650 --- a/drivers/hwmon/max16065.c
2651 +++ b/drivers/hwmon/max16065.c
2652 @@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
2653
2654 static inline int ADC_TO_CURR(int adc, int gain)
2655 {
2656 - return adc * 1400000 / gain * 255;
2657 + return adc * 1400000 / (gain * 255);
2658 }
2659
2660 /*
2661 diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
2662 index 0a5008f..2332dc2 100644
2663 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
2664 +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
2665 @@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
2666 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
2667 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
2668 dst_release(ep->dst);
2669 - l2t_release(L2DATA(ep->com.tdev), ep->l2t);
2670 + l2t_release(ep->com.tdev, ep->l2t);
2671 }
2672 kfree(ep);
2673 }
2674 @@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2675 release_tid(ep->com.tdev, GET_TID(rpl), NULL);
2676 cxgb3_free_atid(ep->com.tdev, ep->atid);
2677 dst_release(ep->dst);
2678 - l2t_release(L2DATA(ep->com.tdev), ep->l2t);
2679 + l2t_release(ep->com.tdev, ep->l2t);
2680 put_ep(&ep->com);
2681 return CPL_RET_BUF_DONE;
2682 }
2683 @@ -1375,7 +1375,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
2684 if (!child_ep) {
2685 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
2686 __func__);
2687 - l2t_release(L2DATA(tdev), l2t);
2688 + l2t_release(tdev, l2t);
2689 dst_release(dst);
2690 goto reject;
2691 }
2692 @@ -1952,7 +1952,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2693 if (!err)
2694 goto out;
2695
2696 - l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
2697 + l2t_release(h->rdev.t3cdev_p, ep->l2t);
2698 fail4:
2699 dst_release(ep->dst);
2700 fail3:
2701 @@ -2123,7 +2123,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2702 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2703 l2t);
2704 dst_hold(new);
2705 - l2t_release(L2DATA(ep->com.tdev), ep->l2t);
2706 + l2t_release(ep->com.tdev, ep->l2t);
2707 ep->l2t = l2t;
2708 dst_release(old);
2709 ep->dst = new;
2710 diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
2711 index d87c9d0..328c64c 100644
2712 --- a/drivers/leds/ledtrig-timer.c
2713 +++ b/drivers/leds/ledtrig-timer.c
2714 @@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
2715
2716 if (count == size) {
2717 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
2718 + led_cdev->blink_delay_on = state;
2719 ret = count;
2720 }
2721
2722 @@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
2723
2724 if (count == size) {
2725 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
2726 + led_cdev->blink_delay_off = state;
2727 ret = count;
2728 }
2729
2730 diff --git a/drivers/md/linear.h b/drivers/md/linear.h
2731 index 0ce29b6..2f2da05 100644
2732 --- a/drivers/md/linear.h
2733 +++ b/drivers/md/linear.h
2734 @@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
2735
2736 struct linear_private_data
2737 {
2738 + struct rcu_head rcu;
2739 sector_t array_sectors;
2740 dev_info_t disks[0];
2741 - struct rcu_head rcu;
2742 };
2743
2744
2745 diff --git a/drivers/md/md.c b/drivers/md/md.c
2746 index 91e31e2..8554082 100644
2747 --- a/drivers/md/md.c
2748 +++ b/drivers/md/md.c
2749 @@ -1084,8 +1084,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
2750 ret = 0;
2751 }
2752 rdev->sectors = rdev->sb_start;
2753 + /* Limit to 4TB as metadata cannot record more than that */
2754 + if (rdev->sectors >= (2ULL << 32))
2755 + rdev->sectors = (2ULL << 32) - 2;
2756
2757 - if (rdev->sectors < sb->size * 2 && sb->level > 1)
2758 + if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
2759 /* "this cannot possibly happen" ... */
2760 ret = -EINVAL;
2761
2762 @@ -1119,7 +1122,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
2763 mddev->clevel[0] = 0;
2764 mddev->layout = sb->layout;
2765 mddev->raid_disks = sb->raid_disks;
2766 - mddev->dev_sectors = sb->size * 2;
2767 + mddev->dev_sectors = ((sector_t)sb->size) * 2;
2768 mddev->events = ev1;
2769 mddev->bitmap_info.offset = 0;
2770 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
2771 @@ -1361,6 +1364,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
2772 rdev->sb_start = calc_dev_sboffset(rdev);
2773 if (!num_sectors || num_sectors > rdev->sb_start)
2774 num_sectors = rdev->sb_start;
2775 + /* Limit to 4TB as metadata cannot record more than that.
2776 + * 4TB == 2^32 KB, or 2*2^32 sectors.
2777 + */
2778 + if (num_sectors >= (2ULL << 32))
2779 + num_sectors = (2ULL << 32) - 2;
2780 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2781 rdev->sb_page);
2782 md_super_wait(rdev->mddev);
2783 diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
2784 index 3db89e3..536c16c 100644
2785 --- a/drivers/media/dvb/dvb-usb/vp7045.c
2786 +++ b/drivers/media/dvb/dvb-usb/vp7045.c
2787 @@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
2788 static int vp7045_usb_probe(struct usb_interface *intf,
2789 const struct usb_device_id *id)
2790 {
2791 - struct dvb_usb_device *d;
2792 - int ret = dvb_usb_device_init(intf, &vp7045_properties,
2793 - THIS_MODULE, &d, adapter_nr);
2794 - if (ret)
2795 - return ret;
2796 -
2797 - d->priv = kmalloc(20, GFP_KERNEL);
2798 - if (!d->priv) {
2799 - dvb_usb_device_exit(intf);
2800 - return -ENOMEM;
2801 - }
2802 -
2803 - return ret;
2804 -}
2805 -
2806 -static void vp7045_usb_disconnect(struct usb_interface *intf)
2807 -{
2808 - struct dvb_usb_device *d = usb_get_intfdata(intf);
2809 - kfree(d->priv);
2810 - dvb_usb_device_exit(intf);
2811 + return dvb_usb_device_init(intf, &vp7045_properties,
2812 + THIS_MODULE, NULL, adapter_nr);
2813 }
2814
2815 static struct usb_device_id vp7045_usb_table [] = {
2816 @@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
2817 static struct dvb_usb_device_properties vp7045_properties = {
2818 .usb_ctrl = CYPRESS_FX2,
2819 .firmware = "dvb-usb-vp7045-01.fw",
2820 - .size_of_priv = sizeof(u8 *),
2821 + .size_of_priv = 20,
2822
2823 .num_adapters = 1,
2824 .adapter = {
2825 @@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
2826 static struct usb_driver vp7045_usb_driver = {
2827 .name = "dvb_usb_vp7045",
2828 .probe = vp7045_usb_probe,
2829 - .disconnect = vp7045_usb_disconnect,
2830 + .disconnect = dvb_usb_device_exit,
2831 .id_table = vp7045_usb_table,
2832 };
2833
2834 diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
2835 index ce595f9..9fd019e 100644
2836 --- a/drivers/media/rc/nuvoton-cir.c
2837 +++ b/drivers/media/rc/nuvoton-cir.c
2838 @@ -624,7 +624,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
2839 static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
2840 {
2841 DEFINE_IR_RAW_EVENT(rawir);
2842 - unsigned int count;
2843 u32 carrier;
2844 u8 sample;
2845 int i;
2846 @@ -637,65 +636,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
2847 if (nvt->carrier_detect_enabled)
2848 carrier = nvt_rx_carrier_detect(nvt);
2849
2850 - count = nvt->pkts;
2851 - nvt_dbg_verbose("Processing buffer of len %d", count);
2852 + nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
2853
2854 init_ir_raw_event(&rawir);
2855
2856 - for (i = 0; i < count; i++) {
2857 - nvt->pkts--;
2858 + for (i = 0; i < nvt->pkts; i++) {
2859 sample = nvt->buf[i];
2860
2861 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
2862 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
2863 * SAMPLE_PERIOD);
2864
2865 - if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
2866 - if (nvt->rawir.pulse == rawir.pulse)
2867 - nvt->rawir.duration += rawir.duration;
2868 - else {
2869 - nvt->rawir.duration = rawir.duration;
2870 - nvt->rawir.pulse = rawir.pulse;
2871 - }
2872 - continue;
2873 - }
2874 -
2875 - rawir.duration += nvt->rawir.duration;
2876 + nvt_dbg("Storing %s with duration %d",
2877 + rawir.pulse ? "pulse" : "space", rawir.duration);
2878
2879 - init_ir_raw_event(&nvt->rawir);
2880 - nvt->rawir.duration = 0;
2881 - nvt->rawir.pulse = rawir.pulse;
2882 -
2883 - if (sample == BUF_PULSE_BIT)
2884 - rawir.pulse = false;
2885 -
2886 - if (rawir.duration) {
2887 - nvt_dbg("Storing %s with duration %d",
2888 - rawir.pulse ? "pulse" : "space",
2889 - rawir.duration);
2890 -
2891 - ir_raw_event_store_with_filter(nvt->rdev, &rawir);
2892 - }
2893 + ir_raw_event_store_with_filter(nvt->rdev, &rawir);
2894
2895 /*
2896 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
2897 * indicates end of IR signal, but new data incoming. In both
2898 * cases, it means we're ready to call ir_raw_event_handle
2899 */
2900 - if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
2901 + if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
2902 nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
2903 ir_raw_event_handle(nvt->rdev);
2904 }
2905 }
2906
2907 + nvt->pkts = 0;
2908 +
2909 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
2910 ir_raw_event_handle(nvt->rdev);
2911
2912 - if (nvt->pkts) {
2913 - nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
2914 - nvt->pkts = 0;
2915 - }
2916 -
2917 nvt_dbg_verbose("%s done", __func__);
2918 }
2919
2920 @@ -1054,7 +1026,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
2921
2922 spin_lock_init(&nvt->nvt_lock);
2923 spin_lock_init(&nvt->tx.lock);
2924 - init_ir_raw_event(&nvt->rawir);
2925
2926 ret = -EBUSY;
2927 /* now claim resources */
2928 diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
2929 index 1241fc8..0d5e087 100644
2930 --- a/drivers/media/rc/nuvoton-cir.h
2931 +++ b/drivers/media/rc/nuvoton-cir.h
2932 @@ -67,7 +67,6 @@ static int debug;
2933 struct nvt_dev {
2934 struct pnp_dev *pdev;
2935 struct rc_dev *rdev;
2936 - struct ir_raw_event rawir;
2937
2938 spinlock_t nvt_lock;
2939
2940 diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
2941 index 1717144..e67c3d3 100644
2942 --- a/drivers/mfd/omap-usb-host.c
2943 +++ b/drivers/mfd/omap-usb-host.c
2944 @@ -676,7 +676,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
2945 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
2946 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
2947
2948 - reg |= (1 << (i + 1));
2949 } else
2950 continue;
2951
2952 diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
2953 index 2bfad5c..a56be93 100644
2954 --- a/drivers/mfd/tps65910-irq.c
2955 +++ b/drivers/mfd/tps65910-irq.c
2956 @@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
2957 switch (tps65910_chip_id(tps65910)) {
2958 case TPS65910:
2959 tps65910->irq_num = TPS65910_NUM_IRQ;
2960 + break;
2961 case TPS65911:
2962 tps65910->irq_num = TPS65911_NUM_IRQ;
2963 + break;
2964 }
2965
2966 /* Register with genirq */
2967 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
2968 index 7843efe..38089b2 100644
2969 --- a/drivers/mmc/core/core.c
2970 +++ b/drivers/mmc/core/core.c
2971 @@ -132,7 +132,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
2972 if (mrq->done)
2973 mrq->done(mrq);
2974
2975 - mmc_host_clk_gate(host);
2976 + mmc_host_clk_release(host);
2977 }
2978 }
2979
2980 @@ -191,7 +191,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
2981 mrq->stop->mrq = mrq;
2982 }
2983 }
2984 - mmc_host_clk_ungate(host);
2985 + mmc_host_clk_hold(host);
2986 led_trigger_event(host->led, LED_FULL);
2987 host->ops->request(host, mrq);
2988 }
2989 @@ -634,15 +634,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
2990 */
2991 void mmc_set_chip_select(struct mmc_host *host, int mode)
2992 {
2993 + mmc_host_clk_hold(host);
2994 host->ios.chip_select = mode;
2995 mmc_set_ios(host);
2996 + mmc_host_clk_release(host);
2997 }
2998
2999 /*
3000 * Sets the host clock to the highest possible frequency that
3001 * is below "hz".
3002 */
3003 -void mmc_set_clock(struct mmc_host *host, unsigned int hz)
3004 +static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
3005 {
3006 WARN_ON(hz < host->f_min);
3007
3008 @@ -653,6 +655,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
3009 mmc_set_ios(host);
3010 }
3011
3012 +void mmc_set_clock(struct mmc_host *host, unsigned int hz)
3013 +{
3014 + mmc_host_clk_hold(host);
3015 + __mmc_set_clock(host, hz);
3016 + mmc_host_clk_release(host);
3017 +}
3018 +
3019 #ifdef CONFIG_MMC_CLKGATE
3020 /*
3021 * This gates the clock by setting it to 0 Hz.
3022 @@ -685,7 +694,7 @@ void mmc_ungate_clock(struct mmc_host *host)
3023 if (host->clk_old) {
3024 BUG_ON(host->ios.clock);
3025 /* This call will also set host->clk_gated to false */
3026 - mmc_set_clock(host, host->clk_old);
3027 + __mmc_set_clock(host, host->clk_old);
3028 }
3029 }
3030
3031 @@ -713,8 +722,10 @@ void mmc_set_ungated(struct mmc_host *host)
3032 */
3033 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
3034 {
3035 + mmc_host_clk_hold(host);
3036 host->ios.bus_mode = mode;
3037 mmc_set_ios(host);
3038 + mmc_host_clk_release(host);
3039 }
3040
3041 /*
3042 @@ -722,8 +733,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
3043 */
3044 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
3045 {
3046 + mmc_host_clk_hold(host);
3047 host->ios.bus_width = width;
3048 mmc_set_ios(host);
3049 + mmc_host_clk_release(host);
3050 }
3051
3052 /**
3053 @@ -921,8 +934,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
3054
3055 ocr &= 3 << bit;
3056
3057 + mmc_host_clk_hold(host);
3058 host->ios.vdd = bit;
3059 mmc_set_ios(host);
3060 + mmc_host_clk_release(host);
3061 } else {
3062 pr_warning("%s: host doesn't support card's voltages\n",
3063 mmc_hostname(host));
3064 @@ -969,8 +984,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
3065 */
3066 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
3067 {
3068 + mmc_host_clk_hold(host);
3069 host->ios.timing = timing;
3070 mmc_set_ios(host);
3071 + mmc_host_clk_release(host);
3072 }
3073
3074 /*
3075 @@ -978,8 +995,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
3076 */
3077 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
3078 {
3079 + mmc_host_clk_hold(host);
3080 host->ios.drv_type = drv_type;
3081 mmc_set_ios(host);
3082 + mmc_host_clk_release(host);
3083 }
3084
3085 /*
3086 @@ -997,6 +1016,8 @@ static void mmc_power_up(struct mmc_host *host)
3087 {
3088 int bit;
3089
3090 + mmc_host_clk_hold(host);
3091 +
3092 /* If ocr is set, we use it */
3093 if (host->ocr)
3094 bit = ffs(host->ocr) - 1;
3095 @@ -1032,10 +1053,14 @@ static void mmc_power_up(struct mmc_host *host)
3096 * time required to reach a stable voltage.
3097 */
3098 mmc_delay(10);
3099 +
3100 + mmc_host_clk_release(host);
3101 }
3102
3103 static void mmc_power_off(struct mmc_host *host)
3104 {
3105 + mmc_host_clk_hold(host);
3106 +
3107 host->ios.clock = 0;
3108 host->ios.vdd = 0;
3109
3110 @@ -1053,6 +1078,8 @@ static void mmc_power_off(struct mmc_host *host)
3111 host->ios.bus_width = MMC_BUS_WIDTH_1;
3112 host->ios.timing = MMC_TIMING_LEGACY;
3113 mmc_set_ios(host);
3114 +
3115 + mmc_host_clk_release(host);
3116 }
3117
3118 /*
3119 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
3120 index b29d3e8..793d0a0 100644
3121 --- a/drivers/mmc/core/host.c
3122 +++ b/drivers/mmc/core/host.c
3123 @@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
3124 }
3125
3126 /**
3127 - * mmc_host_clk_ungate - ungate hardware MCI clocks
3128 + * mmc_host_clk_hold - ungate hardware MCI clocks
3129 * @host: host to ungate.
3130 *
3131 * Makes sure the host ios.clock is restored to a non-zero value
3132 * past this call. Increase clock reference count and ungate clock
3133 * if we're the first user.
3134 */
3135 -void mmc_host_clk_ungate(struct mmc_host *host)
3136 +void mmc_host_clk_hold(struct mmc_host *host)
3137 {
3138 unsigned long flags;
3139
3140 @@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
3141 }
3142
3143 /**
3144 - * mmc_host_clk_gate - gate off hardware MCI clocks
3145 + * mmc_host_clk_release - gate off hardware MCI clocks
3146 * @host: host to gate.
3147 *
3148 * Calls the host driver with ios.clock set to zero as often as possible
3149 * in order to gate off hardware MCI clocks. Decrease clock reference
3150 * count and schedule disabling of clock.
3151 */
3152 -void mmc_host_clk_gate(struct mmc_host *host)
3153 +void mmc_host_clk_release(struct mmc_host *host)
3154 {
3155 unsigned long flags;
3156
3157 @@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
3158 host->clk_requests--;
3159 if (mmc_host_may_gate_card(host->card) &&
3160 !host->clk_requests)
3161 - schedule_work(&host->clk_gate_work);
3162 + queue_work(system_nrt_wq, &host->clk_gate_work);
3163 spin_unlock_irqrestore(&host->clk_lock, flags);
3164 }
3165
3166 @@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
3167 if (cancel_work_sync(&host->clk_gate_work))
3168 mmc_host_clk_gate_delayed(host);
3169 if (host->clk_gated)
3170 - mmc_host_clk_ungate(host);
3171 + mmc_host_clk_hold(host);
3172 /* There should be only one user now */
3173 WARN_ON(host->clk_requests > 1);
3174 }
3175 diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
3176 index de199f9..fb8a5cd 100644
3177 --- a/drivers/mmc/core/host.h
3178 +++ b/drivers/mmc/core/host.h
3179 @@ -16,16 +16,16 @@ int mmc_register_host_class(void);
3180 void mmc_unregister_host_class(void);
3181
3182 #ifdef CONFIG_MMC_CLKGATE
3183 -void mmc_host_clk_ungate(struct mmc_host *host);
3184 -void mmc_host_clk_gate(struct mmc_host *host);
3185 +void mmc_host_clk_hold(struct mmc_host *host);
3186 +void mmc_host_clk_release(struct mmc_host *host);
3187 unsigned int mmc_host_clk_rate(struct mmc_host *host);
3188
3189 #else
3190 -static inline void mmc_host_clk_ungate(struct mmc_host *host)
3191 +static inline void mmc_host_clk_hold(struct mmc_host *host)
3192 {
3193 }
3194
3195 -static inline void mmc_host_clk_gate(struct mmc_host *host)
3196 +static inline void mmc_host_clk_release(struct mmc_host *host)
3197 {
3198 }
3199
3200 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
3201 index 69e3ee3..8cd999f 100644
3202 --- a/drivers/mmc/host/sdhci-s3c.c
3203 +++ b/drivers/mmc/host/sdhci-s3c.c
3204 @@ -301,6 +301,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
3205 ctrl &= ~SDHCI_CTRL_8BITBUS;
3206 break;
3207 default:
3208 + ctrl &= ~SDHCI_CTRL_4BITBUS;
3209 + ctrl &= ~SDHCI_CTRL_8BITBUS;
3210 break;
3211 }
3212
3213 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
3214 index 57d3293..74580bb 100644
3215 --- a/drivers/net/bnx2.c
3216 +++ b/drivers/net/bnx2.c
3217 @@ -416,6 +416,9 @@ struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
3218 struct bnx2 *bp = netdev_priv(dev);
3219 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3220
3221 + if (!cp->max_iscsi_conn)
3222 + return NULL;
3223 +
3224 cp->drv_owner = THIS_MODULE;
3225 cp->chip_id = bp->chip_id;
3226 cp->pdev = bp->pdev;
3227 @@ -8177,6 +8180,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
3228 bp->timer.data = (unsigned long) bp;
3229 bp->timer.function = bnx2_timer;
3230
3231 +#ifdef BCM_CNIC
3232 + bp->cnic_eth_dev.max_iscsi_conn =
3233 + bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN);
3234 +#endif
3235 pci_save_state(pdev);
3236
3237 return 0;
3238 diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
3239 index 410a49e..d11af7c 100644
3240 --- a/drivers/net/bnx2x/bnx2x_dcb.c
3241 +++ b/drivers/net/bnx2x/bnx2x_dcb.c
3242 @@ -1858,6 +1858,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
3243 break;
3244 case DCB_CAP_ATTR_DCBX:
3245 *cap = BNX2X_DCBX_CAPS;
3246 + break;
3247 default:
3248 rval = -EINVAL;
3249 break;
3250 diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
3251 index 74be989..04976db 100644
3252 --- a/drivers/net/bnx2x/bnx2x_main.c
3253 +++ b/drivers/net/bnx2x/bnx2x_main.c
3254 @@ -4138,7 +4138,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3255 int igu_seg_id;
3256 int port = BP_PORT(bp);
3257 int func = BP_FUNC(bp);
3258 - int reg_offset;
3259 + int reg_offset, reg_offset_en5;
3260 u64 section;
3261 int index;
3262 struct hc_sp_status_block_data sp_sb_data;
3263 @@ -4161,6 +4161,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3264
3265 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3266 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3267 + reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
3268 + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
3269 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3270 int sindex;
3271 /* take care of sig[0]..sig[4] */
3272 @@ -4175,7 +4177,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
3273 * and not 16 between the different groups
3274 */
3275 bp->attn_group[index].sig[4] = REG_RD(bp,
3276 - reg_offset + 0x10 + 0x4*index);
3277 + reg_offset_en5 + 0x4*index);
3278 else
3279 bp->attn_group[index].sig[4] = 0;
3280 }
3281 diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
3282 index 86bba25..0380b3a 100644
3283 --- a/drivers/net/bnx2x/bnx2x_reg.h
3284 +++ b/drivers/net/bnx2x/bnx2x_reg.h
3285 @@ -1325,6 +1325,18 @@
3286 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
3287 #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
3288 #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
3289 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
3290 + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
3291 + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
3292 + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
3293 + * parity; [31-10] Reserved; */
3294 +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
3295 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
3296 + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
3297 + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
3298 + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
3299 + * parity; [31-10] Reserved; */
3300 +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
3301 /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
3302 128 bit vector */
3303 #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
3304 diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
3305 index 11a92af..363c7f3 100644
3306 --- a/drivers/net/cnic.c
3307 +++ b/drivers/net/cnic.c
3308 @@ -605,11 +605,12 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
3309 }
3310 EXPORT_SYMBOL(cnic_unregister_driver);
3311
3312 -static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
3313 +static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
3314 + u32 next)
3315 {
3316 id_tbl->start = start_id;
3317 id_tbl->max = size;
3318 - id_tbl->next = 0;
3319 + id_tbl->next = next;
3320 spin_lock_init(&id_tbl->lock);
3321 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
3322 if (!id_tbl->table)
3323 @@ -2778,13 +2779,10 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
3324
3325 /* Tell compiler that status_blk fields can change. */
3326 barrier();
3327 - if (status_idx != *cp->kcq1.status_idx_ptr) {
3328 - status_idx = (u16) *cp->kcq1.status_idx_ptr;
3329 - /* status block index must be read first */
3330 - rmb();
3331 - cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
3332 - } else
3333 - break;
3334 + status_idx = (u16) *cp->kcq1.status_idx_ptr;
3335 + /* status block index must be read first */
3336 + rmb();
3337 + cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
3338 }
3339
3340 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
3341 @@ -2908,8 +2906,6 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3342
3343 /* Tell compiler that sblk fields can change. */
3344 barrier();
3345 - if (last_status == *info->status_idx_ptr)
3346 - break;
3347
3348 last_status = *info->status_idx_ptr;
3349 /* status block index must be read before reading the KCQ */
3350 @@ -3772,7 +3768,13 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3351 break;
3352
3353 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3354 - cnic_cm_upcall(cp, csk, opcode);
3355 + /* after we already sent CLOSE_REQ */
3356 + if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3357 + !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3358 + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3359 + cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3360 + else
3361 + cnic_cm_upcall(cp, csk, opcode);
3362 break;
3363 }
3364 csk_put(csk);
3365 @@ -3803,14 +3805,17 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
3366 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3367 {
3368 struct cnic_local *cp = dev->cnic_priv;
3369 + u32 port_id;
3370
3371 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3372 GFP_KERNEL);
3373 if (!cp->csk_tbl)
3374 return -ENOMEM;
3375
3376 + get_random_bytes(&port_id, sizeof(port_id));
3377 + port_id %= CNIC_LOCAL_PORT_RANGE;
3378 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3379 - CNIC_LOCAL_PORT_MIN)) {
3380 + CNIC_LOCAL_PORT_MIN, port_id)) {
3381 cnic_cm_free_mem(dev);
3382 return -ENOMEM;
3383 }
3384 @@ -3826,12 +3831,14 @@ static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3385 }
3386
3387 /* 1. If event opcode matches the expected event in csk->state
3388 - * 2. If the expected event is CLOSE_COMP, we accept any event
3389 + * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
3390 + * event
3391 * 3. If the expected event is 0, meaning the connection was never
3392 * never established, we accept the opcode from cm_abort.
3393 */
3394 if (opcode == csk->state || csk->state == 0 ||
3395 - csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3396 + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
3397 + csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3398 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3399 if (csk->state == 0)
3400 csk->state = opcode;
3401 @@ -4218,14 +4225,6 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
3402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3403 }
3404
3405 -static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
3406 -{
3407 - u32 max_conn;
3408 -
3409 - max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
3410 - dev->max_iscsi_conn = max_conn;
3411 -}
3412 -
3413 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
3414 {
3415 struct cnic_local *cp = dev->cnic_priv;
3416 @@ -4550,8 +4549,6 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3417 return err;
3418 }
3419
3420 - cnic_get_bnx2_iscsi_info(dev);
3421 -
3422 return 0;
3423 }
3424
3425 @@ -4826,7 +4823,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
3426 pfid = cp->pfid;
3427
3428 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
3429 - cp->iscsi_start_cid);
3430 + cp->iscsi_start_cid, 0);
3431
3432 if (ret)
3433 return -ENOMEM;
3434 @@ -4834,7 +4831,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
3435 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
3436 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
3437 BNX2X_FCOE_NUM_CONNECTIONS,
3438 - cp->fcoe_start_cid);
3439 + cp->fcoe_start_cid, 0);
3440
3441 if (ret)
3442 return -ENOMEM;
3443 @@ -5217,6 +5214,8 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
3444 cdev->pcidev = pdev;
3445 cp->chip_id = ethdev->chip_id;
3446
3447 + cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
3448 +
3449 cp->cnic_ops = &cnic_bnx2_ops;
3450 cp->start_hw = cnic_start_bnx2_hw;
3451 cp->stop_hw = cnic_stop_bnx2_hw;
3452 @@ -5335,7 +5334,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
3453
3454 dev = cnic_from_netdev(netdev);
3455
3456 - if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
3457 + if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
3458 /* Check for the hot-plug device */
3459 dev = is_cnic_dev(netdev);
3460 if (dev) {
3461 @@ -5351,7 +5350,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
3462 else if (event == NETDEV_UNREGISTER)
3463 cnic_ulp_exit(dev);
3464
3465 - if (event == NETDEV_UP) {
3466 + if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
3467 if (cnic_register_netdev(dev) != 0) {
3468 cnic_put(dev);
3469 goto done;
3470 diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
3471 index 862804f..3f2e12c 100644
3472 --- a/drivers/net/cxgb3/cxgb3_offload.c
3473 +++ b/drivers/net/cxgb3/cxgb3_offload.c
3474 @@ -1149,12 +1149,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
3475 if (te && te->ctx && te->client && te->client->redirect) {
3476 update_tcb = te->client->redirect(te->ctx, old, new, e);
3477 if (update_tcb) {
3478 + rcu_read_lock();
3479 l2t_hold(L2DATA(tdev), e);
3480 + rcu_read_unlock();
3481 set_l2t_ix(tdev, tid, e);
3482 }
3483 }
3484 }
3485 - l2t_release(L2DATA(tdev), e);
3486 + l2t_release(tdev, e);
3487 }
3488
3489 /*
3490 @@ -1267,7 +1269,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
3491 goto out_free;
3492
3493 err = -ENOMEM;
3494 - L2DATA(dev) = t3_init_l2t(l2t_capacity);
3495 + RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
3496 if (!L2DATA(dev))
3497 goto out_free;
3498
3499 @@ -1301,16 +1303,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
3500
3501 out_free_l2t:
3502 t3_free_l2t(L2DATA(dev));
3503 - L2DATA(dev) = NULL;
3504 + rcu_assign_pointer(dev->l2opt, NULL);
3505 out_free:
3506 kfree(t);
3507 return err;
3508 }
3509
3510 +static void clean_l2_data(struct rcu_head *head)
3511 +{
3512 + struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
3513 + t3_free_l2t(d);
3514 +}
3515 +
3516 +
3517 void cxgb3_offload_deactivate(struct adapter *adapter)
3518 {
3519 struct t3cdev *tdev = &adapter->tdev;
3520 struct t3c_data *t = T3C_DATA(tdev);
3521 + struct l2t_data *d;
3522
3523 remove_adapter(adapter);
3524 if (list_empty(&adapter_list))
3525 @@ -1318,8 +1328,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
3526
3527 free_tid_maps(&t->tid_maps);
3528 T3C_DATA(tdev) = NULL;
3529 - t3_free_l2t(L2DATA(tdev));
3530 - L2DATA(tdev) = NULL;
3531 + rcu_read_lock();
3532 + d = L2DATA(tdev);
3533 + rcu_read_unlock();
3534 + rcu_assign_pointer(tdev->l2opt, NULL);
3535 + call_rcu(&d->rcu_head, clean_l2_data);
3536 if (t->nofail_skb)
3537 kfree_skb(t->nofail_skb);
3538 kfree(t);
3539 diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
3540 index f452c40..4154097 100644
3541 --- a/drivers/net/cxgb3/l2t.c
3542 +++ b/drivers/net/cxgb3/l2t.c
3543 @@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
3544 struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
3545 struct net_device *dev)
3546 {
3547 - struct l2t_entry *e;
3548 - struct l2t_data *d = L2DATA(cdev);
3549 + struct l2t_entry *e = NULL;
3550 + struct l2t_data *d;
3551 + int hash;
3552 u32 addr = *(u32 *) neigh->primary_key;
3553 int ifidx = neigh->dev->ifindex;
3554 - int hash = arp_hash(addr, ifidx, d);
3555 struct port_info *p = netdev_priv(dev);
3556 int smt_idx = p->port_id;
3557
3558 + rcu_read_lock();
3559 + d = L2DATA(cdev);
3560 + if (!d)
3561 + goto done_rcu;
3562 +
3563 + hash = arp_hash(addr, ifidx, d);
3564 +
3565 write_lock_bh(&d->lock);
3566 for (e = d->l2tab[hash].first; e; e = e->next)
3567 if (e->addr == addr && e->ifindex == ifidx &&
3568 @@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
3569 }
3570 done:
3571 write_unlock_bh(&d->lock);
3572 +done_rcu:
3573 + rcu_read_unlock();
3574 return e;
3575 }
3576
3577 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
3578 index fd3eb07..c4dd066 100644
3579 --- a/drivers/net/cxgb3/l2t.h
3580 +++ b/drivers/net/cxgb3/l2t.h
3581 @@ -76,6 +76,7 @@ struct l2t_data {
3582 atomic_t nfree; /* number of free entries */
3583 rwlock_t lock;
3584 struct l2t_entry l2tab[0];
3585 + struct rcu_head rcu_head; /* to handle rcu cleanup */
3586 };
3587
3588 typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
3589 @@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
3590 /*
3591 * Getting to the L2 data from an offload device.
3592 */
3593 -#define L2DATA(dev) ((dev)->l2opt)
3594 +#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
3595
3596 #define W_TCB_L2T_IX 0
3597 #define S_TCB_L2T_IX 7
3598 @@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
3599 return t3_l2t_send_slow(dev, skb, e);
3600 }
3601
3602 -static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
3603 +static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
3604 {
3605 - if (atomic_dec_and_test(&e->refcnt))
3606 + struct l2t_data *d;
3607 +
3608 + rcu_read_lock();
3609 + d = L2DATA(t);
3610 +
3611 + if (atomic_dec_and_test(&e->refcnt) && d)
3612 t3_l2e_free(d, e);
3613 +
3614 + rcu_read_unlock();
3615 }
3616
3617 static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
3618 {
3619 - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
3620 + if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
3621 atomic_dec(&d->nfree);
3622 }
3623
3624 diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
3625 index 7501d97..f17aaa1 100644
3626 --- a/drivers/net/e1000/e1000_hw.c
3627 +++ b/drivers/net/e1000/e1000_hw.c
3628 @@ -4028,6 +4028,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3629 checksum += eeprom_data;
3630 }
3631
3632 +#ifdef CONFIG_PARISC
3633 + /* This is a signature and not a checksum on HP c8000 */
3634 + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
3635 + return E1000_SUCCESS;
3636 +
3637 +#endif
3638 if (checksum == (u16) EEPROM_SUM)
3639 return E1000_SUCCESS;
3640 else {
3641 diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
3642 index b388d78..145c924 100644
3643 --- a/drivers/net/ibmveth.c
3644 +++ b/drivers/net/ibmveth.c
3645 @@ -394,7 +394,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
3646 }
3647
3648 /* recycle the current buffer on the rx queue */
3649 -static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
3650 +static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
3651 {
3652 u32 q_index = adapter->rx_queue.index;
3653 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
3654 @@ -402,6 +402,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
3655 unsigned int index = correlator & 0xffffffffUL;
3656 union ibmveth_buf_desc desc;
3657 unsigned long lpar_rc;
3658 + int ret = 1;
3659
3660 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
3661 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
3662 @@ -409,7 +410,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
3663 if (!adapter->rx_buff_pool[pool].active) {
3664 ibmveth_rxq_harvest_buffer(adapter);
3665 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
3666 - return;
3667 + goto out;
3668 }
3669
3670 desc.fields.flags_len = IBMVETH_BUF_VALID |
3671 @@ -422,12 +423,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
3672 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
3673 "during recycle rc=%ld", lpar_rc);
3674 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
3675 + ret = 0;
3676 }
3677
3678 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
3679 adapter->rx_queue.index = 0;
3680 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
3681 }
3682 +
3683 +out:
3684 + return ret;
3685 }
3686
3687 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
3688 @@ -806,7 +811,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
3689 } else
3690 adapter->fw_ipv6_csum_support = data;
3691
3692 - if (ret != H_SUCCESS || ret6 != H_SUCCESS)
3693 + if (ret == H_SUCCESS || ret6 == H_SUCCESS)
3694 adapter->rx_csum = data;
3695 else
3696 rc1 = -EIO;
3697 @@ -924,6 +929,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
3698 union ibmveth_buf_desc descs[6];
3699 int last, i;
3700 int force_bounce = 0;
3701 + dma_addr_t dma_addr;
3702
3703 /*
3704 * veth handles a maximum of 6 segments including the header, so
3705 @@ -988,17 +994,16 @@ retry_bounce:
3706 }
3707
3708 /* Map the header */
3709 - descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
3710 - skb_headlen(skb),
3711 - DMA_TO_DEVICE);
3712 - if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
3713 + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
3714 + skb_headlen(skb), DMA_TO_DEVICE);
3715 + if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
3716 goto map_failed;
3717
3718 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
3719 + descs[0].fields.address = dma_addr;
3720
3721 /* Map the frags */
3722 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3723 - unsigned long dma_addr;
3724 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3725
3726 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
3727 @@ -1020,7 +1025,12 @@ retry_bounce:
3728 netdev->stats.tx_bytes += skb->len;
3729 }
3730
3731 - for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
3732 + dma_unmap_single(&adapter->vdev->dev,
3733 + descs[0].fields.address,
3734 + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
3735 + DMA_TO_DEVICE);
3736 +
3737 + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
3738 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
3739 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
3740 DMA_TO_DEVICE);
3741 @@ -1083,8 +1093,9 @@ restart_poll:
3742 if (rx_flush)
3743 ibmveth_flush_buffer(skb->data,
3744 length + offset);
3745 + if (!ibmveth_rxq_recycle_buffer(adapter))
3746 + kfree_skb(skb);
3747 skb = new_skb;
3748 - ibmveth_rxq_recycle_buffer(adapter);
3749 } else {
3750 ibmveth_rxq_harvest_buffer(adapter);
3751 skb_reserve(skb, offset);
3752 diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
3753 index 2c28621..97f46ac 100644
3754 --- a/drivers/net/igb/igb_main.c
3755 +++ b/drivers/net/igb/igb_main.c
3756 @@ -1985,7 +1985,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
3757
3758 if (hw->bus.func == 0)
3759 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
3760 - else if (hw->mac.type == e1000_82580)
3761 + else if (hw->mac.type >= e1000_82580)
3762 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3763 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3764 &eeprom_data);
3765 diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
3766 index 8800e1f..6a4826a 100644
3767 --- a/drivers/net/irda/smsc-ircc2.c
3768 +++ b/drivers/net/irda/smsc-ircc2.c
3769 @@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
3770 * Try to open driver instance
3771 *
3772 */
3773 -static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
3774 +static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
3775 {
3776 struct smsc_ircc_cb *self;
3777 struct net_device *dev;
3778 diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
3779 index 08e8e25..83f197d 100644
3780 --- a/drivers/net/ixgbe/ixgbe_main.c
3781 +++ b/drivers/net/ixgbe/ixgbe_main.c
3782 @@ -1366,8 +1366,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
3783 if (ring_is_rsc_enabled(rx_ring))
3784 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
3785
3786 - /* if this is a skb from previous receive DMA will be 0 */
3787 - if (rx_buffer_info->dma) {
3788 + /* linear means we are building an skb from multiple pages */
3789 + if (!skb_is_nonlinear(skb)) {
3790 u16 hlen;
3791 if (pkt_is_rsc &&
3792 !(staterr & IXGBE_RXD_STAT_EOP) &&
3793 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
3794 index 5d3436d..ca4694e 100644
3795 --- a/drivers/net/rionet.c
3796 +++ b/drivers/net/rionet.c
3797 @@ -80,13 +80,13 @@ static int rionet_capable = 1;
3798 */
3799 static struct rio_dev **rionet_active;
3800
3801 -#define is_rionet_capable(pef, src_ops, dst_ops) \
3802 - ((pef & RIO_PEF_INB_MBOX) && \
3803 - (pef & RIO_PEF_INB_DOORBELL) && \
3804 +#define is_rionet_capable(src_ops, dst_ops) \
3805 + ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
3806 + (dst_ops & RIO_DST_OPS_DATA_MSG) && \
3807 (src_ops & RIO_SRC_OPS_DOORBELL) && \
3808 (dst_ops & RIO_DST_OPS_DOORBELL))
3809 #define dev_rionet_capable(dev) \
3810 - is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
3811 + is_rionet_capable(dev->src_ops, dev->dst_ops)
3812
3813 #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
3814 #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
3815 @@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
3816 {
3817 int i, rc = 0;
3818 struct rionet_peer *peer, *tmp;
3819 - u32 pwdcsr;
3820 struct rionet_private *rnet = netdev_priv(ndev);
3821
3822 if (netif_msg_ifup(rnet))
3823 @@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
3824 continue;
3825 }
3826
3827 - /*
3828 - * If device has initialized inbound doorbells,
3829 - * send a join message
3830 - */
3831 - rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
3832 - if (pwdcsr & RIO_DOORBELL_AVAIL)
3833 - rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
3834 + /* Send a join message */
3835 + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
3836 }
3837
3838 out:
3839 @@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
3840 static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
3841 {
3842 int rc = -ENODEV;
3843 - u32 lpef, lsrc_ops, ldst_ops;
3844 + u32 lsrc_ops, ldst_ops;
3845 struct rionet_peer *peer;
3846 struct net_device *ndev = NULL;
3847
3848 @@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
3849 * on later probes
3850 */
3851 if (!rionet_check) {
3852 - rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
3853 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
3854 &lsrc_ops);
3855 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
3856 &ldst_ops);
3857 - if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
3858 + if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
3859 printk(KERN_ERR
3860 "%s: local device is not network capable\n",
3861 DRV_NAME);
3862 diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
3863 index c914729..7d1651b 100644
3864 --- a/drivers/net/sfc/efx.c
3865 +++ b/drivers/net/sfc/efx.c
3866 @@ -1051,7 +1051,6 @@ static int efx_init_io(struct efx_nic *efx)
3867 {
3868 struct pci_dev *pci_dev = efx->pci_dev;
3869 dma_addr_t dma_mask = efx->type->max_dma_mask;
3870 - bool use_wc;
3871 int rc;
3872
3873 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
3874 @@ -1102,21 +1101,8 @@ static int efx_init_io(struct efx_nic *efx)
3875 rc = -EIO;
3876 goto fail3;
3877 }
3878 -
3879 - /* bug22643: If SR-IOV is enabled then tx push over a write combined
3880 - * mapping is unsafe. We need to disable write combining in this case.
3881 - * MSI is unsupported when SR-IOV is enabled, and the firmware will
3882 - * have removed the MSI capability. So write combining is safe if
3883 - * there is an MSI capability.
3884 - */
3885 - use_wc = (!EFX_WORKAROUND_22643(efx) ||
3886 - pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
3887 - if (use_wc)
3888 - efx->membase = ioremap_wc(efx->membase_phys,
3889 - efx->type->mem_map_size);
3890 - else
3891 - efx->membase = ioremap_nocache(efx->membase_phys,
3892 - efx->type->mem_map_size);
3893 + efx->membase = ioremap_nocache(efx->membase_phys,
3894 + efx->type->mem_map_size);
3895 if (!efx->membase) {
3896 netif_err(efx, probe, efx->net_dev,
3897 "could not map memory BAR at %llx+%x\n",
3898 diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
3899 index cc97880..dc45110 100644
3900 --- a/drivers/net/sfc/io.h
3901 +++ b/drivers/net/sfc/io.h
3902 @@ -48,9 +48,9 @@
3903 * replacing the low 96 bits with zero does not affect functionality.
3904 * - If the host writes to the last dword address of such a register
3905 * (i.e. the high 32 bits) the underlying register will always be
3906 - * written. If the collector and the current write together do not
3907 - * provide values for all 128 bits of the register, the low 96 bits
3908 - * will be written as zero.
3909 + * written. If the collector does not hold values for the low 96
3910 + * bits of the register, they will be written as zero. Writing to
3911 + * the last qword does not have this effect and must not be done.
3912 * - If the host writes to the address of any other part of such a
3913 * register while the collector already holds values for some other
3914 * register, the write is discarded and the collector maintains its
3915 @@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
3916 _efx_writed(efx, value->u32[2], reg + 8);
3917 _efx_writed(efx, value->u32[3], reg + 12);
3918 #endif
3919 - wmb();
3920 mmiowb();
3921 spin_unlock_irqrestore(&efx->biu_lock, flags);
3922 }
3923 @@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
3924 __raw_writel((__force u32)value->u32[0], membase + addr);
3925 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
3926 #endif
3927 - wmb();
3928 mmiowb();
3929 spin_unlock_irqrestore(&efx->biu_lock, flags);
3930 }
3931 @@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
3932
3933 /* No lock required */
3934 _efx_writed(efx, value->u32[0], reg);
3935 - wmb();
3936 }
3937
3938 /* Read a 128-bit CSR, locking as appropriate. */
3939 @@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
3940
3941 spin_lock_irqsave(&efx->biu_lock, flags);
3942 value->u32[0] = _efx_readd(efx, reg + 0);
3943 - rmb();
3944 value->u32[1] = _efx_readd(efx, reg + 4);
3945 value->u32[2] = _efx_readd(efx, reg + 8);
3946 value->u32[3] = _efx_readd(efx, reg + 12);
3947 @@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
3948 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
3949 #else
3950 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
3951 - rmb();
3952 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
3953 #endif
3954 spin_unlock_irqrestore(&efx->biu_lock, flags);
3955 @@ -242,14 +237,12 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
3956
3957 #ifdef EFX_USE_QWORD_IO
3958 _efx_writeq(efx, value->u64[0], reg + 0);
3959 - _efx_writeq(efx, value->u64[1], reg + 8);
3960 #else
3961 _efx_writed(efx, value->u32[0], reg + 0);
3962 _efx_writed(efx, value->u32[1], reg + 4);
3963 +#endif
3964 _efx_writed(efx, value->u32[2], reg + 8);
3965 _efx_writed(efx, value->u32[3], reg + 12);
3966 -#endif
3967 - wmb();
3968 }
3969 #define efx_writeo_page(efx, value, reg, page) \
3970 _efx_writeo_page(efx, value, \
3971 diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
3972 index 3dd45ed..81a4253 100644
3973 --- a/drivers/net/sfc/mcdi.c
3974 +++ b/drivers/net/sfc/mcdi.c
3975 @@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
3976 return &nic_data->mcdi;
3977 }
3978
3979 -static inline void
3980 -efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
3981 -{
3982 - struct siena_nic_data *nic_data = efx->nic_data;
3983 - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
3984 -}
3985 -
3986 -static inline void
3987 -efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
3988 -{
3989 - struct siena_nic_data *nic_data = efx->nic_data;
3990 - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
3991 -}
3992 -
3993 void efx_mcdi_init(struct efx_nic *efx)
3994 {
3995 struct efx_mcdi_iface *mcdi;
3996 @@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
3997 const u8 *inbuf, size_t inlen)
3998 {
3999 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
4000 - unsigned pdu = MCDI_PDU(efx);
4001 - unsigned doorbell = MCDI_DOORBELL(efx);
4002 + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
4003 + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
4004 unsigned int i;
4005 efx_dword_t hdr;
4006 u32 xflags, seqno;
4007 @@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
4008 MCDI_HEADER_SEQ, seqno,
4009 MCDI_HEADER_XFLAGS, xflags);
4010
4011 - efx_mcdi_writed(efx, &hdr, pdu);
4012 + efx_writed(efx, &hdr, pdu);
4013
4014 for (i = 0; i < inlen; i += 4)
4015 - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
4016 - pdu + 4 + i);
4017 + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
4018 +
4019 + /* Ensure the payload is written out before the header */
4020 + wmb();
4021
4022 /* ring the doorbell with a distinctive value */
4023 - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
4024 - efx_mcdi_writed(efx, &hdr, doorbell);
4025 + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
4026 }
4027
4028 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
4029 {
4030 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
4031 - unsigned int pdu = MCDI_PDU(efx);
4032 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
4033 int i;
4034
4035 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
4036 BUG_ON(outlen & 3 || outlen >= 0x100);
4037
4038 for (i = 0; i < outlen; i += 4)
4039 - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
4040 + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
4041 }
4042
4043 static int efx_mcdi_poll(struct efx_nic *efx)
4044 @@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
4045 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
4046 unsigned int time, finish;
4047 unsigned int respseq, respcmd, error;
4048 - unsigned int pdu = MCDI_PDU(efx);
4049 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
4050 unsigned int rc, spins;
4051 efx_dword_t reg;
4052
4053 @@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
4054
4055 time = get_seconds();
4056
4057 - efx_mcdi_readd(efx, &reg, pdu);
4058 + rmb();
4059 + efx_readd(efx, &reg, pdu);
4060
4061 /* All 1's indicates that shared memory is in reset (and is
4062 * not a valid header). Wait for it to come out reset before
4063 @@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
4064 respseq, mcdi->seqno);
4065 rc = EIO;
4066 } else if (error) {
4067 - efx_mcdi_readd(efx, &reg, pdu + 4);
4068 + efx_readd(efx, &reg, pdu + 4);
4069 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
4070 #define TRANSLATE_ERROR(name) \
4071 case MC_CMD_ERR_ ## name: \
4072 @@ -222,21 +210,21 @@ out:
4073 /* Test and clear MC-rebooted flag for this port/function */
4074 int efx_mcdi_poll_reboot(struct efx_nic *efx)
4075 {
4076 - unsigned int addr = MCDI_REBOOT_FLAG(efx);
4077 + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
4078 efx_dword_t reg;
4079 uint32_t value;
4080
4081 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
4082 return false;
4083
4084 - efx_mcdi_readd(efx, &reg, addr);
4085 + efx_readd(efx, &reg, addr);
4086 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
4087
4088 if (value == 0)
4089 return 0;
4090
4091 EFX_ZERO_DWORD(reg);
4092 - efx_mcdi_writed(efx, &reg, addr);
4093 + efx_writed(efx, &reg, addr);
4094
4095 if (value == MC_STATUS_DWORD_ASSERT)
4096 return -EINTR;
4097 diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
4098 index f2a2b94..5ac9fa2 100644
4099 --- a/drivers/net/sfc/nic.c
4100 +++ b/drivers/net/sfc/nic.c
4101 @@ -1935,13 +1935,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
4102
4103 size = min_t(size_t, table->step, 16);
4104
4105 - if (table->offset >= efx->type->mem_map_size) {
4106 - /* No longer mapped; return dummy data */
4107 - memcpy(buf, "\xde\xc0\xad\xde", 4);
4108 - buf += table->rows * size;
4109 - continue;
4110 - }
4111 -
4112 for (i = 0; i < table->rows; i++) {
4113 switch (table->step) {
4114 case 4: /* 32-bit register or SRAM */
4115 diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
4116 index 4bd1f28..7443f99 100644
4117 --- a/drivers/net/sfc/nic.h
4118 +++ b/drivers/net/sfc/nic.h
4119 @@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
4120 /**
4121 * struct siena_nic_data - Siena NIC state
4122 * @mcdi: Management-Controller-to-Driver Interface
4123 - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
4124 * @wol_filter_id: Wake-on-LAN packet filter id
4125 */
4126 struct siena_nic_data {
4127 struct efx_mcdi_iface mcdi;
4128 - void __iomem *mcdi_smem;
4129 int wol_filter_id;
4130 };
4131
4132 diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
4133 index fb4721f..ceac1c9 100644
4134 --- a/drivers/net/sfc/siena.c
4135 +++ b/drivers/net/sfc/siena.c
4136 @@ -220,26 +220,12 @@ static int siena_probe_nic(struct efx_nic *efx)
4137 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
4138 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
4139
4140 - /* Initialise MCDI */
4141 - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
4142 - FR_CZ_MC_TREG_SMEM,
4143 - FR_CZ_MC_TREG_SMEM_STEP *
4144 - FR_CZ_MC_TREG_SMEM_ROWS);
4145 - if (!nic_data->mcdi_smem) {
4146 - netif_err(efx, probe, efx->net_dev,
4147 - "could not map MCDI at %llx+%x\n",
4148 - (unsigned long long)efx->membase_phys +
4149 - FR_CZ_MC_TREG_SMEM,
4150 - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
4151 - rc = -ENOMEM;
4152 - goto fail1;
4153 - }
4154 efx_mcdi_init(efx);
4155
4156 /* Recover from a failed assertion before probing */
4157 rc = efx_mcdi_handle_assertion(efx);
4158 if (rc)
4159 - goto fail2;
4160 + goto fail1;
4161
4162 /* Let the BMC know that the driver is now in charge of link and
4163 * filter settings. We must do this before we reset the NIC */
4164 @@ -294,7 +280,6 @@ fail4:
4165 fail3:
4166 efx_mcdi_drv_attach(efx, false, NULL);
4167 fail2:
4168 - iounmap(nic_data->mcdi_smem);
4169 fail1:
4170 kfree(efx->nic_data);
4171 return rc;
4172 @@ -374,8 +359,6 @@ static int siena_init_nic(struct efx_nic *efx)
4173
4174 static void siena_remove_nic(struct efx_nic *efx)
4175 {
4176 - struct siena_nic_data *nic_data = efx->nic_data;
4177 -
4178 efx_nic_free_buffer(efx, &efx->irq_status);
4179
4180 siena_reset_hw(efx, RESET_TYPE_ALL);
4181 @@ -385,8 +368,7 @@ static void siena_remove_nic(struct efx_nic *efx)
4182 efx_mcdi_drv_attach(efx, false, NULL);
4183
4184 /* Tear down the private nic state */
4185 - iounmap(nic_data->mcdi_smem);
4186 - kfree(nic_data);
4187 + kfree(efx->nic_data);
4188 efx->nic_data = NULL;
4189 }
4190
4191 @@ -624,7 +606,8 @@ const struct efx_nic_type siena_a0_nic_type = {
4192 .default_mac_ops = &efx_mcdi_mac_operations,
4193
4194 .revision = EFX_REV_SIENA_A0,
4195 - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
4196 + .mem_map_size = (FR_CZ_MC_TREG_SMEM +
4197 + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
4198 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
4199 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
4200 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
4201 diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
4202 index 99ff114..e4dd3a7 100644
4203 --- a/drivers/net/sfc/workarounds.h
4204 +++ b/drivers/net/sfc/workarounds.h
4205 @@ -38,8 +38,6 @@
4206 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
4207 /* Legacy interrupt storm when interrupt fifo fills */
4208 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
4209 -/* Write combining and sriov=enabled are incompatible */
4210 -#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
4211
4212 /* Spurious parity errors in TSORT buffers */
4213 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
4214 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
4215 index a1f9f9e..38f6859 100644
4216 --- a/drivers/net/tg3.c
4217 +++ b/drivers/net/tg3.c
4218 @@ -7267,16 +7267,11 @@ static int tg3_chip_reset(struct tg3 *tp)
4219 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4220 }
4221
4222 - if (tg3_flag(tp, ENABLE_APE))
4223 - tp->mac_mode = MAC_MODE_APE_TX_EN |
4224 - MAC_MODE_APE_RX_EN |
4225 - MAC_MODE_TDE_ENABLE;
4226 -
4227 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
4228 - tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4229 + tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4230 val = tp->mac_mode;
4231 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
4232 - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4233 + tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4234 val = tp->mac_mode;
4235 } else
4236 val = 0;
4237 @@ -8408,12 +8403,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
4238 udelay(10);
4239 }
4240
4241 - if (tg3_flag(tp, ENABLE_APE))
4242 - tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
4243 - else
4244 - tp->mac_mode = 0;
4245 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
4246 - MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
4247 + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
4248 + MAC_MODE_FHDE_ENABLE;
4249 + if (tg3_flag(tp, ENABLE_APE))
4250 + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
4251 if (!tg3_flag(tp, 5705_PLUS) &&
4252 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
4253 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
4254 @@ -8988,7 +8982,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
4255 * Turn off MSI one shot mode. Otherwise this test has no
4256 * observable way to know whether the interrupt was delivered.
4257 */
4258 - if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
4259 + if (tg3_flag(tp, 57765_PLUS)) {
4260 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
4261 tw32(MSGINT_MODE, val);
4262 }
4263 @@ -9016,6 +9010,10 @@ static int tg3_test_interrupt(struct tg3 *tp)
4264 break;
4265 }
4266
4267 + if (tg3_flag(tp, 57765_PLUS) &&
4268 + tnapi->hw_status->status_tag != tnapi->last_tag)
4269 + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
4270 +
4271 msleep(10);
4272 }
4273
4274 @@ -9030,7 +9028,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
4275
4276 if (intr_ok) {
4277 /* Reenable MSI one shot mode. */
4278 - if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
4279 + if (tg3_flag(tp, 57765_PLUS)) {
4280 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
4281 tw32(MSGINT_MODE, val);
4282 }
4283 @@ -12947,7 +12945,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
4284 }
4285
4286 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
4287 - ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
4288 + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
4289 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
4290 + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
4291 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
4292 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
4293 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
4294 diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
4295 index 6998aa6..5250288 100644
4296 --- a/drivers/net/usb/asix.c
4297 +++ b/drivers/net/usb/asix.c
4298 @@ -1502,6 +1502,10 @@ static const struct usb_device_id products [] = {
4299 USB_DEVICE (0x04f1, 0x3008),
4300 .driver_info = (unsigned long) &ax8817x_info,
4301 }, {
4302 + // ASIX AX88772B 10/100
4303 + USB_DEVICE (0x0b95, 0x772b),
4304 + .driver_info = (unsigned long) &ax88772_info,
4305 +}, {
4306 // ASIX AX88772 10/100
4307 USB_DEVICE (0x0b95, 0x7720),
4308 .driver_info = (unsigned long) &ax88772_info,
4309 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
4310 index f33ca6a..d3b9e95 100644
4311 --- a/drivers/net/usb/cdc_ncm.c
4312 +++ b/drivers/net/usb/cdc_ncm.c
4313 @@ -54,7 +54,7 @@
4314 #include <linux/usb/usbnet.h>
4315 #include <linux/usb/cdc.h>
4316
4317 -#define DRIVER_VERSION "01-June-2011"
4318 +#define DRIVER_VERSION "04-Aug-2011"
4319
4320 /* CDC NCM subclass 3.2.1 */
4321 #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
4322 @@ -164,35 +164,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
4323 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
4324 }
4325
4326 -static int
4327 -cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
4328 - void *data, u16 flags, u16 *actlen, u16 timeout)
4329 -{
4330 - int err;
4331 -
4332 - err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
4333 - usb_rcvctrlpipe(ctx->udev, 0) :
4334 - usb_sndctrlpipe(ctx->udev, 0),
4335 - req->bNotificationType, req->bmRequestType,
4336 - req->wValue,
4337 - req->wIndex, data,
4338 - req->wLength, timeout);
4339 -
4340 - if (err < 0) {
4341 - if (actlen)
4342 - *actlen = 0;
4343 - return err;
4344 - }
4345 -
4346 - if (actlen)
4347 - *actlen = err;
4348 -
4349 - return 0;
4350 -}
4351 -
4352 static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4353 {
4354 - struct usb_cdc_notification req;
4355 u32 val;
4356 u8 flags;
4357 u8 iface_no;
4358 @@ -201,14 +174,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4359
4360 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
4361
4362 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
4363 - req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
4364 - req.wValue = 0;
4365 - req.wIndex = cpu_to_le16(iface_no);
4366 - req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
4367 -
4368 - err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
4369 - if (err) {
4370 + err = usb_control_msg(ctx->udev,
4371 + usb_rcvctrlpipe(ctx->udev, 0),
4372 + USB_CDC_GET_NTB_PARAMETERS,
4373 + USB_TYPE_CLASS | USB_DIR_IN
4374 + | USB_RECIP_INTERFACE,
4375 + 0, iface_no, &ctx->ncm_parm,
4376 + sizeof(ctx->ncm_parm), 10000);
4377 + if (err < 0) {
4378 pr_debug("failed GET_NTB_PARAMETERS\n");
4379 return 1;
4380 }
4381 @@ -254,31 +227,26 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4382
4383 /* inform device about NTB input size changes */
4384 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
4385 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
4386 - USB_RECIP_INTERFACE;
4387 - req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
4388 - req.wValue = 0;
4389 - req.wIndex = cpu_to_le16(iface_no);
4390
4391 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
4392 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
4393 -
4394 - req.wLength = 8;
4395 - ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
4396 - ndp_in_sz.wNtbInMaxDatagrams =
4397 - cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
4398 - ndp_in_sz.wReserved = 0;
4399 - err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
4400 - 1000);
4401 + err = usb_control_msg(ctx->udev,
4402 + usb_sndctrlpipe(ctx->udev, 0),
4403 + USB_CDC_SET_NTB_INPUT_SIZE,
4404 + USB_TYPE_CLASS | USB_DIR_OUT
4405 + | USB_RECIP_INTERFACE,
4406 + 0, iface_no, &ndp_in_sz, 8, 1000);
4407 } else {
4408 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
4409 -
4410 - req.wLength = 4;
4411 - err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
4412 - NULL, 1000);
4413 + err = usb_control_msg(ctx->udev,
4414 + usb_sndctrlpipe(ctx->udev, 0),
4415 + USB_CDC_SET_NTB_INPUT_SIZE,
4416 + USB_TYPE_CLASS | USB_DIR_OUT
4417 + | USB_RECIP_INTERFACE,
4418 + 0, iface_no, &dwNtbInMaxSize, 4, 1000);
4419 }
4420
4421 - if (err)
4422 + if (err < 0)
4423 pr_debug("Setting NTB Input Size failed\n");
4424 }
4425
4426 @@ -333,29 +301,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4427
4428 /* set CRC Mode */
4429 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
4430 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
4431 - USB_RECIP_INTERFACE;
4432 - req.bNotificationType = USB_CDC_SET_CRC_MODE;
4433 - req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
4434 - req.wIndex = cpu_to_le16(iface_no);
4435 - req.wLength = 0;
4436 -
4437 - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
4438 - if (err)
4439 + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
4440 + USB_CDC_SET_CRC_MODE,
4441 + USB_TYPE_CLASS | USB_DIR_OUT
4442 + | USB_RECIP_INTERFACE,
4443 + USB_CDC_NCM_CRC_NOT_APPENDED,
4444 + iface_no, NULL, 0, 1000);
4445 + if (err < 0)
4446 pr_debug("Setting CRC mode off failed\n");
4447 }
4448
4449 /* set NTB format, if both formats are supported */
4450 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
4451 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
4452 - USB_RECIP_INTERFACE;
4453 - req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
4454 - req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
4455 - req.wIndex = cpu_to_le16(iface_no);
4456 - req.wLength = 0;
4457 -
4458 - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
4459 - if (err)
4460 + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
4461 + USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
4462 + | USB_DIR_OUT | USB_RECIP_INTERFACE,
4463 + USB_CDC_NCM_NTB16_FORMAT,
4464 + iface_no, NULL, 0, 1000);
4465 + if (err < 0)
4466 pr_debug("Setting NTB format to 16-bit failed\n");
4467 }
4468
4469 @@ -365,17 +328,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4470 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
4471 __le16 max_datagram_size;
4472 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
4473 -
4474 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
4475 - USB_RECIP_INTERFACE;
4476 - req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
4477 - req.wValue = 0;
4478 - req.wIndex = cpu_to_le16(iface_no);
4479 - req.wLength = cpu_to_le16(2);
4480 -
4481 - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
4482 - 1000);
4483 - if (err) {
4484 + err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
4485 + USB_CDC_GET_MAX_DATAGRAM_SIZE,
4486 + USB_TYPE_CLASS | USB_DIR_IN
4487 + | USB_RECIP_INTERFACE,
4488 + 0, iface_no, &max_datagram_size,
4489 + 2, 1000);
4490 + if (err < 0) {
4491 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
4492 CDC_NCM_MIN_DATAGRAM_SIZE);
4493 } else {
4494 @@ -396,17 +355,15 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
4495 CDC_NCM_MIN_DATAGRAM_SIZE;
4496
4497 /* if value changed, update device */
4498 - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
4499 - USB_RECIP_INTERFACE;
4500 - req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
4501 - req.wValue = 0;
4502 - req.wIndex = cpu_to_le16(iface_no);
4503 - req.wLength = 2;
4504 - max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
4505 -
4506 - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
4507 - 0, NULL, 1000);
4508 - if (err)
4509 + err = usb_control_msg(ctx->udev,
4510 + usb_sndctrlpipe(ctx->udev, 0),
4511 + USB_CDC_SET_MAX_DATAGRAM_SIZE,
4512 + USB_TYPE_CLASS | USB_DIR_OUT
4513 + | USB_RECIP_INTERFACE,
4514 + 0,
4515 + iface_no, &max_datagram_size,
4516 + 2, 1000);
4517 + if (err < 0)
4518 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
4519 }
4520
4521 @@ -672,7 +629,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
4522 u32 rem;
4523 u32 offset;
4524 u32 last_offset;
4525 - u16 n = 0;
4526 + u16 n = 0, index;
4527 u8 ready2send = 0;
4528
4529 /* if there is a remaining skb, it gets priority */
4530 @@ -860,8 +817,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
4531 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
4532 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
4533 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
4534 - ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
4535 - ctx->tx_ndp_modulus);
4536 + index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
4537 + ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
4538
4539 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
4540 ctx->tx_seq++;
4541 @@ -874,12 +831,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
4542 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
4543 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
4544
4545 - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
4546 + memcpy(((u8 *)skb_out->data) + index,
4547 &(ctx->tx_ncm.ndp16),
4548 sizeof(ctx->tx_ncm.ndp16));
4549
4550 - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
4551 - sizeof(ctx->tx_ncm.ndp16),
4552 + memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
4553 &(ctx->tx_ncm.dpe16),
4554 (ctx->tx_curr_frame_num + 1) *
4555 sizeof(struct usb_cdc_ncm_dpe16));
4556 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
4557 index 2d4c091..2d394af 100644
4558 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
4559 +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
4560 @@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
4561 case ADC_DC_CAL:
4562 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
4563 if (!IS_CHAN_B(chan) &&
4564 - !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
4565 + !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
4566 + IS_CHAN_HT20(chan)))
4567 supported = true;
4568 break;
4569 }
4570 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
4571 index e8ac70d..029773c 100644
4572 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
4573 +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
4574 @@ -1516,7 +1516,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {
4575 {0x00008258, 0x00000000},
4576 {0x0000825c, 0x40000000},
4577 {0x00008260, 0x00080922},
4578 - {0x00008264, 0x9bc00010},
4579 + {0x00008264, 0x9d400010},
4580 {0x00008268, 0xffffffff},
4581 {0x0000826c, 0x0000ffff},
4582 {0x00008270, 0x00000000},
4583 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4584 index 7e07f0f..417106b 100644
4585 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4586 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4587 @@ -68,7 +68,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
4588 static const struct ar9300_eeprom ar9300_default = {
4589 .eepromVersion = 2,
4590 .templateVersion = 2,
4591 - .macAddr = {1, 2, 3, 4, 5, 6},
4592 + .macAddr = {0, 2, 3, 4, 5, 6},
4593 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4594 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
4595 .baseEepHeader = {
4596 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
4597 index 2ca351f..5362306 100644
4598 --- a/drivers/net/wireless/ath/ath9k/main.c
4599 +++ b/drivers/net/wireless/ath/ath9k/main.c
4600 @@ -2260,7 +2260,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
4601
4602 mutex_lock(&sc->mutex);
4603 ah->coverage_class = coverage_class;
4604 +
4605 + ath9k_ps_wakeup(sc);
4606 ath9k_hw_init_global_settings(ah);
4607 + ath9k_ps_restore(sc);
4608 +
4609 mutex_unlock(&sc->mutex);
4610 }
4611
4612 diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
4613 index 54d093c..b54966c 100644
4614 --- a/drivers/net/wireless/ath/carl9170/main.c
4615 +++ b/drivers/net/wireless/ath/carl9170/main.c
4616 @@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
4617 * the high througput speed in 802.11n networks.
4618 */
4619
4620 - if (!is_main_vif(ar, vif))
4621 + if (!is_main_vif(ar, vif)) {
4622 + mutex_lock(&ar->mutex);
4623 goto err_softw;
4624 + }
4625
4626 /*
4627 * While the hardware supports *catch-all* key, for offloading
4628 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
4629 index eb41596..b1fe4fe 100644
4630 --- a/drivers/net/wireless/b43/main.c
4631 +++ b/drivers/net/wireless/b43/main.c
4632 @@ -1571,7 +1571,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
4633 u32 cmd, beacon0_valid, beacon1_valid;
4634
4635 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
4636 - !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
4637 + !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
4638 + !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
4639 return;
4640
4641 /* This is the bottom half of the asynchronous beacon update. */
4642 diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
4643 index 977bd24..164bcae 100644
4644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
4645 +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
4646 @@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
4647
4648 out:
4649
4650 - rs_sta->last_txrate_idx = index;
4651 - if (sband->band == IEEE80211_BAND_5GHZ)
4652 - info->control.rates[0].idx = rs_sta->last_txrate_idx -
4653 - IWL_FIRST_OFDM_RATE;
4654 - else
4655 + if (sband->band == IEEE80211_BAND_5GHZ) {
4656 + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
4657 + index = IWL_FIRST_OFDM_RATE;
4658 + rs_sta->last_txrate_idx = index;
4659 + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
4660 + } else {
4661 + rs_sta->last_txrate_idx = index;
4662 info->control.rates[0].idx = rs_sta->last_txrate_idx;
4663 + }
4664
4665 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
4666 }
4667 diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
4668 index 3be76bd..d273d50 100644
4669 --- a/drivers/net/wireless/iwlegacy/iwl-core.c
4670 +++ b/drivers/net/wireless/iwlegacy/iwl-core.c
4671 @@ -938,7 +938,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
4672 &priv->contexts[IWL_RXON_CTX_BSS]);
4673 #endif
4674
4675 - wake_up_interruptible(&priv->wait_command_queue);
4676 + wake_up(&priv->wait_command_queue);
4677
4678 /* Keep the restart process from trying to send host
4679 * commands by clearing the INIT status bit */
4680 @@ -1776,7 +1776,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
4681 IWL_ERR(priv, "On demand firmware reload\n");
4682 /* Set the FW error flag -- cleared on iwl_down */
4683 set_bit(STATUS_FW_ERROR, &priv->status);
4684 - wake_up_interruptible(&priv->wait_command_queue);
4685 + wake_up(&priv->wait_command_queue);
4686 /*
4687 * Keep the restart process from trying to send host
4688 * commands by clearing the INIT status bit
4689 diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
4690 index 62b4b09..ce1fc9f 100644
4691 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
4692 +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
4693 @@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
4694 goto out;
4695 }
4696
4697 - ret = wait_event_interruptible_timeout(priv->wait_command_queue,
4698 + ret = wait_event_timeout(priv->wait_command_queue,
4699 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
4700 HOST_COMPLETE_TIMEOUT);
4701 if (!ret) {
4702 diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
4703 index 4fff995..ef9e268 100644
4704 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c
4705 +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
4706 @@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
4707 cmd = txq->cmd[cmd_index];
4708 meta = &txq->meta[cmd_index];
4709
4710 + txq->time_stamp = jiffies;
4711 +
4712 pci_unmap_single(priv->pci_dev,
4713 dma_unmap_addr(meta, mapping),
4714 dma_unmap_len(meta, len),
4715 @@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
4716 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4717 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
4718 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
4719 - wake_up_interruptible(&priv->wait_command_queue);
4720 + wake_up(&priv->wait_command_queue);
4721 }
4722
4723 /* Mark as unmapped */
4724 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
4725 index 0ee6be6..421d5c8 100644
4726 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
4727 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
4728 @@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
4729 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
4730 test_bit(STATUS_RF_KILL_HW, &priv->status));
4731 else
4732 - wake_up_interruptible(&priv->wait_command_queue);
4733 + wake_up(&priv->wait_command_queue);
4734 }
4735
4736 /**
4737 @@ -2518,7 +2518,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
4738 iwl3945_reg_txpower_periodic(priv);
4739
4740 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
4741 - wake_up_interruptible(&priv->wait_command_queue);
4742 + wake_up(&priv->wait_command_queue);
4743
4744 return;
4745
4746 @@ -2549,7 +2549,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
4747 iwl_legacy_clear_driver_stations(priv);
4748
4749 /* Unblock any waiting calls */
4750 - wake_up_interruptible_all(&priv->wait_command_queue);
4751 + wake_up_all(&priv->wait_command_queue);
4752
4753 /* Wipe out the EXIT_PENDING status bit if we are not actually
4754 * exiting the module */
4755 @@ -3125,7 +3125,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
4756
4757 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
4758 * mac80211 will not be run successfully. */
4759 - ret = wait_event_interruptible_timeout(priv->wait_command_queue,
4760 + ret = wait_event_timeout(priv->wait_command_queue,
4761 test_bit(STATUS_READY, &priv->status),
4762 UCODE_READY_TIMEOUT);
4763 if (!ret) {
4764 diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
4765 index 7157ba5..0c37c02 100644
4766 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
4767 +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
4768 @@ -704,7 +704,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
4769 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
4770 test_bit(STATUS_RF_KILL_HW, &priv->status));
4771 else
4772 - wake_up_interruptible(&priv->wait_command_queue);
4773 + wake_up(&priv->wait_command_queue);
4774 }
4775
4776 /**
4777 @@ -1054,7 +1054,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4778 handled |= CSR_INT_BIT_FH_TX;
4779 /* Wake up uCode load routine, now that load is complete */
4780 priv->ucode_write_complete = 1;
4781 - wake_up_interruptible(&priv->wait_command_queue);
4782 + wake_up(&priv->wait_command_queue);
4783 }
4784
4785 if (inta & ~handled) {
4786 @@ -2126,7 +2126,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
4787 iwl4965_rf_kill_ct_config(priv);
4788
4789 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
4790 - wake_up_interruptible(&priv->wait_command_queue);
4791 + wake_up(&priv->wait_command_queue);
4792
4793 iwl_legacy_power_update_mode(priv, true);
4794 IWL_DEBUG_INFO(priv, "Updated power mode\n");
4795 @@ -2159,7 +2159,7 @@ static void __iwl4965_down(struct iwl_priv *priv)
4796 iwl_legacy_clear_driver_stations(priv);
4797
4798 /* Unblock any waiting calls */
4799 - wake_up_interruptible_all(&priv->wait_command_queue);
4800 + wake_up_all(&priv->wait_command_queue);
4801
4802 /* Wipe out the EXIT_PENDING status bit if we are not actually
4803 * exiting the module */
4804 @@ -2597,7 +2597,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)
4805
4806 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
4807 * mac80211 will not be run successfully. */
4808 - ret = wait_event_interruptible_timeout(priv->wait_command_queue,
4809 + ret = wait_event_timeout(priv->wait_command_queue,
4810 test_bit(STATUS_READY, &priv->status),
4811 UCODE_READY_TIMEOUT);
4812 if (!ret) {
4813 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
4814 index 8e1942e..f24165d 100644
4815 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
4816 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
4817 @@ -2440,7 +2440,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
4818 IEEE80211_HW_SPECTRUM_MGMT |
4819 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
4820
4821 + /*
4822 + * Including the following line will crash some AP's. This
4823 + * workaround removes the stimulus which causes the crash until
4824 + * the AP software can be fixed.
4825 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
4826 + */
4827
4828 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
4829 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
4830 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
4831 index d60d630..f524016 100644
4832 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
4833 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
4834 @@ -406,31 +406,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
4835
4836 mutex_lock(&priv->mutex);
4837
4838 - if (test_bit(STATUS_SCANNING, &priv->status) &&
4839 - priv->scan_type != IWL_SCAN_NORMAL) {
4840 - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
4841 - ret = -EAGAIN;
4842 - goto out_unlock;
4843 - }
4844 -
4845 - /* mac80211 will only ask for one band at a time */
4846 - priv->scan_request = req;
4847 - priv->scan_vif = vif;
4848 -
4849 /*
4850 * If an internal scan is in progress, just set
4851 * up the scan_request as per above.
4852 */
4853 if (priv->scan_type != IWL_SCAN_NORMAL) {
4854 - IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
4855 + IWL_DEBUG_SCAN(priv,
4856 + "SCAN request during internal scan - defer\n");
4857 + priv->scan_request = req;
4858 + priv->scan_vif = vif;
4859 ret = 0;
4860 - } else
4861 + } else {
4862 + priv->scan_request = req;
4863 + priv->scan_vif = vif;
4864 + /*
4865 + * mac80211 will only ask for one band at a time
4866 + * so using channels[0] here is ok
4867 + */
4868 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
4869 req->channels[0]->band);
4870 + if (ret) {
4871 + priv->scan_request = NULL;
4872 + priv->scan_vif = NULL;
4873 + }
4874 + }
4875
4876 IWL_DEBUG_MAC80211(priv, "leave\n");
4877
4878 -out_unlock:
4879 mutex_unlock(&priv->mutex);
4880
4881 return ret;
4882 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
4883 index 137dba9..c368c50 100644
4884 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
4885 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
4886 @@ -802,6 +802,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
4887 cmd = txq->cmd[cmd_index];
4888 meta = &txq->meta[cmd_index];
4889
4890 + txq->time_stamp = jiffies;
4891 +
4892 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
4893
4894 /* Input error checking is done when commands are added to queue. */
4895 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
4896 index 5a45228..3f7ea1c 100644
4897 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
4898 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
4899 @@ -38,6 +38,7 @@
4900 #include <linux/kernel.h>
4901 #include <linux/module.h>
4902 #include <linux/slab.h>
4903 +#include <linux/sched.h>
4904
4905 #include "rt2x00.h"
4906 #include "rt2800lib.h"
4907 @@ -607,6 +608,15 @@ static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
4908 int wcid, ack, pid;
4909 int tx_wcid, tx_ack, tx_pid;
4910
4911 + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
4912 + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
4913 + WARNING(entry->queue->rt2x00dev,
4914 + "Data pending for entry %u in queue %u\n",
4915 + entry->entry_idx, entry->queue->qid);
4916 + cond_resched();
4917 + return false;
4918 + }
4919 +
4920 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
4921 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
4922 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
4923 @@ -754,12 +764,11 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
4924 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
4925 if (rt2800_txdone_entry_check(entry, reg))
4926 break;
4927 + entry = NULL;
4928 }
4929
4930 - if (!entry || rt2x00queue_empty(queue))
4931 - break;
4932 -
4933 - rt2800_txdone_entry(entry, reg);
4934 + if (entry)
4935 + rt2800_txdone_entry(entry, reg);
4936 }
4937 }
4938 EXPORT_SYMBOL_GPL(rt2800_txdone);
4939 @@ -3503,14 +3512,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
4940 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
4941
4942 /* Apparently the data is read from end to start */
4943 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
4944 - (u32 *)&rt2x00dev->eeprom[i]);
4945 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
4946 - (u32 *)&rt2x00dev->eeprom[i + 2]);
4947 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
4948 - (u32 *)&rt2x00dev->eeprom[i + 4]);
4949 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
4950 - (u32 *)&rt2x00dev->eeprom[i + 6]);
4951 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
4952 + /* The returned value is in CPU order, but eeprom is le */
4953 + rt2x00dev->eeprom[i] = cpu_to_le32(reg);
4954 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
4955 + *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
4956 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
4957 + *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
4958 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
4959 + *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
4960
4961 mutex_unlock(&rt2x00dev->csr_mutex);
4962 }
4963 @@ -3676,19 +3686,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4964 return -ENODEV;
4965 }
4966
4967 - if (!rt2x00_rf(rt2x00dev, RF2820) &&
4968 - !rt2x00_rf(rt2x00dev, RF2850) &&
4969 - !rt2x00_rf(rt2x00dev, RF2720) &&
4970 - !rt2x00_rf(rt2x00dev, RF2750) &&
4971 - !rt2x00_rf(rt2x00dev, RF3020) &&
4972 - !rt2x00_rf(rt2x00dev, RF2020) &&
4973 - !rt2x00_rf(rt2x00dev, RF3021) &&
4974 - !rt2x00_rf(rt2x00dev, RF3022) &&
4975 - !rt2x00_rf(rt2x00dev, RF3052) &&
4976 - !rt2x00_rf(rt2x00dev, RF3320) &&
4977 - !rt2x00_rf(rt2x00dev, RF5370) &&
4978 - !rt2x00_rf(rt2x00dev, RF5390)) {
4979 - ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
4980 + switch (rt2x00dev->chip.rf) {
4981 + case RF2820:
4982 + case RF2850:
4983 + case RF2720:
4984 + case RF2750:
4985 + case RF3020:
4986 + case RF2020:
4987 + case RF3021:
4988 + case RF3022:
4989 + case RF3052:
4990 + case RF3320:
4991 + case RF5370:
4992 + case RF5390:
4993 + break;
4994 + default:
4995 + ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
4996 + rt2x00dev->chip.rf);
4997 return -ENODEV;
4998 }
4999
5000 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
5001 index ba82c97..6e7fe94 100644
5002 --- a/drivers/net/wireless/rt2x00/rt2800usb.c
5003 +++ b/drivers/net/wireless/rt2x00/rt2800usb.c
5004 @@ -477,8 +477,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
5005 while (!rt2x00queue_empty(queue)) {
5006 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
5007
5008 - if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
5009 + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
5010 + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
5011 break;
5012 +
5013 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
5014 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
5015 else if (rt2x00queue_status_timeout(entry))
5016 diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
5017 index 241a099..54f0b13 100644
5018 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c
5019 +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
5020 @@ -870,18 +870,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
5021 {
5022 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
5023 struct rt2x00_dev *rt2x00dev = hw->priv;
5024 - int retval;
5025 -
5026 - retval = rt2x00lib_suspend(rt2x00dev, state);
5027 - if (retval)
5028 - return retval;
5029
5030 - /*
5031 - * Decrease usbdev refcount.
5032 - */
5033 - usb_put_dev(interface_to_usbdev(usb_intf));
5034 -
5035 - return 0;
5036 + return rt2x00lib_suspend(rt2x00dev, state);
5037 }
5038 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
5039
5040 @@ -890,8 +880,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
5041 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
5042 struct rt2x00_dev *rt2x00dev = hw->priv;
5043
5044 - usb_get_dev(interface_to_usbdev(usb_intf));
5045 -
5046 return rt2x00lib_resume(rt2x00dev);
5047 }
5048 EXPORT_SYMBOL_GPL(rt2x00usb_resume);
5049 diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
5050 index d2ec253..ce0444c 100644
5051 --- a/drivers/net/wireless/rtlwifi/core.c
5052 +++ b/drivers/net/wireless/rtlwifi/core.c
5053 @@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
5054
5055 mac->link_state = MAC80211_NOLINK;
5056 memset(mac->bssid, 0, 6);
5057 +
5058 + /* reset sec info */
5059 + rtl_cam_reset_sec_info(hw);
5060 +
5061 + rtl_cam_reset_all_entry(hw);
5062 mac->vendor = PEER_UNKNOWN;
5063
5064 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
5065 @@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5066 *or clear all entry here.
5067 */
5068 rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
5069 +
5070 + rtl_cam_reset_sec_info(hw);
5071 +
5072 break;
5073 default:
5074 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
5075 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
5076 index 3a92ba3..10b2ef0 100644
5077 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
5078 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
5079 @@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
5080 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
5081 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
5082 if (mac->bw_40) {
5083 - if (tcb_desc->packet_bw) {
5084 + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
5085 SET_TX_DESC_DATA_BW(txdesc, 1);
5086 SET_TX_DESC_DATA_SC(txdesc, 3);
5087 + } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
5088 + SET_TX_DESC_DATA_BW(txdesc, 1);
5089 + SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
5090 } else {
5091 SET_TX_DESC_DATA_BW(txdesc, 0);
5092 - if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
5093 - SET_TX_DESC_DATA_SC(txdesc,
5094 - mac->cur_40_prime_sc);
5095 - }
5096 + SET_TX_DESC_DATA_SC(txdesc, 0);
5097 + }
5098 } else {
5099 SET_TX_DESC_DATA_BW(txdesc, 0);
5100 SET_TX_DESC_DATA_SC(txdesc, 0);
5101 diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
5102 index a9367eb..e4272b9 100644
5103 --- a/drivers/net/wireless/rtlwifi/usb.c
5104 +++ b/drivers/net/wireless/rtlwifi/usb.c
5105 @@ -861,6 +861,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
5106 u8 tid = 0;
5107 u16 seq_number = 0;
5108
5109 + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
5110 if (ieee80211_is_auth(fc)) {
5111 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
5112 rtl_ips_nic_on(hw);
5113 diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
5114 index 3dc9bef..6dcc7e2 100644
5115 --- a/drivers/pci/dmar.c
5116 +++ b/drivers/pci/dmar.c
5117 @@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
5118 return ret;
5119 }
5120
5121 - ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
5122 + ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
5123 if (ret)
5124 printk(KERN_ERR "IOMMU: can't request irq\n");
5125 return ret;
5126 diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
5127 index ee89358..ebe77dd 100644
5128 --- a/drivers/rapidio/rio-scan.c
5129 +++ b/drivers/rapidio/rio-scan.c
5130 @@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
5131 rdev->dev.dma_mask = &rdev->dma_mask;
5132 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
5133
5134 - if ((rdev->pef & RIO_PEF_INB_DOORBELL) &&
5135 - (rdev->dst_ops & RIO_DST_OPS_DOORBELL))
5136 + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
5137 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
5138 0, 0xffff);
5139
5140 diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
5141 index 55dd4e6..425aab3 100644
5142 --- a/drivers/regulator/tps65910-regulator.c
5143 +++ b/drivers/regulator/tps65910-regulator.c
5144 @@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
5145 mult = (selector / VDD1_2_NUM_VOLTS) + 1;
5146 volt = VDD1_2_MIN_VOLT +
5147 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
5148 + break;
5149 case TPS65911_REG_VDDCTRL:
5150 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
5151 + break;
5152 + default:
5153 + BUG();
5154 + return -EINVAL;
5155 }
5156
5157 return volt * 100 * mult;
5158 @@ -898,9 +903,11 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
5159 case TPS65910:
5160 pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
5161 info = tps65910_regs;
5162 + break;
5163 case TPS65911:
5164 pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
5165 info = tps65911_regs;
5166 + break;
5167 default:
5168 pr_err("Invalid tps chip version\n");
5169 return -ENODEV;
5170 diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
5171 index 3195dbd..eb4c883 100644
5172 --- a/drivers/rtc/interface.c
5173 +++ b/drivers/rtc/interface.c
5174 @@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
5175 int err = 0;
5176 unsigned long flags;
5177
5178 - if (freq <= 0 || freq > 5000)
5179 + if (freq <= 0 || freq > RTC_MAX_FREQ)
5180 return -EINVAL;
5181 retry:
5182 spin_lock_irqsave(&rtc->irq_task_lock, flags);
5183 diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
5184 index 5c4e741..68be6e1 100644
5185 --- a/drivers/s390/cio/qdio_thinint.c
5186 +++ b/drivers/s390/cio/qdio_thinint.c
5187 @@ -95,9 +95,11 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
5188 }
5189 }
5190
5191 -static inline u32 shared_ind_set(void)
5192 +static inline u32 clear_shared_ind(void)
5193 {
5194 - return q_indicators[TIQDIO_SHARED_IND].ind;
5195 + if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
5196 + return 0;
5197 + return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
5198 }
5199
5200 /**
5201 @@ -107,7 +109,7 @@ static inline u32 shared_ind_set(void)
5202 */
5203 static void tiqdio_thinint_handler(void *alsi, void *data)
5204 {
5205 - u32 si_used = shared_ind_set();
5206 + u32 si_used = clear_shared_ind();
5207 struct qdio_q *q;
5208
5209 last_ai_time = S390_lowcore.int_clock;
5210 @@ -150,13 +152,6 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
5211 qperf_inc(q, adapter_int);
5212 }
5213 rcu_read_unlock();
5214 -
5215 - /*
5216 - * If the shared indicator was used clear it now after all queues
5217 - * were processed.
5218 - */
5219 - if (si_used && shared_ind_set())
5220 - xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
5221 }
5222
5223 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
5224 diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
5225 index b7bd5b0..3868ab2 100644
5226 --- a/drivers/scsi/3w-9xxx.c
5227 +++ b/drivers/scsi/3w-9xxx.c
5228 @@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
5229 switch (retval) {
5230 case SCSI_MLQUEUE_HOST_BUSY:
5231 twa_free_request_id(tw_dev, request_id);
5232 + twa_unmap_scsi_data(tw_dev, request_id);
5233 break;
5234 case 1:
5235 tw_dev->state[request_id] = TW_S_COMPLETED;
5236 twa_free_request_id(tw_dev, request_id);
5237 + twa_unmap_scsi_data(tw_dev, request_id);
5238 SCpnt->result = (DID_ERROR << 16);
5239 done(SCpnt);
5240 retval = 0;
5241 diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
5242 index 3c08f53..6153a66 100644
5243 --- a/drivers/scsi/Makefile
5244 +++ b/drivers/scsi/Makefile
5245 @@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
5246 obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
5247 obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
5248 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
5249 -obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
5250 +obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
5251 obj-$(CONFIG_SCSI_LPFC) += lpfc/
5252 obj-$(CONFIG_SCSI_BFA_FC) += bfa/
5253 obj-$(CONFIG_SCSI_PAS16) += pas16.o
5254 diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
5255 index e7d0d47..e5f2d7d 100644
5256 --- a/drivers/scsi/aacraid/commsup.c
5257 +++ b/drivers/scsi/aacraid/commsup.c
5258 @@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
5259 kfree(aac->queues);
5260 aac->queues = NULL;
5261 free_irq(aac->pdev->irq, aac);
5262 + if (aac->msi)
5263 + pci_disable_msi(aac->pdev);
5264 kfree(aac->fsa_dev);
5265 aac->fsa_dev = NULL;
5266 quirks = aac_get_driver_ident(index)->quirks;
5267 diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
5268 index 0a404bf..856fcbf 100644
5269 --- a/drivers/scsi/bnx2fc/bnx2fc.h
5270 +++ b/drivers/scsi/bnx2fc/bnx2fc.h
5271 @@ -152,7 +152,6 @@ struct bnx2fc_percpu_s {
5272 spinlock_t fp_work_lock;
5273 };
5274
5275 -
5276 struct bnx2fc_hba {
5277 struct list_head link;
5278 struct cnic_dev *cnic;
5279 @@ -179,6 +178,7 @@ struct bnx2fc_hba {
5280 #define BNX2FC_CTLR_INIT_DONE 1
5281 #define BNX2FC_CREATE_DONE 2
5282 struct fcoe_ctlr ctlr;
5283 + struct list_head vports;
5284 u8 vlan_enabled;
5285 int vlan_id;
5286 u32 next_conn_id;
5287 @@ -232,6 +232,11 @@ struct bnx2fc_hba {
5288
5289 #define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
5290
5291 +struct bnx2fc_lport {
5292 + struct list_head list;
5293 + struct fc_lport *lport;
5294 +};
5295 +
5296 struct bnx2fc_cmd_mgr {
5297 struct bnx2fc_hba *hba;
5298 u16 next_idx;
5299 @@ -423,6 +428,7 @@ struct bnx2fc_work {
5300 struct bnx2fc_unsol_els {
5301 struct fc_lport *lport;
5302 struct fc_frame *fp;
5303 + struct bnx2fc_hba *hba;
5304 struct work_struct unsol_els_work;
5305 };
5306
5307 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
5308 index ab255fb..bdf62a5 100644
5309 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
5310 +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
5311 @@ -1225,6 +1225,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
5312 hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
5313 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
5314
5315 + INIT_LIST_HEAD(&hba->vports);
5316 rc = bnx2fc_netdev_setup(hba);
5317 if (rc)
5318 goto setup_err;
5319 @@ -1261,8 +1262,15 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
5320 struct fcoe_port *port;
5321 struct Scsi_Host *shost;
5322 struct fc_vport *vport = dev_to_vport(parent);
5323 + struct bnx2fc_lport *blport;
5324 int rc = 0;
5325
5326 + blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
5327 + if (!blport) {
5328 + BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n");
5329 + return NULL;
5330 + }
5331 +
5332 /* Allocate Scsi_Host structure */
5333 if (!npiv)
5334 lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
5335 @@ -1271,7 +1279,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
5336
5337 if (!lport) {
5338 printk(KERN_ERR PFX "could not allocate scsi host structure\n");
5339 - return NULL;
5340 + goto free_blport;
5341 }
5342 shost = lport->host;
5343 port = lport_priv(lport);
5344 @@ -1327,12 +1335,20 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
5345 }
5346
5347 bnx2fc_interface_get(hba);
5348 +
5349 + spin_lock_bh(&hba->hba_lock);
5350 + blport->lport = lport;
5351 + list_add_tail(&blport->list, &hba->vports);
5352 + spin_unlock_bh(&hba->hba_lock);
5353 +
5354 return lport;
5355
5356 shost_err:
5357 scsi_remove_host(shost);
5358 lp_config_err:
5359 scsi_host_put(lport->host);
5360 +free_blport:
5361 + kfree(blport);
5362 return NULL;
5363 }
5364
5365 @@ -1348,6 +1364,7 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
5366 {
5367 struct fcoe_port *port = lport_priv(lport);
5368 struct bnx2fc_hba *hba = port->priv;
5369 + struct bnx2fc_lport *blport, *tmp;
5370
5371 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
5372 /* Stop the transmit retry timer */
5373 @@ -1372,6 +1389,15 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
5374 /* Free memory used by statistical counters */
5375 fc_lport_free_stats(lport);
5376
5377 + spin_lock_bh(&hba->hba_lock);
5378 + list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
5379 + if (blport->lport == lport) {
5380 + list_del(&blport->list);
5381 + kfree(blport);
5382 + }
5383 + }
5384 + spin_unlock_bh(&hba->hba_lock);
5385 +
5386 /* Release Scsi_Host */
5387 scsi_host_put(lport->host);
5388
5389 diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
5390 index f756d5f..78baa46 100644
5391 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
5392 +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
5393 @@ -480,16 +480,36 @@ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
5394 return rc;
5395 }
5396
5397 +static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
5398 +{
5399 + struct bnx2fc_lport *blport;
5400 +
5401 + spin_lock_bh(&hba->hba_lock);
5402 + list_for_each_entry(blport, &hba->vports, list) {
5403 + if (blport->lport == lport) {
5404 + spin_unlock_bh(&hba->hba_lock);
5405 + return true;
5406 + }
5407 + }
5408 + spin_unlock_bh(&hba->hba_lock);
5409 + return false;
5410 +
5411 +}
5412 +
5413 +
5414 static void bnx2fc_unsol_els_work(struct work_struct *work)
5415 {
5416 struct bnx2fc_unsol_els *unsol_els;
5417 struct fc_lport *lport;
5418 + struct bnx2fc_hba *hba;
5419 struct fc_frame *fp;
5420
5421 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
5422 lport = unsol_els->lport;
5423 fp = unsol_els->fp;
5424 - fc_exch_recv(lport, fp);
5425 + hba = unsol_els->hba;
5426 + if (is_valid_lport(hba, lport))
5427 + fc_exch_recv(lport, fp);
5428 kfree(unsol_els);
5429 }
5430
5431 @@ -499,6 +519,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
5432 {
5433 struct fcoe_port *port = tgt->port;
5434 struct fc_lport *lport = port->lport;
5435 + struct bnx2fc_hba *hba = port->priv;
5436 struct bnx2fc_unsol_els *unsol_els;
5437 struct fc_frame_header *fh;
5438 struct fc_frame *fp;
5439 @@ -559,6 +580,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
5440 fr_eof(fp) = FC_EOF_T;
5441 fr_crc(fp) = cpu_to_le32(~crc);
5442 unsol_els->lport = lport;
5443 + unsol_els->hba = hba;
5444 unsol_els->fp = fp;
5445 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
5446 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
5447 diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
5448 index b5b5c34..454c72c 100644
5449 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c
5450 +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
5451 @@ -1734,7 +1734,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
5452 printk(KERN_ERR PFX "SCp.ptr is NULL\n");
5453 return;
5454 }
5455 - io_req->sc_cmd = NULL;
5456
5457 if (io_req->on_active_queue) {
5458 list_del_init(&io_req->link);
5459 @@ -1754,6 +1753,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
5460 }
5461
5462 bnx2fc_unmap_sg_list(io_req);
5463 + io_req->sc_cmd = NULL;
5464
5465 switch (io_req->fcp_status) {
5466 case FC_GOOD:
5467 diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
5468 index fc2cdb6..b2d6611 100644
5469 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
5470 +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
5471 @@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk)
5472 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
5473
5474 if (csk->l2t) {
5475 - l2t_release(L2DATA(t3dev), csk->l2t);
5476 + l2t_release(t3dev, csk->l2t);
5477 csk->l2t = NULL;
5478 cxgbi_sock_put(csk);
5479 }
5480 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
5481 index 155d7b9..8885b3e 100644
5482 --- a/drivers/scsi/fcoe/fcoe.c
5483 +++ b/drivers/scsi/fcoe/fcoe.c
5484 @@ -749,12 +749,27 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
5485 * The offload EM that this routine is associated with will handle any
5486 * packets that are for SCSI read requests.
5487 *
5488 + * This has been enhanced to work when FCoE stack is operating in target
5489 + * mode.
5490 + *
5491 * Returns: True for read types I/O, otherwise returns false.
5492 */
5493 bool fcoe_oem_match(struct fc_frame *fp)
5494 {
5495 - return fc_fcp_is_read(fr_fsp(fp)) &&
5496 - (fr_fsp(fp)->data_len > fcoe_ddp_min);
5497 + struct fc_frame_header *fh = fc_frame_header_get(fp);
5498 + struct fcp_cmnd *fcp;
5499 +
5500 + if (fc_fcp_is_read(fr_fsp(fp)) &&
5501 + (fr_fsp(fp)->data_len > fcoe_ddp_min))
5502 + return true;
5503 + else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
5504 + fcp = fc_frame_payload_get(fp, sizeof(*fcp));
5505 + if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
5506 + fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
5507 + (fcp->fc_flags & FCP_CFL_WRDATA))
5508 + return true;
5509 + }
5510 + return false;
5511 }
5512
5513 /**
5514 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
5515 index 6bba23a..78c2e20 100644
5516 --- a/drivers/scsi/hpsa.c
5517 +++ b/drivers/scsi/hpsa.c
5518 @@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
5519 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
5520 removed[*nremoved] = h->dev[entry];
5521 (*nremoved)++;
5522 +
5523 + /*
5524 + * New physical devices won't have target/lun assigned yet
5525 + * so we need to preserve the values in the slot we are replacing.
5526 + */
5527 + if (new_entry->target == -1) {
5528 + new_entry->target = h->dev[entry]->target;
5529 + new_entry->lun = h->dev[entry]->lun;
5530 + }
5531 +
5532 h->dev[entry] = new_entry;
5533 added[*nadded] = new_entry;
5534 (*nadded)++;
5535 @@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
5536 }
5537
5538 static int hpsa_update_device_info(struct ctlr_info *h,
5539 - unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
5540 + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
5541 + unsigned char *is_OBDR_device)
5542 {
5543 -#define OBDR_TAPE_INQ_SIZE 49
5544 +
5545 +#define OBDR_SIG_OFFSET 43
5546 +#define OBDR_TAPE_SIG "$DR-10"
5547 +#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
5548 +#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
5549 +
5550 unsigned char *inq_buff;
5551 + unsigned char *obdr_sig;
5552
5553 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
5554 if (!inq_buff)
5555 @@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
5556 else
5557 this_device->raid_level = RAID_UNKNOWN;
5558
5559 + if (is_OBDR_device) {
5560 + /* See if this is a One-Button-Disaster-Recovery device
5561 + * by looking for "$DR-10" at offset 43 in inquiry data.
5562 + */
5563 + obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
5564 + *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
5565 + strncmp(obdr_sig, OBDR_TAPE_SIG,
5566 + OBDR_SIG_LEN) == 0);
5567 + }
5568 +
5569 kfree(inq_buff);
5570 return 0;
5571
5572 @@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
5573 return 0;
5574 }
5575
5576 - if (hpsa_update_device_info(h, scsi3addr, this_device))
5577 + if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
5578 return 0;
5579 (*nmsa2xxx_enclosures)++;
5580 hpsa_set_bus_target_lun(this_device, bus, target, 0);
5581 @@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5582 */
5583 struct ReportLUNdata *physdev_list = NULL;
5584 struct ReportLUNdata *logdev_list = NULL;
5585 - unsigned char *inq_buff = NULL;
5586 u32 nphysicals = 0;
5587 u32 nlogicals = 0;
5588 u32 ndev_allocated = 0;
5589 @@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5590 GFP_KERNEL);
5591 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
5592 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
5593 - inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
5594 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
5595
5596 - if (!currentsd || !physdev_list || !logdev_list ||
5597 - !inq_buff || !tmpdevice) {
5598 + if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
5599 dev_err(&h->pdev->dev, "out of memory\n");
5600 goto out;
5601 }
5602 @@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5603 /* adjust our table of devices */
5604 nmsa2xxx_enclosures = 0;
5605 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
5606 - u8 *lunaddrbytes;
5607 + u8 *lunaddrbytes, is_OBDR = 0;
5608
5609 /* Figure out where the LUN ID info is coming from */
5610 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
5611 @@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5612 continue;
5613
5614 /* Get device type, vendor, model, device id */
5615 - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
5616 + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
5617 + &is_OBDR))
5618 continue; /* skip it if we can't talk to it. */
5619 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
5620 tmpdevice);
5621 @@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5622 hpsa_set_bus_target_lun(this_device, bus, target, lun);
5623
5624 switch (this_device->devtype) {
5625 - case TYPE_ROM: {
5626 + case TYPE_ROM:
5627 /* We don't *really* support actual CD-ROM devices,
5628 * just "One Button Disaster Recovery" tape drive
5629 * which temporarily pretends to be a CD-ROM drive.
5630 @@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
5631 * device by checking for "$DR-10" in bytes 43-48 of
5632 * the inquiry data.
5633 */
5634 - char obdr_sig[7];
5635 -#define OBDR_TAPE_SIG "$DR-10"
5636 - strncpy(obdr_sig, &inq_buff[43], 6);
5637 - obdr_sig[6] = '\0';
5638 - if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
5639 - /* Not OBDR device, ignore it. */
5640 - break;
5641 - }
5642 - ncurrent++;
5643 + if (is_OBDR)
5644 + ncurrent++;
5645 break;
5646 case TYPE_DISK:
5647 if (i < nphysicals)
5648 @@ -1947,7 +1965,6 @@ out:
5649 for (i = 0; i < ndev_allocated; i++)
5650 kfree(currentsd[i]);
5651 kfree(currentsd);
5652 - kfree(inq_buff);
5653 kfree(physdev_list);
5654 kfree(logdev_list);
5655 }
5656 diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
5657 index 26072f1..ef46d83 100644
5658 --- a/drivers/scsi/isci/host.c
5659 +++ b/drivers/scsi/isci/host.c
5660 @@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
5661 break;
5662
5663 case SCU_COMPLETION_TYPE_EVENT:
5664 + sci_controller_event_completion(ihost, ent);
5665 + break;
5666 +
5667 case SCU_COMPLETION_TYPE_NOTIFY: {
5668 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
5669 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
5670 diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
5671 index 79313a7..430fc8f 100644
5672 --- a/drivers/scsi/isci/phy.c
5673 +++ b/drivers/scsi/isci/phy.c
5674 @@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
5675 u32 parity_count = 0;
5676 u32 llctl, link_rate;
5677 u32 clksm_value = 0;
5678 + u32 sp_timeouts = 0;
5679
5680 iphy->link_layer_registers = reg;
5681
5682 @@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
5683 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
5684 writel(llctl, &iphy->link_layer_registers->link_layer_control);
5685
5686 + sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
5687 +
5688 + /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
5689 + sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
5690 +
5691 + /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
5692 + * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
5693 + */
5694 + sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
5695 +
5696 + writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
5697 +
5698 if (is_a2(ihost->pdev)) {
5699 /* Program the max ARB time for the PHY to 700us so we inter-operate with
5700 * the PMC expander which shuts down PHYs if the expander PHY generates too
5701 diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
5702 index 9b266c7..00afc73 100644
5703 --- a/drivers/scsi/isci/registers.h
5704 +++ b/drivers/scsi/isci/registers.h
5705 @@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
5706 #define SCU_AFE_XCVRCR_OFFSET 0x00DC
5707 #define SCU_AFE_LUTCR_OFFSET 0x00E0
5708
5709 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
5710 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
5711 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
5712 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
5713 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
5714 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
5715 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
5716 +#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
5717 +
5718 +#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
5719 + SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
5720 +
5721 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
5722 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
5723 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
5724 diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
5725 index a46e07a..b5d3a8c 100644
5726 --- a/drivers/scsi/isci/request.c
5727 +++ b/drivers/scsi/isci/request.c
5728 @@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
5729 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
5730 return SCI_SUCCESS;
5731 case SCI_REQ_TASK_WAIT_TC_RESP:
5732 + /* The task frame was already confirmed to have been
5733 + * sent by the SCU HW. Since the state machine is
5734 + * now only waiting for the task response itself,
5735 + * abort the request and complete it immediately
5736 + * and don't wait for the task response.
5737 + */
5738 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
5739 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
5740 return SCI_SUCCESS;
5741 case SCI_REQ_ABORTING:
5742 - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
5743 - return SCI_SUCCESS;
5744 + /* If a request has a termination requested twice, return
5745 + * a failure indication, since HW confirmation of the first
5746 + * abort is still outstanding.
5747 + */
5748 case SCI_REQ_COMPLETED:
5749 default:
5750 dev_warn(&ireq->owning_controller->pdev->dev,
5751 @@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
5752 }
5753 }
5754
5755 -static void isci_request_process_stp_response(struct sas_task *task,
5756 - void *response_buffer)
5757 +static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
5758 {
5759 - struct dev_to_host_fis *d2h_reg_fis = response_buffer;
5760 struct task_status_struct *ts = &task->task_status;
5761 struct ata_task_resp *resp = (void *)&ts->buf[0];
5762
5763 - resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
5764 - memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
5765 + resp->frame_len = sizeof(*fis);
5766 + memcpy(resp->ending_fis, fis, sizeof(*fis));
5767 ts->buf_valid_size = sizeof(*resp);
5768
5769 - /**
5770 - * If the device fault bit is set in the status register, then
5771 + /* If the device fault bit is set in the status register, then
5772 * set the sense data and return.
5773 */
5774 - if (d2h_reg_fis->status & ATA_DF)
5775 + if (fis->status & ATA_DF)
5776 ts->stat = SAS_PROTO_RESPONSE;
5777 else
5778 ts->stat = SAM_STAT_GOOD;
5779 @@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
5780 {
5781 struct sas_task *task = isci_request_access_task(request);
5782 struct ssp_response_iu *resp_iu;
5783 - void *resp_buf;
5784 unsigned long task_flags;
5785 struct isci_remote_device *idev = isci_lookup_device(task->dev);
5786 enum service_response response = SAS_TASK_UNDELIVERED;
5787 @@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
5788 task);
5789
5790 if (sas_protocol_ata(task->task_proto)) {
5791 - resp_buf = &request->stp.rsp;
5792 - isci_request_process_stp_response(task,
5793 - resp_buf);
5794 + isci_process_stp_response(task, &request->stp.rsp);
5795 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
5796
5797 /* crack the iu response buffer. */
5798 diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
5799 index e9e1e2a..16f88ab 100644
5800 --- a/drivers/scsi/isci/unsolicited_frame_control.c
5801 +++ b/drivers/scsi/isci/unsolicited_frame_control.c
5802 @@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
5803 */
5804 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
5805 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
5806 - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
5807 + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
5808
5809 /*
5810 * The Unsolicited Frame buffers are set at the start of the UF
5811 diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
5812 index 31cb950..75d8966 100644
5813 --- a/drivers/scsi/isci/unsolicited_frame_control.h
5814 +++ b/drivers/scsi/isci/unsolicited_frame_control.h
5815 @@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
5816 * starting address of the UF address table.
5817 * 64-bit pointers are required by the hardware.
5818 */
5819 - dma_addr_t *array;
5820 + u64 *array;
5821
5822 /**
5823 * This field specifies the physical address location for the UF
5824 diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
5825 index 3df9853..7724414 100644
5826 --- a/drivers/scsi/iscsi_tcp.c
5827 +++ b/drivers/scsi/iscsi_tcp.c
5828 @@ -107,10 +107,12 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
5829 * If the socket is in CLOSE or CLOSE_WAIT we should
5830 * not close the connection if there is still some
5831 * data pending.
5832 + *
5833 + * Must be called with sk_callback_lock.
5834 */
5835 static inline int iscsi_sw_sk_state_check(struct sock *sk)
5836 {
5837 - struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
5838 + struct iscsi_conn *conn = sk->sk_user_data;
5839
5840 if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
5841 !atomic_read(&sk->sk_rmem_alloc)) {
5842 @@ -123,11 +125,17 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
5843
5844 static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
5845 {
5846 - struct iscsi_conn *conn = sk->sk_user_data;
5847 - struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
5848 + struct iscsi_conn *conn;
5849 + struct iscsi_tcp_conn *tcp_conn;
5850 read_descriptor_t rd_desc;
5851
5852 read_lock(&sk->sk_callback_lock);
5853 + conn = sk->sk_user_data;
5854 + if (!conn) {
5855 + read_unlock(&sk->sk_callback_lock);
5856 + return;
5857 + }
5858 + tcp_conn = conn->dd_data;
5859
5860 /*
5861 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
5862 @@ -141,11 +149,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
5863
5864 iscsi_sw_sk_state_check(sk);
5865
5866 - read_unlock(&sk->sk_callback_lock);
5867 -
5868 /* If we had to (atomically) map a highmem page,
5869 * unmap it now. */
5870 iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
5871 + read_unlock(&sk->sk_callback_lock);
5872 }
5873
5874 static void iscsi_sw_tcp_state_change(struct sock *sk)
5875 @@ -157,8 +164,11 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
5876 void (*old_state_change)(struct sock *);
5877
5878 read_lock(&sk->sk_callback_lock);
5879 -
5880 - conn = (struct iscsi_conn*)sk->sk_user_data;
5881 + conn = sk->sk_user_data;
5882 + if (!conn) {
5883 + read_unlock(&sk->sk_callback_lock);
5884 + return;
5885 + }
5886 session = conn->session;
5887
5888 iscsi_sw_sk_state_check(sk);
5889 @@ -178,11 +188,25 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
5890 **/
5891 static void iscsi_sw_tcp_write_space(struct sock *sk)
5892 {
5893 - struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
5894 - struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
5895 - struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
5896 + struct iscsi_conn *conn;
5897 + struct iscsi_tcp_conn *tcp_conn;
5898 + struct iscsi_sw_tcp_conn *tcp_sw_conn;
5899 + void (*old_write_space)(struct sock *);
5900 +
5901 + read_lock_bh(&sk->sk_callback_lock);
5902 + conn = sk->sk_user_data;
5903 + if (!conn) {
5904 + read_unlock_bh(&sk->sk_callback_lock);
5905 + return;
5906 + }
5907 +
5908 + tcp_conn = conn->dd_data;
5909 + tcp_sw_conn = tcp_conn->dd_data;
5910 + old_write_space = tcp_sw_conn->old_write_space;
5911 + read_unlock_bh(&sk->sk_callback_lock);
5912 +
5913 + old_write_space(sk);
5914
5915 - tcp_sw_conn->old_write_space(sk);
5916 ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
5917 iscsi_conn_queue_work(conn);
5918 }
5919 @@ -592,20 +616,17 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
5920 /* userspace may have goofed up and not bound us */
5921 if (!sock)
5922 return;
5923 - /*
5924 - * Make sure our recv side is stopped.
5925 - * Older tools called conn stop before ep_disconnect
5926 - * so IO could still be coming in.
5927 - */
5928 - write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
5929 - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
5930 - write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
5931
5932 sock->sk->sk_err = EIO;
5933 wake_up_interruptible(sk_sleep(sock->sk));
5934
5935 - iscsi_conn_stop(cls_conn, flag);
5936 + /* stop xmit side */
5937 + iscsi_suspend_tx(conn);
5938 +
5939 + /* stop recv side and release socket */
5940 iscsi_sw_tcp_release_conn(conn);
5941 +
5942 + iscsi_conn_stop(cls_conn, flag);
5943 }
5944
5945 static int
5946 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
5947 index 49e1ccc..3b66937 100644
5948 --- a/drivers/scsi/libfc/fc_rport.c
5949 +++ b/drivers/scsi/libfc/fc_rport.c
5950 @@ -801,6 +801,20 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
5951
5952 switch (rdata->rp_state) {
5953 case RPORT_ST_INIT:
5954 + /*
5955 + * If received the FLOGI request on RPORT which is INIT state
5956 + * (means not transition to FLOGI either fc_rport timeout
5957 + * function didn;t trigger or this end hasn;t received
5958 + * beacon yet from other end. In that case only, allow RPORT
5959 + * state machine to continue, otherwise fall through which
5960 + * causes the code to send reject response.
5961 + * NOTE; Not checking for FIP->state such as VNMP_UP or
5962 + * VNMP_CLAIM because if FIP state is not one of those,
5963 + * RPORT wouldn;t have created and 'rport_lookup' would have
5964 + * failed anyway in that case.
5965 + */
5966 + if (lport->point_to_multipoint)
5967 + break;
5968 case RPORT_ST_DELETE:
5969 mutex_unlock(&rdata->rp_mutex);
5970 rjt_data.reason = ELS_RJT_FIP;
5971 diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
5972 index e98ae33..09b232f 100644
5973 --- a/drivers/scsi/libiscsi_tcp.c
5974 +++ b/drivers/scsi/libiscsi_tcp.c
5975 @@ -1084,7 +1084,8 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
5976 struct iscsi_cls_conn *cls_conn;
5977 struct iscsi_tcp_conn *tcp_conn;
5978
5979 - cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
5980 + cls_conn = iscsi_conn_setup(cls_session,
5981 + sizeof(*tcp_conn) + dd_data_size, conn_idx);
5982 if (!cls_conn)
5983 return NULL;
5984 conn = cls_conn->dd_data;
5985 @@ -1096,22 +1097,13 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
5986
5987 tcp_conn = conn->dd_data;
5988 tcp_conn->iscsi_conn = conn;
5989 -
5990 - tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
5991 - if (!tcp_conn->dd_data) {
5992 - iscsi_conn_teardown(cls_conn);
5993 - return NULL;
5994 - }
5995 + tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
5996 return cls_conn;
5997 }
5998 EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
5999
6000 void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
6001 {
6002 - struct iscsi_conn *conn = cls_conn->dd_data;
6003 - struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
6004 -
6005 - kfree(tcp_conn->dd_data);
6006 iscsi_conn_teardown(cls_conn);
6007 }
6008 EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
6009 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
6010 index f84084b..c9e3dc0 100644
6011 --- a/drivers/scsi/libsas/sas_expander.c
6012 +++ b/drivers/scsi/libsas/sas_expander.c
6013 @@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
6014 list_for_each_entry(ch, &ex->children, siblings) {
6015 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
6016 res = sas_find_bcast_dev(ch, src_dev);
6017 - if (src_dev)
6018 + if (*src_dev)
6019 return res;
6020 }
6021 }
6022 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
6023 index 8ec2c86..0441361 100644
6024 --- a/drivers/scsi/lpfc/lpfc.h
6025 +++ b/drivers/scsi/lpfc/lpfc.h
6026 @@ -20,6 +20,11 @@
6027 *******************************************************************/
6028
6029 #include <scsi/scsi_host.h>
6030 +
6031 +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
6032 +#define CONFIG_SCSI_LPFC_DEBUG_FS
6033 +#endif
6034 +
6035 struct lpfc_sli2_slim;
6036
6037 #define LPFC_PCI_DEV_LP 0x1
6038 @@ -465,9 +470,10 @@ enum intr_type_t {
6039 struct unsol_rcv_ct_ctx {
6040 uint32_t ctxt_id;
6041 uint32_t SID;
6042 - uint32_t oxid;
6043 uint32_t flags;
6044 #define UNSOL_VALID 0x00000001
6045 + uint16_t oxid;
6046 + uint16_t rxid;
6047 };
6048
6049 #define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
6050 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
6051 index 135a53b..80ca11c 100644
6052 --- a/drivers/scsi/lpfc/lpfc_attr.c
6053 +++ b/drivers/scsi/lpfc/lpfc_attr.c
6054 @@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
6055 }
6056
6057 /**
6058 + * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
6059 + * @phba: lpfc_hba pointer.
6060 + *
6061 + * Description:
6062 + * SLI4 interface type-2 device to wait on the sliport status register for
6063 + * the readyness after performing a firmware reset.
6064 + *
6065 + * Returns:
6066 + * zero for success
6067 + **/
6068 +static int
6069 +lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
6070 +{
6071 + struct lpfc_register portstat_reg;
6072 + int i;
6073 +
6074 +
6075 + lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6076 + &portstat_reg.word0);
6077 +
6078 + /* wait for the SLI port firmware ready after firmware reset */
6079 + for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
6080 + msleep(10);
6081 + lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6082 + &portstat_reg.word0);
6083 + if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
6084 + continue;
6085 + if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
6086 + continue;
6087 + if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
6088 + continue;
6089 + break;
6090 + }
6091 +
6092 + if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
6093 + return 0;
6094 + else
6095 + return -EIO;
6096 +}
6097 +
6098 +/**
6099 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
6100 * @phba: lpfc_hba pointer.
6101 *
6102 @@ -805,7 +846,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
6103 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6104
6105 /* delay driver action following IF_TYPE_2 reset */
6106 - msleep(100);
6107 + rc = lpfc_sli4_pdev_status_reg_wait(phba);
6108 +
6109 + if (rc)
6110 + return -EIO;
6111
6112 init_completion(&online_compl);
6113 rc = lpfc_workq_post_event(phba, &status, &online_compl,
6114 @@ -895,6 +939,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
6115
6116 if (!phba->cfg_enable_hba_reset)
6117 return -EACCES;
6118 +
6119 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6120 + "3050 lpfc_board_mode set to %s\n", buf);
6121 +
6122 init_completion(&online_compl);
6123
6124 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
6125 @@ -1290,6 +1338,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
6126 if (phba->sli_rev == LPFC_SLI_REV4)
6127 val = 0;
6128
6129 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6130 + "3051 lpfc_poll changed from %d to %d\n",
6131 + phba->cfg_poll, val);
6132 +
6133 spin_lock_irq(&phba->hbalock);
6134
6135 old_val = phba->cfg_poll;
6136 @@ -1414,80 +1466,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
6137 struct Scsi_Host *shost = class_to_shost(dev);
6138 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6139 struct lpfc_hba *phba = vport->phba;
6140 - struct pci_dev *pdev = phba->pcidev;
6141 - union lpfc_sli4_cfg_shdr *shdr;
6142 - uint32_t shdr_status, shdr_add_status;
6143 - LPFC_MBOXQ_t *mboxq;
6144 - struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
6145 - struct lpfc_rsrc_desc_pcie *desc;
6146 - uint32_t max_nr_virtfn;
6147 - uint32_t desc_count;
6148 - int length, rc, i;
6149 -
6150 - if ((phba->sli_rev < LPFC_SLI_REV4) ||
6151 - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6152 - LPFC_SLI_INTF_IF_TYPE_2))
6153 - return -EPERM;
6154 -
6155 - if (!pdev->is_physfn)
6156 - return snprintf(buf, PAGE_SIZE, "%d\n", 0);
6157 -
6158 - mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6159 - if (!mboxq)
6160 - return -ENOMEM;
6161 -
6162 - /* get the maximum number of virtfn support by physfn */
6163 - length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
6164 - sizeof(struct lpfc_sli4_cfg_mhdr));
6165 - lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6166 - LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
6167 - length, LPFC_SLI4_MBX_EMBED);
6168 - shdr = (union lpfc_sli4_cfg_shdr *)
6169 - &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6170 - bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
6171 - phba->sli4_hba.iov.pf_number + 1);
6172 -
6173 - get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
6174 - bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
6175 - LPFC_CFG_TYPE_CURRENT_ACTIVE);
6176 -
6177 - rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
6178 - lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
6179 + uint16_t max_nr_virtfn;
6180
6181 - if (rc != MBX_TIMEOUT) {
6182 - /* check return status */
6183 - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6184 - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6185 - &shdr->response);
6186 - if (shdr_status || shdr_add_status || rc)
6187 - goto error_out;
6188 -
6189 - } else
6190 - goto error_out;
6191 -
6192 - desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
6193 -
6194 - for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6195 - desc = (struct lpfc_rsrc_desc_pcie *)
6196 - &get_prof_cfg->u.response.prof_cfg.desc[i];
6197 - if (LPFC_RSRC_DESC_TYPE_PCIE ==
6198 - bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
6199 - max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
6200 - desc);
6201 - break;
6202 - }
6203 - }
6204 -
6205 - if (i < LPFC_RSRC_DESC_MAX_NUM) {
6206 - if (rc != MBX_TIMEOUT)
6207 - mempool_free(mboxq, phba->mbox_mem_pool);
6208 - return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
6209 - }
6210 -
6211 -error_out:
6212 - if (rc != MBX_TIMEOUT)
6213 - mempool_free(mboxq, phba->mbox_mem_pool);
6214 - return -EIO;
6215 + max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6216 + return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
6217 }
6218
6219 /**
6220 @@ -1605,6 +1587,9 @@ static int \
6221 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
6222 { \
6223 if (val >= minval && val <= maxval) {\
6224 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
6225 + "3052 lpfc_" #attr " changed from %d to %d\n", \
6226 + phba->cfg_##attr, val); \
6227 phba->cfg_##attr = val;\
6228 return 0;\
6229 }\
6230 @@ -1762,6 +1747,9 @@ static int \
6231 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
6232 { \
6233 if (val >= minval && val <= maxval) {\
6234 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
6235 + "3053 lpfc_" #attr " changed from %d to %d\n", \
6236 + vport->cfg_##attr, val); \
6237 vport->cfg_##attr = val;\
6238 return 0;\
6239 }\
6240 @@ -2678,6 +2666,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
6241 if (nolip)
6242 return strlen(buf);
6243
6244 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6245 + "3054 lpfc_topology changed from %d to %d\n",
6246 + prev_val, val);
6247 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
6248 if (err) {
6249 phba->cfg_topology = prev_val;
6250 @@ -3101,6 +3092,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
6251 if (sscanf(val_buf, "%i", &val) != 1)
6252 return -EINVAL;
6253
6254 + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6255 + "3055 lpfc_link_speed changed from %d to %d %s\n",
6256 + phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
6257 +
6258 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
6259 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
6260 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
6261 @@ -3678,7 +3673,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
6262 # - Default will result in registering capabilities for all profiles.
6263 #
6264 */
6265 -unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
6266 +unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
6267 + SHOST_DIX_TYPE0_PROTECTION |
6268 + SHOST_DIX_TYPE1_PROTECTION;
6269
6270 module_param(lpfc_prot_mask, uint, S_IRUGO);
6271 MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
6272 diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
6273 index 7fb0ba4..f46378f 100644
6274 --- a/drivers/scsi/lpfc/lpfc_bsg.c
6275 +++ b/drivers/scsi/lpfc/lpfc_bsg.c
6276 @@ -960,8 +960,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6277 evt_dat->immed_dat].oxid,
6278 phba->ct_ctx[
6279 evt_dat->immed_dat].SID);
6280 + phba->ct_ctx[evt_dat->immed_dat].rxid =
6281 + piocbq->iocb.ulpContext;
6282 phba->ct_ctx[evt_dat->immed_dat].oxid =
6283 - piocbq->iocb.ulpContext;
6284 + piocbq->iocb.unsli3.rcvsli3.ox_id;
6285 phba->ct_ctx[evt_dat->immed_dat].SID =
6286 piocbq->iocb.un.rcvels.remoteID;
6287 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
6288 @@ -1312,7 +1314,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
6289 rc = IOCB_ERROR;
6290 goto issue_ct_rsp_exit;
6291 }
6292 - icmd->ulpContext = phba->ct_ctx[tag].oxid;
6293 + icmd->ulpContext = phba->ct_ctx[tag].rxid;
6294 + icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
6295 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
6296 if (!ndlp) {
6297 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
6298 @@ -1337,9 +1340,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
6299 goto issue_ct_rsp_exit;
6300 }
6301
6302 - icmd->un.ulpWord[3] = ndlp->nlp_rpi;
6303 - if (phba->sli_rev == LPFC_SLI_REV4)
6304 - icmd->ulpContext =
6305 + icmd->un.ulpWord[3] =
6306 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
6307
6308 /* The exchange is done, mark the entry as invalid */
6309 @@ -1351,8 +1352,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
6310
6311 /* Xmit CT response on exchange <xid> */
6312 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
6313 - "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
6314 - icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
6315 + "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
6316 + icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
6317
6318 ctiocb->iocb_cmpl = NULL;
6319 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
6320 @@ -1471,13 +1472,12 @@ send_mgmt_rsp_exit:
6321 /**
6322 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
6323 * @phba: Pointer to HBA context object.
6324 - * @job: LPFC_BSG_VENDOR_DIAG_MODE
6325 *
6326 * This function is responsible for preparing driver for diag loopback
6327 * on device.
6328 */
6329 static int
6330 -lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
6331 +lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
6332 {
6333 struct lpfc_vport **vports;
6334 struct Scsi_Host *shost;
6335 @@ -1521,7 +1521,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
6336 /**
6337 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
6338 * @phba: Pointer to HBA context object.
6339 - * @job: LPFC_BSG_VENDOR_DIAG_MODE
6340 *
6341 * This function is responsible for driver exit processing of setting up
6342 * diag loopback mode on device.
6343 @@ -1586,7 +1585,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
6344 goto job_error;
6345 }
6346
6347 - rc = lpfc_bsg_diag_mode_enter(phba, job);
6348 + rc = lpfc_bsg_diag_mode_enter(phba);
6349 if (rc)
6350 goto job_error;
6351
6352 @@ -1758,7 +1757,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
6353 goto job_error;
6354 }
6355
6356 - rc = lpfc_bsg_diag_mode_enter(phba, job);
6357 + rc = lpfc_bsg_diag_mode_enter(phba);
6358 if (rc)
6359 goto job_error;
6360
6361 @@ -1982,7 +1981,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
6362 goto job_error;
6363 }
6364
6365 - rc = lpfc_bsg_diag_mode_enter(phba, job);
6366 + rc = lpfc_bsg_diag_mode_enter(phba);
6367 if (rc)
6368 goto job_error;
6369
6370 @@ -3511,7 +3510,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
6371 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
6372 "2947 Issued SLI_CONFIG ext-buffer "
6373 "maibox command, rc:x%x\n", rc);
6374 - return 1;
6375 + return SLI_CONFIG_HANDLED;
6376 }
6377 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
6378 "2948 Failed to issue SLI_CONFIG ext-buffer "
6379 @@ -3549,7 +3548,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
6380 LPFC_MBOXQ_t *pmboxq = NULL;
6381 MAILBOX_t *pmb;
6382 uint8_t *mbx;
6383 - int rc = 0, i;
6384 + int rc = SLI_CONFIG_NOT_HANDLED, i;
6385
6386 mbox_req =
6387 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
6388 @@ -3660,7 +3659,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
6389 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
6390 "2955 Issued SLI_CONFIG ext-buffer "
6391 "maibox command, rc:x%x\n", rc);
6392 - return 1;
6393 + return SLI_CONFIG_HANDLED;
6394 }
6395 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
6396 "2956 Failed to issue SLI_CONFIG ext-buffer "
6397 @@ -3668,6 +3667,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
6398 rc = -EPIPE;
6399 }
6400
6401 + /* wait for additoinal external buffers */
6402 + job->reply->result = 0;
6403 + job->job_done(job);
6404 + return SLI_CONFIG_HANDLED;
6405 +
6406 job_error:
6407 if (pmboxq)
6408 mempool_free(pmboxq, phba->mbox_mem_pool);
6409 @@ -3959,7 +3963,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
6410 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
6411 "2969 Issued SLI_CONFIG ext-buffer "
6412 "maibox command, rc:x%x\n", rc);
6413 - return 1;
6414 + return SLI_CONFIG_HANDLED;
6415 }
6416 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
6417 "2970 Failed to issue SLI_CONFIG ext-buffer "
6418 @@ -4039,14 +4043,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
6419 struct lpfc_dmabuf *dmabuf)
6420 {
6421 struct dfc_mbox_req *mbox_req;
6422 - int rc;
6423 + int rc = SLI_CONFIG_NOT_HANDLED;
6424
6425 mbox_req =
6426 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
6427
6428 /* mbox command with/without single external buffer */
6429 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
6430 - return SLI_CONFIG_NOT_HANDLED;
6431 + return rc;
6432
6433 /* mbox command and first external buffer */
6434 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
6435 @@ -4249,7 +4253,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
6436 * mailbox extension size
6437 */
6438 if ((transmit_length > receive_length) ||
6439 - (transmit_length > MAILBOX_EXT_SIZE)) {
6440 + (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
6441 rc = -ERANGE;
6442 goto job_done;
6443 }
6444 @@ -4272,7 +4276,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
6445 /* receive length cannot be greater than mailbox
6446 * extension size
6447 */
6448 - if (receive_length > MAILBOX_EXT_SIZE) {
6449 + if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
6450 rc = -ERANGE;
6451 goto job_done;
6452 }
6453 @@ -4306,7 +4310,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
6454 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
6455
6456 /* bde size cannot be greater than mailbox ext size */
6457 - if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
6458 + if (bde->tus.f.bdeSize >
6459 + BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
6460 rc = -ERANGE;
6461 goto job_done;
6462 }
6463 @@ -4332,7 +4337,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
6464 * mailbox extension size
6465 */
6466 if ((receive_length == 0) ||
6467 - (receive_length > MAILBOX_EXT_SIZE)) {
6468 + (receive_length >
6469 + BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
6470 rc = -ERANGE;
6471 goto job_done;
6472 }
6473 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
6474 index fc20c24..1e41af8 100644
6475 --- a/drivers/scsi/lpfc/lpfc_crtn.h
6476 +++ b/drivers/scsi/lpfc/lpfc_crtn.h
6477 @@ -432,6 +432,7 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
6478 int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
6479 int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
6480 uint16_t, uint16_t, uint16_t);
6481 +uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
6482 void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
6483 void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
6484 struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
6485 @@ -439,3 +440,4 @@ struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
6486 int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
6487 /* functions to support SR-IOV */
6488 int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
6489 +uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
6490 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
6491 index 32a0845..1725b81 100644
6492 --- a/drivers/scsi/lpfc/lpfc_els.c
6493 +++ b/drivers/scsi/lpfc/lpfc_els.c
6494 @@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6495 }
6496 lpfc_cleanup_pending_mbox(vport);
6497
6498 - if (phba->sli_rev == LPFC_SLI_REV4)
6499 + if (phba->sli_rev == LPFC_SLI_REV4) {
6500 lpfc_sli4_unreg_all_rpis(vport);
6501 -
6502 - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
6503 lpfc_mbx_unreg_vpi(vport);
6504 spin_lock_irq(shost->host_lock);
6505 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6506 - spin_unlock_irq(shost->host_lock);
6507 - }
6508 - /*
6509 - * If VPI is unreged, driver need to do INIT_VPI
6510 - * before re-registering
6511 - */
6512 - if (phba->sli_rev == LPFC_SLI_REV4) {
6513 - spin_lock_irq(shost->host_lock);
6514 + /*
6515 + * If VPI is unreged, driver need to do INIT_VPI
6516 + * before re-registering
6517 + */
6518 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6519 spin_unlock_irq(shost->host_lock);
6520 }
6521 @@ -1096,11 +1090,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6522 /* Set the fcfi to the fcfi we registered with */
6523 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
6524 }
6525 - } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
6526 - sp->cmn.request_multiple_Nport = 1;
6527 - /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
6528 - icmd->ulpCt_h = 1;
6529 - icmd->ulpCt_l = 0;
6530 + } else {
6531 + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
6532 + sp->cmn.request_multiple_Nport = 1;
6533 + /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
6534 + icmd->ulpCt_h = 1;
6535 + icmd->ulpCt_l = 0;
6536 + } else
6537 + sp->cmn.request_multiple_Nport = 0;
6538 }
6539
6540 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
6541 @@ -3656,7 +3653,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6542 }
6543
6544 icmd = &elsiocb->iocb;
6545 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6546 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6547 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6548 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6549 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6550 pcmd += sizeof(uint32_t);
6551 @@ -3673,7 +3671,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6552 return 1;
6553
6554 icmd = &elsiocb->iocb;
6555 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6556 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6557 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6558 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6559
6560 if (mbox)
6561 @@ -3695,7 +3694,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6562 return 1;
6563
6564 icmd = &elsiocb->iocb;
6565 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6566 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6567 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6568 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6569
6570 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
6571 @@ -3781,7 +3781,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
6572
6573 icmd = &elsiocb->iocb;
6574 oldcmd = &oldiocb->iocb;
6575 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6576 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6577 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6578 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6579
6580 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
6581 @@ -3853,7 +3854,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6582
6583 icmd = &elsiocb->iocb;
6584 oldcmd = &oldiocb->iocb;
6585 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6586 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6587 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6588
6589 /* Xmit ADISC ACC response tag <ulpIoTag> */
6590 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6591 @@ -3931,7 +3933,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6592
6593 icmd = &elsiocb->iocb;
6594 oldcmd = &oldiocb->iocb;
6595 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6596 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6597 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6598 +
6599 /* Xmit PRLI ACC response tag <ulpIoTag> */
6600 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6601 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
6602 @@ -4035,7 +4039,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
6603
6604 icmd = &elsiocb->iocb;
6605 oldcmd = &oldiocb->iocb;
6606 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6607 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6608 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6609 +
6610 /* Xmit RNID ACC response tag <ulpIoTag> */
6611 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6612 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
6613 @@ -4163,7 +4169,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
6614 if (!elsiocb)
6615 return 1;
6616
6617 - elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
6618 + elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
6619 + elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
6620 +
6621 /* Xmit ECHO ACC response tag <ulpIoTag> */
6622 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6623 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
6624 @@ -5054,13 +5062,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6625 uint8_t *pcmd;
6626 struct lpfc_iocbq *elsiocb;
6627 struct lpfc_nodelist *ndlp;
6628 - uint16_t xri;
6629 + uint16_t oxid;
6630 + uint16_t rxid;
6631 uint32_t cmdsize;
6632
6633 mb = &pmb->u.mb;
6634
6635 ndlp = (struct lpfc_nodelist *) pmb->context2;
6636 - xri = (uint16_t) ((unsigned long)(pmb->context1));
6637 + rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6638 + oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
6639 pmb->context1 = NULL;
6640 pmb->context2 = NULL;
6641
6642 @@ -5082,7 +5092,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6643 return;
6644
6645 icmd = &elsiocb->iocb;
6646 - icmd->ulpContext = xri;
6647 + icmd->ulpContext = rxid;
6648 + icmd->unsli3.rcvsli3.ox_id = oxid;
6649
6650 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6651 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6652 @@ -5137,13 +5148,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6653 uint8_t *pcmd;
6654 struct lpfc_iocbq *elsiocb;
6655 struct lpfc_nodelist *ndlp;
6656 - uint16_t xri, status;
6657 + uint16_t status;
6658 + uint16_t oxid;
6659 + uint16_t rxid;
6660 uint32_t cmdsize;
6661
6662 mb = &pmb->u.mb;
6663
6664 ndlp = (struct lpfc_nodelist *) pmb->context2;
6665 - xri = (uint16_t) ((unsigned long)(pmb->context1));
6666 + rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6667 + oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
6668 pmb->context1 = NULL;
6669 pmb->context2 = NULL;
6670
6671 @@ -5165,7 +5179,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6672 return;
6673
6674 icmd = &elsiocb->iocb;
6675 - icmd->ulpContext = xri;
6676 + icmd->ulpContext = rxid;
6677 + icmd->unsli3.rcvsli3.ox_id = oxid;
6678
6679 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6680 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6681 @@ -5238,8 +5253,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6682 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6683 if (mbox) {
6684 lpfc_read_lnk_stat(phba, mbox);
6685 - mbox->context1 =
6686 - (void *)((unsigned long) cmdiocb->iocb.ulpContext);
6687 + mbox->context1 = (void *)((unsigned long)
6688 + ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6689 + cmdiocb->iocb.ulpContext)); /* rx_id */
6690 mbox->context2 = lpfc_nlp_get(ndlp);
6691 mbox->vport = vport;
6692 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
6693 @@ -5314,7 +5330,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6694 pcmd += sizeof(uint32_t); /* Skip past command */
6695
6696 /* use the command's xri in the response */
6697 - elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
6698 + elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
6699 + elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6700
6701 rtv_rsp = (struct RTV_RSP *)pcmd;
6702
6703 @@ -5399,8 +5416,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6704 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6705 if (mbox) {
6706 lpfc_read_lnk_stat(phba, mbox);
6707 - mbox->context1 =
6708 - (void *)((unsigned long) cmdiocb->iocb.ulpContext);
6709 + mbox->context1 = (void *)((unsigned long)
6710 + ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6711 + cmdiocb->iocb.ulpContext)); /* rx_id */
6712 mbox->context2 = lpfc_nlp_get(ndlp);
6713 mbox->vport = vport;
6714 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
6715 @@ -5554,7 +5572,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
6716
6717 icmd = &elsiocb->iocb;
6718 oldcmd = &oldiocb->iocb;
6719 - icmd->ulpContext = oldcmd->ulpContext; /* Xri */
6720 + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
6721 + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
6722
6723 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6724 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6725 @@ -6586,7 +6605,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6726 {
6727 struct lpfc_vport *vport;
6728 unsigned long flags;
6729 - int i;
6730 + int i = 0;
6731
6732 /* The physical ports are always vpi 0 - translate is unnecessary. */
6733 if (vpi > 0) {
6734 @@ -6609,7 +6628,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6735
6736 spin_lock_irqsave(&phba->hbalock, flags);
6737 list_for_each_entry(vport, &phba->port_list, listentry) {
6738 - if (vport->vpi == vpi) {
6739 + if (vport->vpi == i) {
6740 spin_unlock_irqrestore(&phba->hbalock, flags);
6741 return vport;
6742 }
6743 @@ -7787,6 +7806,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6744 {
6745 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6746 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
6747 + uint16_t lxri = 0;
6748
6749 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6750 unsigned long iflag = 0;
6751 @@ -7815,7 +7835,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6752 }
6753 }
6754 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6755 - sglq_entry = __lpfc_get_active_sglq(phba, xri);
6756 + lxri = lpfc_sli4_xri_inrange(phba, xri);
6757 + if (lxri == NO_XRI) {
6758 + spin_unlock_irqrestore(&phba->hbalock, iflag);
6759 + return;
6760 + }
6761 + sglq_entry = __lpfc_get_active_sglq(phba, lxri);
6762 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6763 spin_unlock_irqrestore(&phba->hbalock, iflag);
6764 return;
6765 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
6766 index 18d0dbf..bef17e3 100644
6767 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
6768 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
6769 @@ -2247,7 +2247,6 @@ read_next_fcf:
6770 spin_lock_irq(&phba->hbalock);
6771 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
6772 spin_unlock_irq(&phba->hbalock);
6773 - lpfc_sli4_mbox_cmd_free(phba, mboxq);
6774 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6775 LPFC_FCOE_FCF_GET_FIRST);
6776 return;
6777 @@ -2645,6 +2644,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6778 vport->vpi_state |= LPFC_VPI_REGISTERED;
6779 vport->fc_flag |= FC_VFI_REGISTERED;
6780 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6781 + vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
6782 spin_unlock_irq(shost->host_lock);
6783
6784 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
6785 diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
6786 index 9059524..df53d10 100644
6787 --- a/drivers/scsi/lpfc/lpfc_hw.h
6788 +++ b/drivers/scsi/lpfc/lpfc_hw.h
6789 @@ -3470,11 +3470,16 @@ typedef struct {
6790 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
6791
6792 struct rcv_sli3 {
6793 - uint32_t word8Rsvd;
6794 #ifdef __BIG_ENDIAN_BITFIELD
6795 + uint16_t ox_id;
6796 + uint16_t seq_cnt;
6797 +
6798 uint16_t vpi;
6799 uint16_t word9Rsvd;
6800 #else /* __LITTLE_ENDIAN */
6801 + uint16_t seq_cnt;
6802 + uint16_t ox_id;
6803 +
6804 uint16_t word9Rsvd;
6805 uint16_t vpi;
6806 #endif
6807 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
6808 index 11e26a2..7f8003b 100644
6809 --- a/drivers/scsi/lpfc/lpfc_hw4.h
6810 +++ b/drivers/scsi/lpfc/lpfc_hw4.h
6811 @@ -170,15 +170,8 @@ struct lpfc_sli_intf {
6812 #define LPFC_PCI_FUNC3 3
6813 #define LPFC_PCI_FUNC4 4
6814
6815 -/* SLI4 interface type-2 control register offsets */
6816 -#define LPFC_CTL_PORT_SEM_OFFSET 0x400
6817 -#define LPFC_CTL_PORT_STA_OFFSET 0x404
6818 -#define LPFC_CTL_PORT_CTL_OFFSET 0x408
6819 -#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
6820 -#define LPFC_CTL_PORT_ER2_OFFSET 0x410
6821 +/* SLI4 interface type-2 PDEV_CTL register */
6822 #define LPFC_CTL_PDEV_CTL_OFFSET 0x414
6823 -
6824 -/* Some SLI4 interface type-2 PDEV_CTL register bits */
6825 #define LPFC_CTL_PDEV_CTL_DRST 0x00000001
6826 #define LPFC_CTL_PDEV_CTL_FRST 0x00000002
6827 #define LPFC_CTL_PDEV_CTL_DD 0x00000004
6828 @@ -337,6 +330,7 @@ struct lpfc_cqe {
6829 #define CQE_CODE_RELEASE_WQE 0x2
6830 #define CQE_CODE_RECEIVE 0x4
6831 #define CQE_CODE_XRI_ABORTED 0x5
6832 +#define CQE_CODE_RECEIVE_V1 0x9
6833
6834 /* completion queue entry for wqe completions */
6835 struct lpfc_wcqe_complete {
6836 @@ -440,7 +434,10 @@ struct lpfc_rcqe {
6837 #define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
6838 #define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
6839 #define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
6840 - uint32_t reserved1;
6841 + uint32_t word1;
6842 +#define lpfc_rcqe_fcf_id_v1_SHIFT 0
6843 +#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
6844 +#define lpfc_rcqe_fcf_id_v1_WORD word1
6845 uint32_t word2;
6846 #define lpfc_rcqe_length_SHIFT 16
6847 #define lpfc_rcqe_length_MASK 0x0000FFFF
6848 @@ -451,6 +448,9 @@ struct lpfc_rcqe {
6849 #define lpfc_rcqe_fcf_id_SHIFT 0
6850 #define lpfc_rcqe_fcf_id_MASK 0x0000003F
6851 #define lpfc_rcqe_fcf_id_WORD word2
6852 +#define lpfc_rcqe_rq_id_v1_SHIFT 0
6853 +#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
6854 +#define lpfc_rcqe_rq_id_v1_WORD word2
6855 uint32_t word3;
6856 #define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
6857 #define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
6858 @@ -515,7 +515,7 @@ struct lpfc_register {
6859 /* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
6860 #define LPFC_SLI_INTF 0x0058
6861
6862 -#define LPFC_SLIPORT_IF2_SMPHR 0x0400
6863 +#define LPFC_CTL_PORT_SEM_OFFSET 0x400
6864 #define lpfc_port_smphr_perr_SHIFT 31
6865 #define lpfc_port_smphr_perr_MASK 0x1
6866 #define lpfc_port_smphr_perr_WORD word0
6867 @@ -575,7 +575,7 @@ struct lpfc_register {
6868 #define LPFC_POST_STAGE_PORT_READY 0xC000
6869 #define LPFC_POST_STAGE_PORT_UE 0xF000
6870
6871 -#define LPFC_SLIPORT_STATUS 0x0404
6872 +#define LPFC_CTL_PORT_STA_OFFSET 0x404
6873 #define lpfc_sliport_status_err_SHIFT 31
6874 #define lpfc_sliport_status_err_MASK 0x1
6875 #define lpfc_sliport_status_err_WORD word0
6876 @@ -593,7 +593,7 @@ struct lpfc_register {
6877 #define lpfc_sliport_status_rdy_WORD word0
6878 #define MAX_IF_TYPE_2_RESETS 1000
6879
6880 -#define LPFC_SLIPORT_CNTRL 0x0408
6881 +#define LPFC_CTL_PORT_CTL_OFFSET 0x408
6882 #define lpfc_sliport_ctrl_end_SHIFT 30
6883 #define lpfc_sliport_ctrl_end_MASK 0x1
6884 #define lpfc_sliport_ctrl_end_WORD word0
6885 @@ -604,8 +604,8 @@ struct lpfc_register {
6886 #define lpfc_sliport_ctrl_ip_WORD word0
6887 #define LPFC_SLIPORT_INIT_PORT 1
6888
6889 -#define LPFC_SLIPORT_ERR_1 0x040C
6890 -#define LPFC_SLIPORT_ERR_2 0x0410
6891 +#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
6892 +#define LPFC_CTL_PORT_ER2_OFFSET 0x410
6893
6894 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
6895 * reside in BAR 2.
6896 @@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
6897 #define lpfc_grp_hdr_id_MASK 0x000000FF
6898 #define lpfc_grp_hdr_id_WORD word2
6899 uint8_t rev_name[128];
6900 + uint8_t date[12];
6901 + uint8_t revision[32];
6902 };
6903
6904 #define FCP_COMMAND 0x0
6905 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
6906 index 148b98d..027b797 100644
6907 --- a/drivers/scsi/lpfc/lpfc_init.c
6908 +++ b/drivers/scsi/lpfc/lpfc_init.c
6909 @@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
6910 sizeof fc_host_symbolic_name(shost));
6911
6912 fc_host_supported_speeds(shost) = 0;
6913 + if (phba->lmt & LMT_16Gb)
6914 + fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
6915 if (phba->lmt & LMT_10Gb)
6916 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
6917 if (phba->lmt & LMT_8Gb)
6918 @@ -3647,7 +3649,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6919 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6920
6921 vport = lpfc_find_vport_by_vpid(phba,
6922 - acqe_fip->index - phba->vpi_base);
6923 + acqe_fip->index);
6924 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6925 if (!ndlp)
6926 break;
6927 @@ -4035,6 +4037,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
6928 }
6929
6930 /**
6931 + * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
6932 + * @phba: pointer to lpfc hba data structure.
6933 + *
6934 + * This function enables the PCI SR-IOV virtual functions to a physical
6935 + * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
6936 + * enable the number of virtual functions to the physical function. As
6937 + * not all devices support SR-IOV, the return code from the pci_enable_sriov()
6938 + * API call does not considered as an error condition for most of the device.
6939 + **/
6940 +uint16_t
6941 +lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
6942 +{
6943 + struct pci_dev *pdev = phba->pcidev;
6944 + uint16_t nr_virtfn;
6945 + int pos;
6946 +
6947 + if (!pdev->is_physfn)
6948 + return 0;
6949 +
6950 + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6951 + if (pos == 0)
6952 + return 0;
6953 +
6954 + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
6955 + return nr_virtfn;
6956 +}
6957 +
6958 +/**
6959 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
6960 * @phba: pointer to lpfc hba data structure.
6961 * @nr_vfn: number of virtual functions to be enabled.
6962 @@ -4049,8 +4079,17 @@ int
6963 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
6964 {
6965 struct pci_dev *pdev = phba->pcidev;
6966 + uint16_t max_nr_vfn;
6967 int rc;
6968
6969 + max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
6970 + if (nr_vfn > max_nr_vfn) {
6971 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6972 + "3057 Requested vfs (%d) greater than "
6973 + "supported vfs (%d)", nr_vfn, max_nr_vfn);
6974 + return -EINVAL;
6975 + }
6976 +
6977 rc = pci_enable_sriov(pdev, nr_vfn);
6978 if (rc) {
6979 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6980 @@ -4516,7 +4555,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6981 }
6982 }
6983
6984 - return rc;
6985 + return 0;
6986
6987 out_free_fcp_eq_hdl:
6988 kfree(phba->sli4_hba.fcp_eq_hdl);
6989 @@ -4966,17 +5005,14 @@ out_free_mem:
6990 * @phba: pointer to lpfc hba data structure.
6991 *
6992 * This routine is invoked to post rpi header templates to the
6993 - * HBA consistent with the SLI-4 interface spec. This routine
6994 + * port for those SLI4 ports that do not support extents. This routine
6995 * posts a PAGE_SIZE memory region to the port to hold up to
6996 - * PAGE_SIZE modulo 64 rpi context headers.
6997 - * No locks are held here because this is an initialization routine
6998 - * called only from probe or lpfc_online when interrupts are not
6999 - * enabled and the driver is reinitializing the device.
7000 + * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
7001 + * and should be called only when interrupts are disabled.
7002 *
7003 * Return codes
7004 * 0 - successful
7005 - * -ENOMEM - No available memory
7006 - * -EIO - The mailbox failed to complete successfully.
7007 + * -ERROR - otherwise.
7008 **/
7009 int
7010 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7011 @@ -5687,17 +5723,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7012 break;
7013 case LPFC_SLI_INTF_IF_TYPE_2:
7014 phba->sli4_hba.u.if_type2.ERR1regaddr =
7015 - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
7016 + phba->sli4_hba.conf_regs_memmap_p +
7017 + LPFC_CTL_PORT_ER1_OFFSET;
7018 phba->sli4_hba.u.if_type2.ERR2regaddr =
7019 - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
7020 + phba->sli4_hba.conf_regs_memmap_p +
7021 + LPFC_CTL_PORT_ER2_OFFSET;
7022 phba->sli4_hba.u.if_type2.CTRLregaddr =
7023 - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
7024 + phba->sli4_hba.conf_regs_memmap_p +
7025 + LPFC_CTL_PORT_CTL_OFFSET;
7026 phba->sli4_hba.u.if_type2.STATUSregaddr =
7027 - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
7028 + phba->sli4_hba.conf_regs_memmap_p +
7029 + LPFC_CTL_PORT_STA_OFFSET;
7030 phba->sli4_hba.SLIINTFregaddr =
7031 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7032 phba->sli4_hba.PSMPHRregaddr =
7033 - phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
7034 + phba->sli4_hba.conf_regs_memmap_p +
7035 + LPFC_CTL_PORT_SEM_OFFSET;
7036 phba->sli4_hba.RQDBregaddr =
7037 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
7038 phba->sli4_hba.WQDBregaddr =
7039 @@ -8859,11 +8900,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
7040 return -EINVAL;
7041 }
7042 lpfc_decode_firmware_rev(phba, fwrev, 1);
7043 - if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
7044 + if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
7045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7046 "3023 Updating Firmware. Current Version:%s "
7047 "New Version:%s\n",
7048 - fwrev, image->rev_name);
7049 + fwrev, image->revision);
7050 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
7051 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
7052 GFP_KERNEL);
7053 @@ -8892,9 +8933,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
7054 fw->size - offset);
7055 break;
7056 }
7057 - temp_offset += SLI4_PAGE_SIZE;
7058 memcpy(dmabuf->virt, fw->data + temp_offset,
7059 SLI4_PAGE_SIZE);
7060 + temp_offset += SLI4_PAGE_SIZE;
7061 }
7062 rc = lpfc_wr_object(phba, &dma_buffer_list,
7063 (fw->size - offset), &offset);
7064 @@ -9483,6 +9524,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7065 }
7066
7067 pci_restore_state(pdev);
7068 +
7069 + /*
7070 + * As the new kernel behavior of pci_restore_state() API call clears
7071 + * device saved_state flag, need to save the restored state again.
7072 + */
7073 + pci_save_state(pdev);
7074 +
7075 if (pdev->is_busmaster)
7076 pci_set_master(pdev);
7077
7078 diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
7079 index 5567670..83450cc 100644
7080 --- a/drivers/scsi/lpfc/lpfc_mbox.c
7081 +++ b/drivers/scsi/lpfc/lpfc_mbox.c
7082 @@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
7083 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
7084 bf_set(lpfc_init_vfi_vfi, init_vfi,
7085 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
7086 - bf_set(lpfc_init_vpi_vpi, init_vfi,
7087 + bf_set(lpfc_init_vfi_vpi, init_vfi,
7088 vport->phba->vpi_ids[vport->vpi]);
7089 bf_set(lpfc_init_vfi_fcfi, init_vfi,
7090 vport->phba->fcf.fcfi);
7091 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
7092 index 3ccc974..eadd241 100644
7093 --- a/drivers/scsi/lpfc/lpfc_scsi.c
7094 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
7095 @@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7096 case SCSI_PROT_NORMAL:
7097 default:
7098 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7099 - "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
7100 - scsi_get_prot_op(sc), guard_type);
7101 + "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
7102 + scsi_get_prot_op(sc));
7103 ret = 1;
7104 break;
7105
7106 }
7107 - } else if (guard_type == SHOST_DIX_GUARD_CRC) {
7108 + } else {
7109 switch (scsi_get_prot_op(sc)) {
7110 case SCSI_PROT_READ_STRIP:
7111 case SCSI_PROT_WRITE_INSERT:
7112 @@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7113
7114 case SCSI_PROT_READ_INSERT:
7115 case SCSI_PROT_WRITE_STRIP:
7116 + *txop = BG_OP_IN_CRC_OUT_NODIF;
7117 + *rxop = BG_OP_IN_NODIF_OUT_CRC;
7118 + break;
7119 +
7120 case SCSI_PROT_NORMAL:
7121 default:
7122 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7123 - "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
7124 - scsi_get_prot_op(sc), guard_type);
7125 + "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
7126 + scsi_get_prot_op(sc));
7127 ret = 1;
7128 break;
7129 }
7130 - } else {
7131 - /* unsupported format */
7132 - BUG();
7133 }
7134
7135 return ret;
7136 @@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
7137 return sc->device->sector_size;
7138 }
7139
7140 -/**
7141 - * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
7142 - * @sc: in: SCSI command
7143 - * @apptagmask: out: app tag mask
7144 - * @apptagval: out: app tag value
7145 - * @reftag: out: ref tag (reference tag)
7146 - *
7147 - * Description:
7148 - * Extract DIF parameters from the command if possible. Otherwise,
7149 - * use default parameters.
7150 - *
7151 - **/
7152 -static inline void
7153 -lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
7154 - uint16_t *apptagval, uint32_t *reftag)
7155 -{
7156 - struct scsi_dif_tuple *spt;
7157 - unsigned char op = scsi_get_prot_op(sc);
7158 - unsigned int protcnt = scsi_prot_sg_count(sc);
7159 - static int cnt;
7160 -
7161 - if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
7162 - op == SCSI_PROT_WRITE_PASS)) {
7163 -
7164 - cnt++;
7165 - spt = page_address(sg_page(scsi_prot_sglist(sc))) +
7166 - scsi_prot_sglist(sc)[0].offset;
7167 - *apptagmask = 0;
7168 - *apptagval = 0;
7169 - *reftag = cpu_to_be32(spt->ref_tag);
7170 -
7171 - } else {
7172 - /* SBC defines ref tag to be lower 32bits of LBA */
7173 - *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
7174 - *apptagmask = 0;
7175 - *apptagval = 0;
7176 - }
7177 -}
7178 -
7179 /*
7180 * This function sets up buffer list for protection groups of
7181 * type LPFC_PG_TYPE_NO_DIF
7182 @@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7183 dma_addr_t physaddr;
7184 int i = 0, num_bde = 0, status;
7185 int datadir = sc->sc_data_direction;
7186 - unsigned blksize;
7187 uint32_t reftag;
7188 - uint16_t apptagmask, apptagval;
7189 + unsigned blksize;
7190 uint8_t txop, rxop;
7191
7192 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
7193 @@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7194
7195 /* extract some info from the scsi command for pde*/
7196 blksize = lpfc_cmd_blksize(sc);
7197 - lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
7198 + reftag = scsi_get_lba(sc) & 0xffffffff;
7199
7200 /* setup PDE5 with what we have */
7201 pde5 = (struct lpfc_pde5 *) bpl;
7202 memset(pde5, 0, sizeof(struct lpfc_pde5));
7203 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
7204 - pde5->reftag = reftag;
7205
7206 /* Endianness conversion if necessary for PDE5 */
7207 pde5->word0 = cpu_to_le32(pde5->word0);
7208 - pde5->reftag = cpu_to_le32(pde5->reftag);
7209 + pde5->reftag = cpu_to_le32(reftag);
7210
7211 /* advance bpl and increment bde count */
7212 num_bde++;
7213 @@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7214 if (datadir == DMA_FROM_DEVICE) {
7215 bf_set(pde6_ce, pde6, 1);
7216 bf_set(pde6_re, pde6, 1);
7217 - bf_set(pde6_ae, pde6, 1);
7218 }
7219 bf_set(pde6_ai, pde6, 1);
7220 - bf_set(pde6_apptagval, pde6, apptagval);
7221 + bf_set(pde6_ae, pde6, 0);
7222 + bf_set(pde6_apptagval, pde6, 0);
7223
7224 /* Endianness conversion if necessary for PDE6 */
7225 pde6->word0 = cpu_to_le32(pde6->word0);
7226 @@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7227 unsigned char pgdone = 0, alldone = 0;
7228 unsigned blksize;
7229 uint32_t reftag;
7230 - uint16_t apptagmask, apptagval;
7231 uint8_t txop, rxop;
7232 int num_bde = 0;
7233
7234 @@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7235
7236 /* extract some info from the scsi command */
7237 blksize = lpfc_cmd_blksize(sc);
7238 - lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
7239 + reftag = scsi_get_lba(sc) & 0xffffffff;
7240
7241 split_offset = 0;
7242 do {
7243 @@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7244 pde5 = (struct lpfc_pde5 *) bpl;
7245 memset(pde5, 0, sizeof(struct lpfc_pde5));
7246 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
7247 - pde5->reftag = reftag;
7248
7249 /* Endianness conversion if necessary for PDE5 */
7250 pde5->word0 = cpu_to_le32(pde5->word0);
7251 - pde5->reftag = cpu_to_le32(pde5->reftag);
7252 + pde5->reftag = cpu_to_le32(reftag);
7253
7254 /* advance bpl and increment bde count */
7255 num_bde++;
7256 @@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7257 bf_set(pde6_oprx, pde6, rxop);
7258 bf_set(pde6_ce, pde6, 1);
7259 bf_set(pde6_re, pde6, 1);
7260 - bf_set(pde6_ae, pde6, 1);
7261 bf_set(pde6_ai, pde6, 1);
7262 - bf_set(pde6_apptagval, pde6, apptagval);
7263 + bf_set(pde6_ae, pde6, 0);
7264 + bf_set(pde6_apptagval, pde6, 0);
7265
7266 /* Endianness conversion if necessary for PDE6 */
7267 pde6->word0 = cpu_to_le32(pde6->word0);
7268 @@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7269 memset(pde7, 0, sizeof(struct lpfc_pde7));
7270 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
7271
7272 - pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
7273 - pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
7274 + pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
7275 + pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
7276
7277 protgrp_blks = protgroup_len / 8;
7278 protgrp_bytes = protgrp_blks * blksize;
7279 @@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
7280 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
7281 protgroup_offset += protgroup_remainder;
7282 protgrp_blks = protgroup_remainder / 8;
7283 - protgrp_bytes = protgroup_remainder * blksize;
7284 + protgrp_bytes = protgrp_blks * blksize;
7285 } else {
7286 protgroup_offset = 0;
7287 curr_prot++;
7288 @@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
7289 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
7290 /*
7291 * setup sense data descriptor 0 per SPC-4 as an information
7292 - * field, and put the failing LBA in it
7293 + * field, and put the failing LBA in it.
7294 + * This code assumes there was also a guard/app/ref tag error
7295 + * indication.
7296 */
7297 - cmd->sense_buffer[8] = 0; /* Information */
7298 - cmd->sense_buffer[9] = 0xa; /* Add. length */
7299 + cmd->sense_buffer[7] = 0xc; /* Additional sense length */
7300 + cmd->sense_buffer[8] = 0; /* Information descriptor type */
7301 + cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
7302 + cmd->sense_buffer[10] = 0x80; /* Validity bit */
7303 bghm /= cmd->device->sector_size;
7304
7305 failing_sector = scsi_get_lba(cmd);
7306 failing_sector += bghm;
7307
7308 - put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
7309 + /* Descriptor Information */
7310 + put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
7311 }
7312
7313 if (!ret) {
7314 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
7315 index 98999bb..5b28ea1 100644
7316 --- a/drivers/scsi/lpfc/lpfc_sli.c
7317 +++ b/drivers/scsi/lpfc/lpfc_sli.c
7318 @@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
7319 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
7320 if (rrq) {
7321 rrq->send_rrq = send_rrq;
7322 - rrq->xritag = phba->sli4_hba.xri_ids[xritag];
7323 + rrq->xritag = xritag;
7324 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
7325 rrq->ndlp = ndlp;
7326 rrq->nlp_DID = ndlp->nlp_DID;
7327 @@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7328
7329 /* search continue save q for same XRI */
7330 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
7331 - if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
7332 + if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
7333 + saveq->iocb.unsli3.rcvsli3.ox_id) {
7334 list_add_tail(&saveq->list, &iocbq->list);
7335 found = 1;
7336 break;
7337 @@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
7338 irspiocbq);
7339 break;
7340 case CQE_CODE_RECEIVE:
7341 + case CQE_CODE_RECEIVE_V1:
7342 dmabuf = container_of(cq_event, struct hbq_dmabuf,
7343 cq_event);
7344 lpfc_sli4_handle_received_buffer(phba, dmabuf);
7345 @@ -5837,6 +5839,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7346 "Advanced Error Reporting (AER)\n");
7347 phba->cfg_aer_support = 0;
7348 }
7349 + rc = 0;
7350 }
7351
7352 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7353 @@ -7318,12 +7321,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7354 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7355 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7356 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7357 - break;
7358 + break;
7359 case CMD_XMIT_SEQUENCE64_CX:
7360 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7361 iocbq->iocb.un.ulpWord[3]);
7362 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7363 - iocbq->iocb.ulpContext);
7364 + iocbq->iocb.unsli3.rcvsli3.ox_id);
7365 /* The entire sequence is transmitted for this IOCB */
7366 xmit_len = total_len;
7367 cmnd = CMD_XMIT_SEQUENCE64_CR;
7368 @@ -7341,7 +7344,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7369 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
7370 wqe->xmit_sequence.xmit_len = xmit_len;
7371 command_type = OTHER_COMMAND;
7372 - break;
7373 + break;
7374 case CMD_XMIT_BCAST64_CN:
7375 /* word3 iocb=iotag32 wqe=seq_payload_len */
7376 wqe->xmit_bcast64.seq_payload_len = xmit_len;
7377 @@ -7355,7 +7358,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7378 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7379 LPFC_WQE_LENLOC_WORD3);
7380 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7381 - break;
7382 + break;
7383 case CMD_FCP_IWRITE64_CR:
7384 command_type = FCP_COMMAND_DATA_OUT;
7385 /* word3 iocb=iotag wqe=payload_offset_len */
7386 @@ -7375,7 +7378,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7387 LPFC_WQE_LENLOC_WORD4);
7388 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7389 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
7390 - break;
7391 + break;
7392 case CMD_FCP_IREAD64_CR:
7393 /* word3 iocb=iotag wqe=payload_offset_len */
7394 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7395 @@ -7394,7 +7397,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7396 LPFC_WQE_LENLOC_WORD4);
7397 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7398 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
7399 - break;
7400 + break;
7401 case CMD_FCP_ICMND64_CR:
7402 /* word3 iocb=IO_TAG wqe=reserved */
7403 wqe->fcp_icmd.rsrvd3 = 0;
7404 @@ -7407,7 +7410,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7405 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7406 LPFC_WQE_LENLOC_NONE);
7407 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7408 - break;
7409 + break;
7410 case CMD_GEN_REQUEST64_CR:
7411 /* For this command calculate the xmit length of the
7412 * request bde.
7413 @@ -7442,7 +7445,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7414 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7415 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
7416 command_type = OTHER_COMMAND;
7417 - break;
7418 + break;
7419 case CMD_XMIT_ELS_RSP64_CX:
7420 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7421 /* words0-2 BDE memcpy */
7422 @@ -7457,7 +7460,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7423 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7424 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7425 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7426 - iocbq->iocb.ulpContext);
7427 + iocbq->iocb.unsli3.rcvsli3.ox_id);
7428 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
7429 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7430 phba->vpi_ids[iocbq->vport->vpi]);
7431 @@ -7470,7 +7473,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7432 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7433 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7434 command_type = OTHER_COMMAND;
7435 - break;
7436 + break;
7437 case CMD_CLOSE_XRI_CN:
7438 case CMD_ABORT_XRI_CN:
7439 case CMD_ABORT_XRI_CX:
7440 @@ -7509,7 +7512,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7441 cmnd = CMD_ABORT_XRI_CX;
7442 command_type = OTHER_COMMAND;
7443 xritag = 0;
7444 - break;
7445 + break;
7446 case CMD_XMIT_BLS_RSP64_CX:
7447 /* As BLS ABTS RSP WQE is very different from other WQEs,
7448 * we re-construct this WQE here based on information in
7449 @@ -7553,7 +7556,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7450 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7451 }
7452
7453 - break;
7454 + break;
7455 case CMD_XRI_ABORTED_CX:
7456 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
7457 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
7458 @@ -7565,7 +7568,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7459 "2014 Invalid command 0x%x\n",
7460 iocbq->iocb.ulpCommand);
7461 return IOCB_ERROR;
7462 - break;
7463 + break;
7464 }
7465
7466 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
7467 @@ -10481,10 +10484,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
7468 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
7469 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
7470 struct hbq_dmabuf *dma_buf;
7471 - uint32_t status;
7472 + uint32_t status, rq_id;
7473 unsigned long iflags;
7474
7475 - if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
7476 + if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
7477 + rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
7478 + else
7479 + rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
7480 + if (rq_id != hrq->queue_id)
7481 goto out;
7482
7483 status = bf_get(lpfc_rcqe_status, rcqe);
7484 @@ -10563,6 +10570,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
7485 (struct sli4_wcqe_xri_aborted *)&cqevt);
7486 break;
7487 case CQE_CODE_RECEIVE:
7488 + case CQE_CODE_RECEIVE_V1:
7489 /* Process the RQ event */
7490 phba->last_completion_time = jiffies;
7491 workposted = lpfc_sli4_sp_handle_rcqe(phba,
7492 @@ -12345,19 +12353,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
7493 }
7494
7495 /**
7496 - * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7497 + * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
7498 * @phba: pointer to lpfc hba data structure.
7499 *
7500 * This routine is invoked to post rpi header templates to the
7501 - * port for those SLI4 ports that do not support extents. This routine
7502 - * posts a PAGE_SIZE memory region to the port to hold up to
7503 - * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
7504 - * and should be called only when interrupts are disabled.
7505 + * HBA consistent with the SLI-4 interface spec. This routine
7506 + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
7507 + * SLI4_PAGE_SIZE modulo 64 rpi context headers.
7508 *
7509 - * Return codes
7510 - * 0 - successful
7511 - * -ERROR - otherwise.
7512 - */
7513 + * Returns
7514 + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
7515 + * LPFC_RPI_ALLOC_ERROR if no rpis are available.
7516 + **/
7517 uint16_t
7518 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
7519 {
7520 @@ -13406,7 +13413,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
7521 * This function validates the xri maps to the known range of XRIs allocated an
7522 * used by the driver.
7523 **/
7524 -static uint16_t
7525 +uint16_t
7526 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
7527 uint16_t xri)
7528 {
7529 @@ -13643,10 +13650,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
7530 static struct lpfc_iocbq *
7531 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7532 {
7533 + struct hbq_dmabuf *hbq_buf;
7534 struct lpfc_dmabuf *d_buf, *n_buf;
7535 struct lpfc_iocbq *first_iocbq, *iocbq;
7536 struct fc_frame_header *fc_hdr;
7537 uint32_t sid;
7538 + uint32_t len, tot_len;
7539 struct ulp_bde64 *pbde;
7540
7541 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
7542 @@ -13655,6 +13664,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7543 lpfc_update_rcv_time_stamp(vport);
7544 /* get the Remote Port's SID */
7545 sid = sli4_sid_from_fc_hdr(fc_hdr);
7546 + tot_len = 0;
7547 /* Get an iocbq struct to fill in. */
7548 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
7549 if (first_iocbq) {
7550 @@ -13662,9 +13672,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7551 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
7552 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
7553 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
7554 - first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
7555 - /* iocbq is prepped for internal consumption. Logical vpi. */
7556 - first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
7557 + first_iocbq->iocb.ulpContext = NO_XRI;
7558 + first_iocbq->iocb.unsli3.rcvsli3.ox_id =
7559 + be16_to_cpu(fc_hdr->fh_ox_id);
7560 + /* iocbq is prepped for internal consumption. Physical vpi. */
7561 + first_iocbq->iocb.unsli3.rcvsli3.vpi =
7562 + vport->phba->vpi_ids[vport->vpi];
7563 /* put the first buffer into the first IOCBq */
7564 first_iocbq->context2 = &seq_dmabuf->dbuf;
7565 first_iocbq->context3 = NULL;
7566 @@ -13672,9 +13685,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7567 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
7568 LPFC_DATA_BUF_SIZE;
7569 first_iocbq->iocb.un.rcvels.remoteID = sid;
7570 - first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
7571 - bf_get(lpfc_rcqe_length,
7572 + tot_len = bf_get(lpfc_rcqe_length,
7573 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7574 + first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
7575 }
7576 iocbq = first_iocbq;
7577 /*
7578 @@ -13692,9 +13705,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7579 pbde = (struct ulp_bde64 *)
7580 &iocbq->iocb.unsli3.sli3Words[4];
7581 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
7582 - first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
7583 - bf_get(lpfc_rcqe_length,
7584 - &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7585 +
7586 + /* We need to get the size out of the right CQE */
7587 + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
7588 + len = bf_get(lpfc_rcqe_length,
7589 + &hbq_buf->cq_event.cqe.rcqe_cmpl);
7590 + iocbq->iocb.unsli3.rcvsli3.acc_len += len;
7591 + tot_len += len;
7592 } else {
7593 iocbq = lpfc_sli_get_iocbq(vport->phba);
7594 if (!iocbq) {
7595 @@ -13712,9 +13729,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
7596 iocbq->iocb.ulpBdeCount = 1;
7597 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
7598 LPFC_DATA_BUF_SIZE;
7599 - first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
7600 - bf_get(lpfc_rcqe_length,
7601 - &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
7602 +
7603 + /* We need to get the size out of the right CQE */
7604 + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
7605 + len = bf_get(lpfc_rcqe_length,
7606 + &hbq_buf->cq_event.cqe.rcqe_cmpl);
7607 + tot_len += len;
7608 + iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
7609 +
7610 iocbq->iocb.un.rcvels.remoteID = sid;
7611 list_add_tail(&iocbq->list, &first_iocbq->list);
7612 }
7613 @@ -13787,7 +13809,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
7614 lpfc_in_buf_free(phba, &dmabuf->dbuf);
7615 return;
7616 }
7617 - fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
7618 + if ((bf_get(lpfc_cqe_code,
7619 + &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
7620 + fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
7621 + &dmabuf->cq_event.cqe.rcqe_cmpl);
7622 + else
7623 + fcfi = bf_get(lpfc_rcqe_fcf_id,
7624 + &dmabuf->cq_event.cqe.rcqe_cmpl);
7625 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
7626 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
7627 /* throw out the frame */
7628 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
7629 index 4b17035..88387c1 100644
7630 --- a/drivers/scsi/lpfc/lpfc_sli4.h
7631 +++ b/drivers/scsi/lpfc/lpfc_sli4.h
7632 @@ -81,6 +81,8 @@
7633 (fc_hdr)->fh_f_ctl[1] << 8 | \
7634 (fc_hdr)->fh_f_ctl[2])
7635
7636 +#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
7637 +
7638 enum lpfc_sli4_queue_type {
7639 LPFC_EQ,
7640 LPFC_GCQ,
7641 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
7642 index 1da606c..83035bd 100644
7643 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
7644 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
7645 @@ -1740,9 +1740,11 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
7646 static void
7647 _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
7648 {
7649 - if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
7650 - ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
7651 + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
7652 + return;
7653
7654 + switch (ioc->pdev->device) {
7655 + case MPI2_MFGPAGE_DEVID_SAS2008:
7656 switch (ioc->pdev->subsystem_device) {
7657 case MPT2SAS_INTEL_RMS2LL080_SSDID:
7658 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
7659 @@ -1752,7 +1754,20 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
7660 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
7661 MPT2SAS_INTEL_RMS2LL040_BRANDING);
7662 break;
7663 + default:
7664 + break;
7665 }
7666 + case MPI2_MFGPAGE_DEVID_SAS2308_2:
7667 + switch (ioc->pdev->subsystem_device) {
7668 + case MPT2SAS_INTEL_RS25GB008_SSDID:
7669 + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
7670 + MPT2SAS_INTEL_RS25GB008_BRANDING);
7671 + break;
7672 + default:
7673 + break;
7674 + }
7675 + default:
7676 + break;
7677 }
7678 }
7679
7680 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
7681 index 451dc1c..41a57a7 100644
7682 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h
7683 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
7684 @@ -161,12 +161,15 @@
7685 "Intel Integrated RAID Module RMS2LL080"
7686 #define MPT2SAS_INTEL_RMS2LL040_BRANDING \
7687 "Intel Integrated RAID Module RMS2LL040"
7688 +#define MPT2SAS_INTEL_RS25GB008_BRANDING \
7689 + "Intel(R) RAID Controller RS25GB008"
7690
7691 /*
7692 * Intel HBA SSDIDs
7693 */
7694 #define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
7695 #define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
7696 +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
7697
7698
7699 /*
7700 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
7701 index e327a3c..8dc2ad4 100644
7702 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
7703 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
7704 @@ -3698,7 +3698,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
7705 return 0;
7706 }
7707
7708 - if (ioc->pci_error_recovery) {
7709 + if (ioc->pci_error_recovery || ioc->remove_host) {
7710 scmd->result = DID_NO_CONNECT << 16;
7711 scmd->scsi_done(scmd);
7712 return 0;
7713 @@ -7211,7 +7211,6 @@ _scsih_remove(struct pci_dev *pdev)
7714 }
7715
7716 sas_remove_host(shost);
7717 - _scsih_shutdown(pdev);
7718 list_del(&ioc->list);
7719 scsi_remove_host(shost);
7720 scsi_host_put(shost);
7721 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7722 index 920b76b..b2df2f9 100644
7723 --- a/drivers/scsi/qla2xxx/qla_init.c
7724 +++ b/drivers/scsi/qla2xxx/qla_init.c
7725 @@ -3822,15 +3822,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
7726 req = vha->req;
7727 rsp = req->rsp;
7728
7729 - atomic_set(&vha->loop_state, LOOP_UPDATE);
7730 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7731 if (vha->flags.online) {
7732 if (!(rval = qla2x00_fw_ready(vha))) {
7733 /* Wait at most MAX_TARGET RSCNs for a stable link. */
7734 wait_time = 256;
7735 do {
7736 - atomic_set(&vha->loop_state, LOOP_UPDATE);
7737 -
7738 /* Issue a marker after FW becomes ready. */
7739 qla2x00_marker(vha, req, rsp, 0, 0,
7740 MK_SYNC_ALL);
7741 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
7742 index 1b60a95..e0fa877 100644
7743 --- a/drivers/scsi/qla2xxx/qla_isr.c
7744 +++ b/drivers/scsi/qla2xxx/qla_isr.c
7745 @@ -736,7 +736,6 @@ skip_rio:
7746 vha->flags.rscn_queue_overflow = 1;
7747 }
7748
7749 - atomic_set(&vha->loop_state, LOOP_UPDATE);
7750 atomic_set(&vha->loop_down_timer, 0);
7751 vha->flags.management_server_logged_in = 0;
7752
7753 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
7754 index 98b6e3b..e809e9d 100644
7755 --- a/drivers/tty/pty.c
7756 +++ b/drivers/tty/pty.c
7757 @@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }
7758 int pty_limit = NR_UNIX98_PTY_DEFAULT;
7759 static int pty_limit_min;
7760 static int pty_limit_max = NR_UNIX98_PTY_MAX;
7761 +static int tty_count;
7762 static int pty_count;
7763
7764 +static inline void pty_inc_count(void)
7765 +{
7766 + pty_count = (++tty_count) / 2;
7767 +}
7768 +
7769 +static inline void pty_dec_count(void)
7770 +{
7771 + pty_count = (--tty_count) / 2;
7772 +}
7773 +
7774 static struct cdev ptmx_cdev;
7775
7776 static struct ctl_table pty_table[] = {
7777 @@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
7778
7779 static void pty_unix98_shutdown(struct tty_struct *tty)
7780 {
7781 + tty_driver_remove_tty(tty->driver, tty);
7782 /* We have our own method as we don't use the tty index */
7783 kfree(tty->termios);
7784 }
7785 @@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
7786 */
7787 tty_driver_kref_get(driver);
7788 tty->count++;
7789 - pty_count++;
7790 + pty_inc_count(); /* tty */
7791 + pty_inc_count(); /* tty->link */
7792 return 0;
7793 err_free_mem:
7794 deinitialize_tty_struct(o_tty);
7795 @@ -602,7 +615,7 @@ err_free_tty:
7796
7797 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
7798 {
7799 - pty_count--;
7800 + pty_dec_count();
7801 }
7802
7803 static const struct tty_operations ptm_unix98_ops = {
7804 diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
7805 index d32b5bb..762ce72 100644
7806 --- a/drivers/tty/serial/8250.c
7807 +++ b/drivers/tty/serial/8250.c
7808 @@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)
7809 unsigned int iir, ier = 0, lsr;
7810 unsigned long flags;
7811
7812 + spin_lock_irqsave(&up->port.lock, flags);
7813 +
7814 /*
7815 * Must disable interrupts or else we risk racing with the interrupt
7816 * based handler.
7817 @@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)
7818 * the "Diva" UART used on the management processor on many HP
7819 * ia64 and parisc boxes.
7820 */
7821 - spin_lock_irqsave(&up->port.lock, flags);
7822 lsr = serial_in(up, UART_LSR);
7823 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
7824 - spin_unlock_irqrestore(&up->port.lock, flags);
7825 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
7826 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
7827 (lsr & UART_LSR_THRE)) {
7828 @@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)
7829 }
7830
7831 if (!(iir & UART_IIR_NO_INT))
7832 - serial8250_handle_port(up);
7833 + transmit_chars(up);
7834
7835 if (is_real_interrupt(up->port.irq))
7836 serial_out(up, UART_IER, ier);
7837
7838 + spin_unlock_irqrestore(&up->port.lock, flags);
7839 +
7840 /* Standard timer interval plus 0.2s to keep the port running */
7841 mod_timer(&up->timer,
7842 jiffies + uart_poll_timeout(&up->port) + HZ / 5);
7843 diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
7844 index f41b425..ff48fdb 100644
7845 --- a/drivers/tty/serial/8250_pci.c
7846 +++ b/drivers/tty/serial/8250_pci.c
7847 @@ -3886,7 +3886,7 @@ static struct pci_device_id serial_pci_tbl[] = {
7848 0, 0, pbn_b0_1_115200 },
7849
7850 /*
7851 - * Best Connectivity PCI Multi I/O cards
7852 + * Best Connectivity and Rosewill PCI Multi I/O cards
7853 */
7854
7855 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
7856 @@ -3894,6 +3894,10 @@ static struct pci_device_id serial_pci_tbl[] = {
7857 0, 0, pbn_b0_1_115200 },
7858
7859 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
7860 + 0xA000, 0x3002,
7861 + 0, 0, pbn_b0_bt_2_115200 },
7862 +
7863 + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
7864 0xA000, 0x3004,
7865 0, 0, pbn_b0_bt_4_115200 },
7866 /* Intel CE4100 */
7867 diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
7868 index fc301f6..a2f2365 100644
7869 --- a/drivers/tty/serial/8250_pnp.c
7870 +++ b/drivers/tty/serial/8250_pnp.c
7871 @@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {
7872 /* IBM */
7873 /* IBM Thinkpad 701 Internal Modem Voice */
7874 { "IBM0033", 0 },
7875 + /* Intermec */
7876 + /* Intermec CV60 touchscreen port */
7877 + { "PNP4972", 0 },
7878 /* Intertex */
7879 /* Intertex 28k8 33k6 Voice EXT PnP */
7880 { "IXDC801", 0 },
7881 diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
7882 index a1fe304..d73aadd 100644
7883 --- a/drivers/tty/serial/max3107-aava.c
7884 +++ b/drivers/tty/serial/max3107-aava.c
7885 @@ -340,5 +340,5 @@ module_exit(max3107_exit);
7886
7887 MODULE_DESCRIPTION("MAX3107 driver");
7888 MODULE_AUTHOR("Aavamobile");
7889 -MODULE_ALIAS("aava-max3107-spi");
7890 +MODULE_ALIAS("spi:aava-max3107");
7891 MODULE_LICENSE("GPL v2");
7892 diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
7893 index 750b4f6..a816460 100644
7894 --- a/drivers/tty/serial/max3107.c
7895 +++ b/drivers/tty/serial/max3107.c
7896 @@ -1209,5 +1209,5 @@ module_exit(max3107_exit);
7897
7898 MODULE_DESCRIPTION("MAX3107 driver");
7899 MODULE_AUTHOR("Aavamobile");
7900 -MODULE_ALIAS("max3107-spi");
7901 +MODULE_ALIAS("spi:max3107");
7902 MODULE_LICENSE("GPL v2");
7903 diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
7904 index a764bf9..23bc743 100644
7905 --- a/drivers/tty/serial/mrst_max3110.c
7906 +++ b/drivers/tty/serial/mrst_max3110.c
7907 @@ -917,4 +917,4 @@ module_init(serial_m3110_init);
7908 module_exit(serial_m3110_exit);
7909
7910 MODULE_LICENSE("GPL v2");
7911 -MODULE_ALIAS("max3110-uart");
7912 +MODULE_ALIAS("spi:max3110-uart");
7913 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
7914 index 47cadf4..6d3ec14 100644
7915 --- a/drivers/tty/serial/omap-serial.c
7916 +++ b/drivers/tty/serial/omap-serial.c
7917 @@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
7918
7919 serial_omap_set_mctrl(&up->port, up->port.mctrl);
7920 /* Software Flow Control Configuration */
7921 - if (termios->c_iflag & (IXON | IXOFF))
7922 - serial_omap_configure_xonxoff(up, termios);
7923 + serial_omap_configure_xonxoff(up, termios);
7924
7925 spin_unlock_irqrestore(&up->port.lock, flags);
7926 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
7927 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
7928 index 6556f74..b6f92d3 100644
7929 --- a/drivers/tty/tty_io.c
7930 +++ b/drivers/tty/tty_io.c
7931 @@ -1294,8 +1294,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
7932 *
7933 * Locking: tty_mutex for now
7934 */
7935 -static void tty_driver_remove_tty(struct tty_driver *driver,
7936 - struct tty_struct *tty)
7937 +void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
7938 {
7939 if (driver->ops->remove)
7940 driver->ops->remove(driver, tty);
7941 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
7942 index 88cfb8f..0f3a724 100644
7943 --- a/drivers/usb/host/ehci-hub.c
7944 +++ b/drivers/usb/host/ehci-hub.c
7945 @@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
7946 u32 temp;
7947 u32 power_okay;
7948 int i;
7949 - u8 resume_needed = 0;
7950 + unsigned long resume_needed = 0;
7951
7952 if (time_before (jiffies, ehci->next_statechange))
7953 msleep(5);
7954 @@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
7955 if (test_bit(i, &ehci->bus_suspended) &&
7956 (temp & PORT_SUSPEND)) {
7957 temp |= PORT_RESUME;
7958 - resume_needed = 1;
7959 + set_bit(i, &resume_needed);
7960 }
7961 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
7962 }
7963 @@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
7964 i = HCS_N_PORTS (ehci->hcs_params);
7965 while (i--) {
7966 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
7967 - if (test_bit(i, &ehci->bus_suspended) &&
7968 - (temp & PORT_SUSPEND)) {
7969 + if (test_bit(i, &resume_needed)) {
7970 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
7971 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
7972 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
7973 diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
7974 index e3374c8..491a209 100644
7975 --- a/drivers/usb/host/ehci-s5p.c
7976 +++ b/drivers/usb/host/ehci-s5p.c
7977 @@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
7978 goto fail_hcd;
7979 }
7980
7981 + s5p_ehci->hcd = hcd;
7982 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
7983
7984 if (IS_ERR(s5p_ehci->clk)) {
7985 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
7986 index e9f004e..629a968 100644
7987 --- a/drivers/usb/host/pci-quirks.c
7988 +++ b/drivers/usb/host/pci-quirks.c
7989 @@ -535,20 +535,34 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
7990 iounmap(base);
7991 }
7992
7993 +static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
7994 + {
7995 + /* Pegatron Lucid (ExoPC) */
7996 + .matches = {
7997 + DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
7998 + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
7999 + },
8000 + },
8001 + {
8002 + /* Pegatron Lucid (Ordissimo AIRIS) */
8003 + .matches = {
8004 + DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
8005 + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
8006 + },
8007 + },
8008 + { }
8009 +};
8010 +
8011 static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
8012 void __iomem *op_reg_base,
8013 u32 cap, u8 offset)
8014 {
8015 int try_handoff = 1, tried_handoff = 0;
8016
8017 - /* The Pegatron Lucid (ExoPC) tablet sporadically waits for 90
8018 - * seconds trying the handoff on its unused controller. Skip
8019 - * it. */
8020 + /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
8021 + * the handoff on its unused controller. Skip it. */
8022 if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
8023 - const char *dmi_bn = dmi_get_system_info(DMI_BOARD_NAME);
8024 - const char *dmi_bv = dmi_get_system_info(DMI_BIOS_VERSION);
8025 - if (dmi_bn && !strcmp(dmi_bn, "EXOPG06411") &&
8026 - dmi_bv && !strcmp(dmi_bv, "Lucid-CE-133"))
8027 + if (dmi_check_system(ehci_dmi_nohandoff_table))
8028 try_handoff = 0;
8029 }
8030
8031 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
8032 index 0be788c..723f823 100644
8033 --- a/drivers/usb/host/xhci-hub.c
8034 +++ b/drivers/usb/host/xhci-hub.c
8035 @@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
8036 && (temp & PORT_POWER))
8037 status |= USB_PORT_STAT_SUSPEND;
8038 }
8039 - if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
8040 + if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
8041 + !DEV_SUPERSPEED(temp)) {
8042 if ((temp & PORT_RESET) || !(temp & PORT_PE))
8043 goto error;
8044 - if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies,
8045 - bus_state->resume_done[wIndex])) {
8046 + if (time_after_eq(jiffies,
8047 + bus_state->resume_done[wIndex])) {
8048 xhci_dbg(xhci, "Resume USB2 port %d\n",
8049 wIndex + 1);
8050 bus_state->resume_done[wIndex] = 0;
8051 @@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
8052 xhci_ring_device(xhci, slot_id);
8053 bus_state->port_c_suspend |= 1 << wIndex;
8054 bus_state->suspended_ports &= ~(1 << wIndex);
8055 + } else {
8056 + /*
8057 + * The resume has been signaling for less than
8058 + * 20ms. Report the port status as SUSPEND,
8059 + * let the usbcore check port status again
8060 + * and clear resume signaling later.
8061 + */
8062 + status |= USB_PORT_STAT_SUSPEND;
8063 }
8064 }
8065 if ((temp & PORT_PLS_MASK) == XDEV_U0
8066 @@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
8067 xhci_dbg(xhci, "PORTSC %04x\n", temp);
8068 if (temp & PORT_RESET)
8069 goto error;
8070 - if (temp & XDEV_U3) {
8071 + if ((temp & PORT_PLS_MASK) == XDEV_U3) {
8072 if ((temp & PORT_PE) == 0)
8073 goto error;
8074
8075 @@ -752,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
8076 memset(buf, 0, retval);
8077 status = 0;
8078
8079 - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
8080 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
8081
8082 spin_lock_irqsave(&xhci->lock, flags);
8083 /* For each port, did anything change? If so, set that bit in buf. */
8084 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
8085 index 70cacbb..d0871ea 100644
8086 --- a/drivers/usb/host/xhci-ring.c
8087 +++ b/drivers/usb/host/xhci-ring.c
8088 @@ -516,8 +516,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
8089 (unsigned long long) addr);
8090 }
8091
8092 +/* flip_cycle means flip the cycle bit of all but the first and last TRB.
8093 + * (The last TRB actually points to the ring enqueue pointer, which is not part
8094 + * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
8095 + */
8096 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
8097 - struct xhci_td *cur_td)
8098 + struct xhci_td *cur_td, bool flip_cycle)
8099 {
8100 struct xhci_segment *cur_seg;
8101 union xhci_trb *cur_trb;
8102 @@ -531,6 +535,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
8103 * leave the pointers intact.
8104 */
8105 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
8106 + /* Flip the cycle bit (link TRBs can't be the first
8107 + * or last TRB).
8108 + */
8109 + if (flip_cycle)
8110 + cur_trb->generic.field[3] ^=
8111 + cpu_to_le32(TRB_CYCLE);
8112 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
8113 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
8114 "in seg %p (0x%llx dma)\n",
8115 @@ -544,6 +554,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
8116 cur_trb->generic.field[2] = 0;
8117 /* Preserve only the cycle bit of this TRB */
8118 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
8119 + /* Flip the cycle bit except on the first or last TRB */
8120 + if (flip_cycle && cur_trb != cur_td->first_trb &&
8121 + cur_trb != cur_td->last_trb)
8122 + cur_trb->generic.field[3] ^=
8123 + cpu_to_le32(TRB_CYCLE);
8124 cur_trb->generic.field[3] |= cpu_to_le32(
8125 TRB_TYPE(TRB_TR_NOOP));
8126 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
8127 @@ -722,14 +737,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
8128 cur_td->urb->stream_id,
8129 cur_td, &deq_state);
8130 else
8131 - td_to_noop(xhci, ep_ring, cur_td);
8132 + td_to_noop(xhci, ep_ring, cur_td, false);
8133 remove_finished_td:
8134 /*
8135 * The event handler won't see a completion for this TD anymore,
8136 * so remove it from the endpoint ring's TD list. Keep it in
8137 * the cancelled TD list for URB completion later.
8138 */
8139 - list_del(&cur_td->td_list);
8140 + list_del_init(&cur_td->td_list);
8141 }
8142 last_unlinked_td = cur_td;
8143 xhci_stop_watchdog_timer_in_irq(xhci, ep);
8144 @@ -757,7 +772,7 @@ remove_finished_td:
8145 do {
8146 cur_td = list_entry(ep->cancelled_td_list.next,
8147 struct xhci_td, cancelled_td_list);
8148 - list_del(&cur_td->cancelled_td_list);
8149 + list_del_init(&cur_td->cancelled_td_list);
8150
8151 /* Clean up the cancelled URB */
8152 /* Doesn't matter what we pass for status, since the core will
8153 @@ -865,9 +880,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
8154 cur_td = list_first_entry(&ring->td_list,
8155 struct xhci_td,
8156 td_list);
8157 - list_del(&cur_td->td_list);
8158 + list_del_init(&cur_td->td_list);
8159 if (!list_empty(&cur_td->cancelled_td_list))
8160 - list_del(&cur_td->cancelled_td_list);
8161 + list_del_init(&cur_td->cancelled_td_list);
8162 xhci_giveback_urb_in_irq(xhci, cur_td,
8163 -ESHUTDOWN, "killed");
8164 }
8165 @@ -876,7 +891,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
8166 &temp_ep->cancelled_td_list,
8167 struct xhci_td,
8168 cancelled_td_list);
8169 - list_del(&cur_td->cancelled_td_list);
8170 + list_del_init(&cur_td->cancelled_td_list);
8171 xhci_giveback_urb_in_irq(xhci, cur_td,
8172 -ESHUTDOWN, "killed");
8173 }
8174 @@ -1567,10 +1582,10 @@ td_cleanup:
8175 else
8176 *status = 0;
8177 }
8178 - list_del(&td->td_list);
8179 + list_del_init(&td->td_list);
8180 /* Was this TD slated to be cancelled but completed anyway? */
8181 if (!list_empty(&td->cancelled_td_list))
8182 - list_del(&td->cancelled_td_list);
8183 + list_del_init(&td->cancelled_td_list);
8184
8185 urb_priv->td_cnt++;
8186 /* Giveback the urb when all the tds are completed */
8187 @@ -2508,11 +2523,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
8188
8189 if (td_index == 0) {
8190 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
8191 - if (unlikely(ret)) {
8192 - xhci_urb_free_priv(xhci, urb_priv);
8193 - urb->hcpriv = NULL;
8194 + if (unlikely(ret))
8195 return ret;
8196 - }
8197 }
8198
8199 td->urb = urb;
8200 @@ -2680,6 +2692,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
8201 {
8202 int packets_transferred;
8203
8204 + /* One TRB with a zero-length data packet. */
8205 + if (running_total == 0 && trb_buff_len == 0)
8206 + return 0;
8207 +
8208 /* All the TRB queueing functions don't count the current TRB in
8209 * running_total.
8210 */
8211 @@ -3121,20 +3137,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
8212 struct urb *urb, int i)
8213 {
8214 int num_trbs = 0;
8215 - u64 addr, td_len, running_total;
8216 + u64 addr, td_len;
8217
8218 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
8219 td_len = urb->iso_frame_desc[i].length;
8220
8221 - running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
8222 - running_total &= TRB_MAX_BUFF_SIZE - 1;
8223 - if (running_total != 0)
8224 - num_trbs++;
8225 -
8226 - while (running_total < td_len) {
8227 + num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
8228 + TRB_MAX_BUFF_SIZE);
8229 + if (num_trbs == 0)
8230 num_trbs++;
8231 - running_total += TRB_MAX_BUFF_SIZE;
8232 - }
8233
8234 return num_trbs;
8235 }
8236 @@ -3234,6 +3245,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8237 start_trb = &ep_ring->enqueue->generic;
8238 start_cycle = ep_ring->cycle_state;
8239
8240 + urb_priv = urb->hcpriv;
8241 /* Queue the first TRB, even if it's zero-length */
8242 for (i = 0; i < num_tds; i++) {
8243 unsigned int total_packet_count;
8244 @@ -3245,9 +3257,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8245 addr = start_addr + urb->iso_frame_desc[i].offset;
8246 td_len = urb->iso_frame_desc[i].length;
8247 td_remain_len = td_len;
8248 - /* FIXME: Ignoring zero-length packets, can those happen? */
8249 total_packet_count = roundup(td_len,
8250 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
8251 + /* A zero-length transfer still involves at least one packet. */
8252 + if (total_packet_count == 0)
8253 + total_packet_count++;
8254 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
8255 total_packet_count);
8256 residue = xhci_get_last_burst_packet_count(xhci,
8257 @@ -3257,12 +3271,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8258
8259 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
8260 urb->stream_id, trbs_per_td, urb, i, mem_flags);
8261 - if (ret < 0)
8262 - return ret;
8263 + if (ret < 0) {
8264 + if (i == 0)
8265 + return ret;
8266 + goto cleanup;
8267 + }
8268
8269 - urb_priv = urb->hcpriv;
8270 td = urb_priv->td[i];
8271 -
8272 for (j = 0; j < trbs_per_td; j++) {
8273 u32 remainder = 0;
8274 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
8275 @@ -3352,6 +3367,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8276 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
8277 start_cycle, start_trb);
8278 return 0;
8279 +cleanup:
8280 + /* Clean up a partially enqueued isoc transfer. */
8281 +
8282 + for (i--; i >= 0; i--)
8283 + list_del_init(&urb_priv->td[i]->td_list);
8284 +
8285 + /* Use the first TD as a temporary variable to turn the TDs we've queued
8286 + * into No-ops with a software-owned cycle bit. That way the hardware
8287 + * won't accidentally start executing bogus TDs when we partially
8288 + * overwrite them. td->first_trb and td->start_seg are already set.
8289 + */
8290 + urb_priv->td[0]->last_trb = ep_ring->enqueue;
8291 + /* Every TRB except the first & last will have its cycle bit flipped. */
8292 + td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
8293 +
8294 + /* Reset the ring enqueue back to the first TRB and its cycle bit. */
8295 + ep_ring->enqueue = urb_priv->td[0]->first_trb;
8296 + ep_ring->enq_seg = urb_priv->td[0]->start_seg;
8297 + ep_ring->cycle_state = start_cycle;
8298 + usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
8299 + return ret;
8300 }
8301
8302 /*
8303 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
8304 index 9824761..7ea48b3 100644
8305 --- a/drivers/usb/host/xhci.c
8306 +++ b/drivers/usb/host/xhci.c
8307 @@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
8308 if (urb->dev->speed == USB_SPEED_FULL) {
8309 ret = xhci_check_maxpacket(xhci, slot_id,
8310 ep_index, urb);
8311 - if (ret < 0)
8312 + if (ret < 0) {
8313 + xhci_urb_free_priv(xhci, urb_priv);
8314 + urb->hcpriv = NULL;
8315 return ret;
8316 + }
8317 }
8318
8319 /* We have a spinlock and interrupts disabled, so we must pass
8320 @@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
8321 goto dying;
8322 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
8323 slot_id, ep_index);
8324 + if (ret)
8325 + goto free_priv;
8326 spin_unlock_irqrestore(&xhci->lock, flags);
8327 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
8328 spin_lock_irqsave(&xhci->lock, flags);
8329 @@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
8330 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
8331 slot_id, ep_index);
8332 }
8333 + if (ret)
8334 + goto free_priv;
8335 spin_unlock_irqrestore(&xhci->lock, flags);
8336 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
8337 spin_lock_irqsave(&xhci->lock, flags);
8338 @@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
8339 goto dying;
8340 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
8341 slot_id, ep_index);
8342 + if (ret)
8343 + goto free_priv;
8344 spin_unlock_irqrestore(&xhci->lock, flags);
8345 } else {
8346 spin_lock_irqsave(&xhci->lock, flags);
8347 @@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
8348 goto dying;
8349 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
8350 slot_id, ep_index);
8351 + if (ret)
8352 + goto free_priv;
8353 spin_unlock_irqrestore(&xhci->lock, flags);
8354 }
8355 exit:
8356 return ret;
8357 dying:
8358 - xhci_urb_free_priv(xhci, urb_priv);
8359 - urb->hcpriv = NULL;
8360 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
8361 "non-responsive xHCI host.\n",
8362 urb->ep->desc.bEndpointAddress, urb);
8363 + ret = -ESHUTDOWN;
8364 +free_priv:
8365 + xhci_urb_free_priv(xhci, urb_priv);
8366 + urb->hcpriv = NULL;
8367 spin_unlock_irqrestore(&xhci->lock, flags);
8368 - return -ESHUTDOWN;
8369 + return ret;
8370 }
8371
8372 /* Get the right ring for the given URB.
8373 @@ -1239,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
8374 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
8375 xhci_dbg(xhci, "HW died, freeing TD.\n");
8376 urb_priv = urb->hcpriv;
8377 + for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
8378 + td = urb_priv->td[i];
8379 + if (!list_empty(&td->td_list))
8380 + list_del_init(&td->td_list);
8381 + if (!list_empty(&td->cancelled_td_list))
8382 + list_del_init(&td->cancelled_td_list);
8383 + }
8384
8385 usb_hcd_unlink_urb_from_ep(hcd, urb);
8386 spin_unlock_irqrestore(&xhci->lock, flags);
8387 diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
8388 index 149f3f3..318fb4e 100644
8389 --- a/drivers/usb/musb/cppi_dma.c
8390 +++ b/drivers/usb/musb/cppi_dma.c
8391 @@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)
8392 struct cppi *controller;
8393 void __iomem *tibase;
8394 int i;
8395 + struct musb *musb;
8396
8397 controller = container_of(c, struct cppi, controller);
8398 + musb = controller->musb;
8399
8400 tibase = controller->tibase;
8401 /* DISABLE INDIVIDUAL CHANNEL Interrupts */
8402 @@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,
8403 u8 index;
8404 struct cppi_channel *cppi_ch;
8405 void __iomem *tibase;
8406 + struct musb *musb;
8407
8408 controller = container_of(c, struct cppi, controller);
8409 tibase = controller->tibase;
8410 + musb = controller->musb;
8411
8412 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
8413 index = ep->epnum - 1;
8414 @@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)
8415 c = container_of(channel, struct cppi_channel, channel);
8416 tibase = c->controller->tibase;
8417 if (!c->hw_ep)
8418 - dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c);
8419 + dev_dbg(c->controller->musb->controller,
8420 + "releasing idle DMA channel %p\n", c);
8421 else if (!c->transmit)
8422 core_rxirq_enable(tibase, c->index + 1);
8423
8424 @@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
8425
8426 musb_ep_select(base, c->index + 1);
8427
8428 - DBG(level, "RX DMA%d%s: %d left, csr %04x, "
8429 - "%08x H%08x S%08x C%08x, "
8430 - "B%08x L%08x %08x .. %08x"
8431 - "\n",
8432 + dev_dbg(c->controller->musb->controller,
8433 + "RX DMA%d%s: %d left, csr %04x, "
8434 + "%08x H%08x S%08x C%08x, "
8435 + "B%08x L%08x %08x .. %08x"
8436 + "\n",
8437 c->index, tag,
8438 musb_readl(c->controller->tibase,
8439 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
8440 @@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
8441
8442 musb_ep_select(base, c->index + 1);
8443
8444 - DBG(level, "TX DMA%d%s: csr %04x, "
8445 - "H%08x S%08x C%08x %08x, "
8446 - "F%08x L%08x .. %08x"
8447 - "\n",
8448 + dev_dbg(c->controller->musb->controller,
8449 + "TX DMA%d%s: csr %04x, "
8450 + "H%08x S%08x C%08x %08x, "
8451 + "F%08x L%08x .. %08x"
8452 + "\n",
8453 c->index, tag,
8454 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
8455
8456 @@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
8457 int i;
8458 dma_addr_t safe2ack;
8459 void __iomem *regs = rx->hw_ep->regs;
8460 + struct musb *musb = cppi->musb;
8461
8462 cppi_dump_rx(6, rx, "/K");
8463
8464 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
8465 index 9afb361..f968a3d 100644
8466 --- a/drivers/usb/serial/ftdi_sio.c
8467 +++ b/drivers/usb/serial/ftdi_sio.c
8468 @@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial);
8469 static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
8470 static int ftdi_NDI_device_setup(struct usb_serial *serial);
8471 static int ftdi_stmclite_probe(struct usb_serial *serial);
8472 +static int ftdi_8u2232c_probe(struct usb_serial *serial);
8473 static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
8474 static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
8475
8476 @@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
8477 .probe = ftdi_stmclite_probe,
8478 };
8479
8480 +static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
8481 + .probe = ftdi_8u2232c_probe,
8482 +};
8483 +
8484 /*
8485 * The 8U232AM has the same API as the sio except for:
8486 * - it can support MUCH higher baudrates; up to:
8487 @@ -177,7 +182,8 @@ static struct usb_device_id id_table_combined [] = {
8488 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
8489 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
8490 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
8491 - { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
8492 + { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
8493 + .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
8494 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
8495 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
8496 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
8497 @@ -1733,6 +1739,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
8498 return 0;
8499 }
8500
8501 +static int ftdi_8u2232c_probe(struct usb_serial *serial)
8502 +{
8503 + struct usb_device *udev = serial->dev;
8504 +
8505 + dbg("%s", __func__);
8506 +
8507 + if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
8508 + return ftdi_jtag_probe(serial);
8509 +
8510 + return 0;
8511 +}
8512 +
8513 /*
8514 * First and second port on STMCLiteadaptors is reserved for JTAG interface
8515 * and the forth port for pio
8516 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
8517 index 8156561..fe22e90 100644
8518 --- a/drivers/usb/serial/option.c
8519 +++ b/drivers/usb/serial/option.c
8520 @@ -148,6 +148,8 @@ static void option_instat_callback(struct urb *urb);
8521 #define HUAWEI_PRODUCT_K4505 0x1464
8522 #define HUAWEI_PRODUCT_K3765 0x1465
8523 #define HUAWEI_PRODUCT_E14AC 0x14AC
8524 +#define HUAWEI_PRODUCT_K3806 0x14AE
8525 +#define HUAWEI_PRODUCT_K4605 0x14C6
8526 #define HUAWEI_PRODUCT_K3770 0x14C9
8527 #define HUAWEI_PRODUCT_K3771 0x14CA
8528 #define HUAWEI_PRODUCT_K4510 0x14CB
8529 @@ -416,6 +418,56 @@ static void option_instat_callback(struct urb *urb);
8530 #define SAMSUNG_VENDOR_ID 0x04e8
8531 #define SAMSUNG_PRODUCT_GT_B3730 0x6889
8532
8533 +/* YUGA products www.yuga-info.com*/
8534 +#define YUGA_VENDOR_ID 0x257A
8535 +#define YUGA_PRODUCT_CEM600 0x1601
8536 +#define YUGA_PRODUCT_CEM610 0x1602
8537 +#define YUGA_PRODUCT_CEM500 0x1603
8538 +#define YUGA_PRODUCT_CEM510 0x1604
8539 +#define YUGA_PRODUCT_CEM800 0x1605
8540 +#define YUGA_PRODUCT_CEM900 0x1606
8541 +
8542 +#define YUGA_PRODUCT_CEU818 0x1607
8543 +#define YUGA_PRODUCT_CEU816 0x1608
8544 +#define YUGA_PRODUCT_CEU828 0x1609
8545 +#define YUGA_PRODUCT_CEU826 0x160A
8546 +#define YUGA_PRODUCT_CEU518 0x160B
8547 +#define YUGA_PRODUCT_CEU516 0x160C
8548 +#define YUGA_PRODUCT_CEU528 0x160D
8549 +#define YUGA_PRODUCT_CEU526 0x160F
8550 +
8551 +#define YUGA_PRODUCT_CWM600 0x2601
8552 +#define YUGA_PRODUCT_CWM610 0x2602
8553 +#define YUGA_PRODUCT_CWM500 0x2603
8554 +#define YUGA_PRODUCT_CWM510 0x2604
8555 +#define YUGA_PRODUCT_CWM800 0x2605
8556 +#define YUGA_PRODUCT_CWM900 0x2606
8557 +
8558 +#define YUGA_PRODUCT_CWU718 0x2607
8559 +#define YUGA_PRODUCT_CWU716 0x2608
8560 +#define YUGA_PRODUCT_CWU728 0x2609
8561 +#define YUGA_PRODUCT_CWU726 0x260A
8562 +#define YUGA_PRODUCT_CWU518 0x260B
8563 +#define YUGA_PRODUCT_CWU516 0x260C
8564 +#define YUGA_PRODUCT_CWU528 0x260D
8565 +#define YUGA_PRODUCT_CWU526 0x260F
8566 +
8567 +#define YUGA_PRODUCT_CLM600 0x2601
8568 +#define YUGA_PRODUCT_CLM610 0x2602
8569 +#define YUGA_PRODUCT_CLM500 0x2603
8570 +#define YUGA_PRODUCT_CLM510 0x2604
8571 +#define YUGA_PRODUCT_CLM800 0x2605
8572 +#define YUGA_PRODUCT_CLM900 0x2606
8573 +
8574 +#define YUGA_PRODUCT_CLU718 0x2607
8575 +#define YUGA_PRODUCT_CLU716 0x2608
8576 +#define YUGA_PRODUCT_CLU728 0x2609
8577 +#define YUGA_PRODUCT_CLU726 0x260A
8578 +#define YUGA_PRODUCT_CLU518 0x260B
8579 +#define YUGA_PRODUCT_CLU516 0x260C
8580 +#define YUGA_PRODUCT_CLU528 0x260D
8581 +#define YUGA_PRODUCT_CLU526 0x260F
8582 +
8583 /* some devices interfaces need special handling due to a number of reasons */
8584 enum option_blacklist_reason {
8585 OPTION_BLACKLIST_NONE = 0,
8586 @@ -551,6 +603,8 @@ static const struct usb_device_id option_ids[] = {
8587 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
8588 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
8589 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
8590 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
8591 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
8592 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
8593 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
8594 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
8595 @@ -1005,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {
8596 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
8597 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
8598 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
8599 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
8600 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
8601 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
8602 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
8603 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
8604 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
8605 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
8606 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
8607 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
8608 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
8609 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
8610 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
8611 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
8612 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
8613 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
8614 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
8615 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
8616 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
8617 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
8618 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
8619 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
8620 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
8621 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
8622 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
8623 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
8624 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
8625 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
8626 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
8627 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
8628 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
8629 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
8630 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
8631 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
8632 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
8633 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
8634 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
8635 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
8636 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
8637 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
8638 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
8639 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
8640 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
8641 { } /* Terminating entry */
8642 };
8643 MODULE_DEVICE_TABLE(usb, option_ids);
8644 @@ -1134,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,
8645 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
8646 return -ENODEV;
8647
8648 - /* Don't bind network interfaces on Huawei K3765 & K4505 */
8649 + /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
8650 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
8651 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
8652 - serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) &&
8653 - serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
8654 + serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
8655 + serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
8656 + (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
8657 + serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
8658 return -ENODEV;
8659
8660 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
8661 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
8662 index 0c20831..1d33260 100644
8663 --- a/drivers/usb/serial/pl2303.c
8664 +++ b/drivers/usb/serial/pl2303.c
8665 @@ -343,10 +343,28 @@ static void pl2303_set_termios(struct tty_struct *tty,
8666 baud = 6000000;
8667 }
8668 dbg("%s - baud set = %d", __func__, baud);
8669 - buf[0] = baud & 0xff;
8670 - buf[1] = (baud >> 8) & 0xff;
8671 - buf[2] = (baud >> 16) & 0xff;
8672 - buf[3] = (baud >> 24) & 0xff;
8673 + if (baud <= 115200) {
8674 + buf[0] = baud & 0xff;
8675 + buf[1] = (baud >> 8) & 0xff;
8676 + buf[2] = (baud >> 16) & 0xff;
8677 + buf[3] = (baud >> 24) & 0xff;
8678 + } else {
8679 + /* apparently the formula for higher speeds is:
8680 + * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
8681 + */
8682 + unsigned tmp = 12*1000*1000*32 / baud;
8683 + buf[3] = 0x80;
8684 + buf[2] = 0;
8685 + buf[1] = (tmp >= 256);
8686 + while (tmp >= 256) {
8687 + tmp >>= 2;
8688 + buf[1] <<= 1;
8689 + }
8690 + if (tmp > 256) {
8691 + tmp %= 256;
8692 + }
8693 + buf[0] = tmp;
8694 + }
8695 }
8696
8697 /* For reference buf[4]=0 is 1 stop bits */
8698 diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
8699 index 32549d1..dcaab90 100644
8700 --- a/drivers/video/savage/savagefb.h
8701 +++ b/drivers/video/savage/savagefb.h
8702 @@ -55,7 +55,7 @@
8703
8704 #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
8705
8706 -#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
8707 +#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR))
8708
8709 #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
8710
8711 diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
8712 index e0c2807..181fa81 100644
8713 --- a/drivers/zorro/zorro.c
8714 +++ b/drivers/zorro/zorro.c
8715 @@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
8716 }
8717 platform_set_drvdata(pdev, bus);
8718
8719 - /* Register all devices */
8720 pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
8721 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
8722
8723 + /* First identify all devices ... */
8724 for (i = 0; i < zorro_num_autocon; i++) {
8725 z = &zorro_autocon[i];
8726 z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
8727 @@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
8728 dev_set_name(&z->dev, "%02x", i);
8729 z->dev.parent = &bus->dev;
8730 z->dev.bus = &zorro_bus_type;
8731 + }
8732 +
8733 + /* ... then register them */
8734 + for (i = 0; i < zorro_num_autocon; i++) {
8735 + z = &zorro_autocon[i];
8736 error = device_register(&z->dev);
8737 if (error) {
8738 dev_err(&bus->dev, "Error registering device %s\n",
8739 diff --git a/fs/9p/acl.c b/fs/9p/acl.c
8740 index 535ab6e..4a866cd 100644
8741 --- a/fs/9p/acl.c
8742 +++ b/fs/9p/acl.c
8743 @@ -185,12 +185,15 @@ int v9fs_acl_chmod(struct dentry *dentry)
8744 }
8745
8746 int v9fs_set_create_acl(struct dentry *dentry,
8747 - struct posix_acl *dpacl, struct posix_acl *pacl)
8748 + struct posix_acl **dpacl, struct posix_acl **pacl)
8749 {
8750 - v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
8751 - v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
8752 - posix_acl_release(dpacl);
8753 - posix_acl_release(pacl);
8754 + if (dentry) {
8755 + v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, *dpacl);
8756 + v9fs_set_acl(dentry, ACL_TYPE_ACCESS, *pacl);
8757 + }
8758 + posix_acl_release(*dpacl);
8759 + posix_acl_release(*pacl);
8760 + *dpacl = *pacl = NULL;
8761 return 0;
8762 }
8763
8764 @@ -212,11 +215,11 @@ int v9fs_acl_mode(struct inode *dir, mode_t *modep,
8765 struct posix_acl *clone;
8766
8767 if (S_ISDIR(mode))
8768 - *dpacl = acl;
8769 + *dpacl = posix_acl_dup(acl);
8770 clone = posix_acl_clone(acl, GFP_NOFS);
8771 - retval = -ENOMEM;
8772 + posix_acl_release(acl);
8773 if (!clone)
8774 - goto cleanup;
8775 + return -ENOMEM;
8776
8777 retval = posix_acl_create_masq(clone, &mode);
8778 if (retval < 0) {
8779 @@ -225,11 +228,12 @@ int v9fs_acl_mode(struct inode *dir, mode_t *modep,
8780 }
8781 if (retval > 0)
8782 *pacl = clone;
8783 + else
8784 + posix_acl_release(clone);
8785 }
8786 *modep = mode;
8787 return 0;
8788 cleanup:
8789 - posix_acl_release(acl);
8790 return retval;
8791
8792 }
8793 diff --git a/fs/9p/acl.h b/fs/9p/acl.h
8794 index 7ef3ac9..c47ea9c 100644
8795 --- a/fs/9p/acl.h
8796 +++ b/fs/9p/acl.h
8797 @@ -19,7 +19,7 @@ extern int v9fs_get_acl(struct inode *, struct p9_fid *);
8798 extern int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags);
8799 extern int v9fs_acl_chmod(struct dentry *);
8800 extern int v9fs_set_create_acl(struct dentry *,
8801 - struct posix_acl *, struct posix_acl *);
8802 + struct posix_acl **, struct posix_acl **);
8803 extern int v9fs_acl_mode(struct inode *dir, mode_t *modep,
8804 struct posix_acl **dpacl, struct posix_acl **pacl);
8805 #else
8806 @@ -33,8 +33,8 @@ static inline int v9fs_acl_chmod(struct dentry *dentry)
8807 return 0;
8808 }
8809 static inline int v9fs_set_create_acl(struct dentry *dentry,
8810 - struct posix_acl *dpacl,
8811 - struct posix_acl *pacl)
8812 + struct posix_acl **dpacl,
8813 + struct posix_acl **pacl)
8814 {
8815 return 0;
8816 }
8817 diff --git a/fs/9p/cache.c b/fs/9p/cache.c
8818 index 5b335c5..945aa5f 100644
8819 --- a/fs/9p/cache.c
8820 +++ b/fs/9p/cache.c
8821 @@ -108,11 +108,10 @@ static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
8822 void *buffer, uint16_t bufmax)
8823 {
8824 const struct v9fs_inode *v9inode = cookie_netfs_data;
8825 - memcpy(buffer, &v9inode->fscache_key->path,
8826 - sizeof(v9inode->fscache_key->path));
8827 + memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
8828 P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
8829 - v9inode->fscache_key->path);
8830 - return sizeof(v9inode->fscache_key->path);
8831 + v9inode->qid.path);
8832 + return sizeof(v9inode->qid.path);
8833 }
8834
8835 static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
8836 @@ -129,11 +128,10 @@ static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
8837 void *buffer, uint16_t buflen)
8838 {
8839 const struct v9fs_inode *v9inode = cookie_netfs_data;
8840 - memcpy(buffer, &v9inode->fscache_key->version,
8841 - sizeof(v9inode->fscache_key->version));
8842 + memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
8843 P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
8844 - v9inode->fscache_key->version);
8845 - return sizeof(v9inode->fscache_key->version);
8846 + v9inode->qid.version);
8847 + return sizeof(v9inode->qid.version);
8848 }
8849
8850 static enum
8851 @@ -143,11 +141,11 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
8852 {
8853 const struct v9fs_inode *v9inode = cookie_netfs_data;
8854
8855 - if (buflen != sizeof(v9inode->fscache_key->version))
8856 + if (buflen != sizeof(v9inode->qid.version))
8857 return FSCACHE_CHECKAUX_OBSOLETE;
8858
8859 - if (memcmp(buffer, &v9inode->fscache_key->version,
8860 - sizeof(v9inode->fscache_key->version)))
8861 + if (memcmp(buffer, &v9inode->qid.version,
8862 + sizeof(v9inode->qid.version)))
8863 return FSCACHE_CHECKAUX_OBSOLETE;
8864
8865 return FSCACHE_CHECKAUX_OKAY;
8866 diff --git a/fs/9p/cache.h b/fs/9p/cache.h
8867 index 049507a..40cc54c 100644
8868 --- a/fs/9p/cache.h
8869 +++ b/fs/9p/cache.h
8870 @@ -93,15 +93,6 @@ static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
8871 BUG_ON(PageFsCache(page));
8872 }
8873
8874 -static inline void v9fs_fscache_set_key(struct inode *inode,
8875 - struct p9_qid *qid)
8876 -{
8877 - struct v9fs_inode *v9inode = V9FS_I(inode);
8878 - spin_lock(&v9inode->fscache_lock);
8879 - v9inode->fscache_key = qid;
8880 - spin_unlock(&v9inode->fscache_lock);
8881 -}
8882 -
8883 static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
8884 struct page *page)
8885 {
8886 diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
8887 index c82b017..ef96618 100644
8888 --- a/fs/9p/v9fs.c
8889 +++ b/fs/9p/v9fs.c
8890 @@ -78,6 +78,25 @@ static const match_table_t tokens = {
8891 {Opt_err, NULL}
8892 };
8893
8894 +/* Interpret mount options for cache mode */
8895 +static int get_cache_mode(char *s)
8896 +{
8897 + int version = -EINVAL;
8898 +
8899 + if (!strcmp(s, "loose")) {
8900 + version = CACHE_LOOSE;
8901 + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: loose\n");
8902 + } else if (!strcmp(s, "fscache")) {
8903 + version = CACHE_FSCACHE;
8904 + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: fscache\n");
8905 + } else if (!strcmp(s, "none")) {
8906 + version = CACHE_NONE;
8907 + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: none\n");
8908 + } else
8909 + printk(KERN_INFO "9p: Unknown Cache mode %s.\n", s);
8910 + return version;
8911 +}
8912 +
8913 /**
8914 * v9fs_parse_options - parse mount options into session structure
8915 * @v9ses: existing v9fs session information
8916 @@ -97,7 +116,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
8917 /* setup defaults */
8918 v9ses->afid = ~0;
8919 v9ses->debug = 0;
8920 - v9ses->cache = 0;
8921 + v9ses->cache = CACHE_NONE;
8922 #ifdef CONFIG_9P_FSCACHE
8923 v9ses->cachetag = NULL;
8924 #endif
8925 @@ -171,13 +190,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
8926 "problem allocating copy of cache arg\n");
8927 goto free_and_return;
8928 }
8929 + ret = get_cache_mode(s);
8930 + if (ret == -EINVAL) {
8931 + kfree(s);
8932 + goto free_and_return;
8933 + }
8934
8935 - if (strcmp(s, "loose") == 0)
8936 - v9ses->cache = CACHE_LOOSE;
8937 - else if (strcmp(s, "fscache") == 0)
8938 - v9ses->cache = CACHE_FSCACHE;
8939 - else
8940 - v9ses->cache = CACHE_NONE;
8941 + v9ses->cache = ret;
8942 kfree(s);
8943 break;
8944
8945 @@ -200,9 +219,15 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
8946 } else {
8947 v9ses->flags |= V9FS_ACCESS_SINGLE;
8948 v9ses->uid = simple_strtoul(s, &e, 10);
8949 - if (*e != '\0')
8950 - v9ses->uid = ~0;
8951 + if (*e != '\0') {
8952 + ret = -EINVAL;
8953 + printk(KERN_INFO "9p: Unknown access "
8954 + "argument %s.\n", s);
8955 + kfree(s);
8956 + goto free_and_return;
8957 + }
8958 }
8959 +
8960 kfree(s);
8961 break;
8962
8963 @@ -487,8 +512,8 @@ static void v9fs_inode_init_once(void *foo)
8964 struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
8965 #ifdef CONFIG_9P_FSCACHE
8966 v9inode->fscache = NULL;
8967 - v9inode->fscache_key = NULL;
8968 #endif
8969 + memset(&v9inode->qid, 0, sizeof(v9inode->qid));
8970 inode_init_once(&v9inode->vfs_inode);
8971 }
8972
8973 diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
8974 index e5ebedf..e78956c 100644
8975 --- a/fs/9p/v9fs.h
8976 +++ b/fs/9p/v9fs.h
8977 @@ -125,8 +125,8 @@ struct v9fs_inode {
8978 #ifdef CONFIG_9P_FSCACHE
8979 spinlock_t fscache_lock;
8980 struct fscache_cookie *fscache;
8981 - struct p9_qid *fscache_key;
8982 #endif
8983 + struct p9_qid qid;
8984 unsigned int cache_validity;
8985 struct p9_fid *writeback_fid;
8986 struct mutex v_mutex;
8987 @@ -153,13 +153,13 @@ extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
8988 void *p);
8989 extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
8990 struct p9_fid *fid,
8991 - struct super_block *sb);
8992 + struct super_block *sb, int new);
8993 extern const struct inode_operations v9fs_dir_inode_operations_dotl;
8994 extern const struct inode_operations v9fs_file_inode_operations_dotl;
8995 extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
8996 extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
8997 struct p9_fid *fid,
8998 - struct super_block *sb);
8999 + struct super_block *sb, int new);
9000
9001 /* other default globals */
9002 #define V9FS_PORT 564
9003 @@ -201,8 +201,27 @@ v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
9004 struct super_block *sb)
9005 {
9006 if (v9fs_proto_dotl(v9ses))
9007 - return v9fs_inode_from_fid_dotl(v9ses, fid, sb);
9008 + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 0);
9009 else
9010 - return v9fs_inode_from_fid(v9ses, fid, sb);
9011 + return v9fs_inode_from_fid(v9ses, fid, sb, 0);
9012 }
9013 +
9014 +/**
9015 + * v9fs_get_new_inode_from_fid - Helper routine to populate an inode by
9016 + * issuing a attribute request
9017 + * @v9ses: session information
9018 + * @fid: fid to issue attribute request for
9019 + * @sb: superblock on which to create inode
9020 + *
9021 + */
9022 +static inline struct inode *
9023 +v9fs_get_new_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
9024 + struct super_block *sb)
9025 +{
9026 + if (v9fs_proto_dotl(v9ses))
9027 + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 1);
9028 + else
9029 + return v9fs_inode_from_fid(v9ses, fid, sb, 1);
9030 +}
9031 +
9032 #endif
9033 diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
9034 index 4014160..f9a28ea 100644
9035 --- a/fs/9p/v9fs_vfs.h
9036 +++ b/fs/9p/v9fs_vfs.h
9037 @@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
9038
9039 struct inode *v9fs_alloc_inode(struct super_block *sb);
9040 void v9fs_destroy_inode(struct inode *inode);
9041 -struct inode *v9fs_get_inode(struct super_block *sb, int mode);
9042 +struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
9043 int v9fs_init_inode(struct v9fs_session_info *v9ses,
9044 - struct inode *inode, int mode);
9045 + struct inode *inode, int mode, dev_t);
9046 void v9fs_evict_inode(struct inode *inode);
9047 ino_t v9fs_qid2ino(struct p9_qid *qid);
9048 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
9049 @@ -82,4 +82,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
9050 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
9051 return;
9052 }
9053 +
9054 +int v9fs_open_to_dotl_flags(int flags);
9055 #endif
9056 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
9057 index ffed558..9d6e168 100644
9058 --- a/fs/9p/vfs_file.c
9059 +++ b/fs/9p/vfs_file.c
9060 @@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
9061 v9inode = V9FS_I(inode);
9062 v9ses = v9fs_inode2v9ses(inode);
9063 if (v9fs_proto_dotl(v9ses))
9064 - omode = file->f_flags;
9065 + omode = v9fs_open_to_dotl_flags(file->f_flags);
9066 else
9067 omode = v9fs_uflags2omode(file->f_flags,
9068 v9fs_proto_dotu(v9ses));
9069 @@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
9070
9071 /* convert posix lock to p9 tlock args */
9072 memset(&flock, 0, sizeof(flock));
9073 - flock.type = fl->fl_type;
9074 + /* map the lock type */
9075 + switch (fl->fl_type) {
9076 + case F_RDLCK:
9077 + flock.type = P9_LOCK_TYPE_RDLCK;
9078 + break;
9079 + case F_WRLCK:
9080 + flock.type = P9_LOCK_TYPE_WRLCK;
9081 + break;
9082 + case F_UNLCK:
9083 + flock.type = P9_LOCK_TYPE_UNLCK;
9084 + break;
9085 + }
9086 flock.start = fl->fl_start;
9087 if (fl->fl_end == OFFSET_MAX)
9088 flock.length = 0;
9089 @@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
9090
9091 /* convert posix lock to p9 tgetlock args */
9092 memset(&glock, 0, sizeof(glock));
9093 - glock.type = fl->fl_type;
9094 + glock.type = P9_LOCK_TYPE_UNLCK;
9095 glock.start = fl->fl_start;
9096 if (fl->fl_end == OFFSET_MAX)
9097 glock.length = 0;
9098 @@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
9099 res = p9_client_getlock_dotl(fid, &glock);
9100 if (res < 0)
9101 return res;
9102 - if (glock.type != F_UNLCK) {
9103 - fl->fl_type = glock.type;
9104 + /* map 9p lock type to os lock type */
9105 + switch (glock.type) {
9106 + case P9_LOCK_TYPE_RDLCK:
9107 + fl->fl_type = F_RDLCK;
9108 + break;
9109 + case P9_LOCK_TYPE_WRLCK:
9110 + fl->fl_type = F_WRLCK;
9111 + break;
9112 + case P9_LOCK_TYPE_UNLCK:
9113 + fl->fl_type = F_UNLCK;
9114 + break;
9115 + }
9116 + if (glock.type != P9_LOCK_TYPE_UNLCK) {
9117 fl->fl_start = glock.start;
9118 if (glock.length == 0)
9119 fl->fl_end = OFFSET_MAX;
9120 else
9121 fl->fl_end = glock.start + glock.length - 1;
9122 fl->fl_pid = glock.proc_id;
9123 - } else
9124 - fl->fl_type = F_UNLCK;
9125 -
9126 + }
9127 return res;
9128 }
9129
9130 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
9131 index 7f6c677..c72e20c 100644
9132 --- a/fs/9p/vfs_inode.c
9133 +++ b/fs/9p/vfs_inode.c
9134 @@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
9135 /**
9136 * p9mode2unixmode- convert plan9 mode bits to unix mode bits
9137 * @v9ses: v9fs session information
9138 - * @mode: mode to convert
9139 + * @stat: p9_wstat from which mode need to be derived
9140 + * @rdev: major number, minor number in case of device files.
9141 *
9142 */
9143 -
9144 -static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
9145 +static int p9mode2unixmode(struct v9fs_session_info *v9ses,
9146 + struct p9_wstat *stat, dev_t *rdev)
9147 {
9148 int res;
9149 + int mode = stat->mode;
9150
9151 - res = mode & 0777;
9152 + res = mode & S_IALLUGO;
9153 + *rdev = 0;
9154
9155 if ((mode & P9_DMDIR) == P9_DMDIR)
9156 res |= S_IFDIR;
9157 @@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
9158 && (v9ses->nodev == 0))
9159 res |= S_IFIFO;
9160 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
9161 - && (v9ses->nodev == 0))
9162 - res |= S_IFBLK;
9163 - else
9164 + && (v9ses->nodev == 0)) {
9165 + char type = 0, ext[32];
9166 + int major = -1, minor = -1;
9167 +
9168 + strncpy(ext, stat->extension, sizeof(ext));
9169 + sscanf(ext, "%c %u %u", &type, &major, &minor);
9170 + switch (type) {
9171 + case 'c':
9172 + res |= S_IFCHR;
9173 + break;
9174 + case 'b':
9175 + res |= S_IFBLK;
9176 + break;
9177 + default:
9178 + P9_DPRINTK(P9_DEBUG_ERROR,
9179 + "Unknown special type %c %s\n", type,
9180 + stat->extension);
9181 + };
9182 + *rdev = MKDEV(major, minor);
9183 + } else
9184 res |= S_IFREG;
9185
9186 if (v9fs_proto_dotu(v9ses)) {
9187 @@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
9188 if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
9189 res |= S_ISVTX;
9190 }
9191 -
9192 return res;
9193 }
9194
9195 @@ -216,7 +235,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
9196 return NULL;
9197 #ifdef CONFIG_9P_FSCACHE
9198 v9inode->fscache = NULL;
9199 - v9inode->fscache_key = NULL;
9200 spin_lock_init(&v9inode->fscache_lock);
9201 #endif
9202 v9inode->writeback_fid = NULL;
9203 @@ -243,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
9204 }
9205
9206 int v9fs_init_inode(struct v9fs_session_info *v9ses,
9207 - struct inode *inode, int mode)
9208 + struct inode *inode, int mode, dev_t rdev)
9209 {
9210 int err = 0;
9211
9212 inode_init_owner(inode, NULL, mode);
9213 inode->i_blocks = 0;
9214 - inode->i_rdev = 0;
9215 + inode->i_rdev = rdev;
9216 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
9217 inode->i_mapping->a_ops = &v9fs_addr_operations;
9218
9219 @@ -336,7 +354,7 @@ error:
9220 *
9221 */
9222
9223 -struct inode *v9fs_get_inode(struct super_block *sb, int mode)
9224 +struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
9225 {
9226 int err;
9227 struct inode *inode;
9228 @@ -349,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
9229 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
9230 return ERR_PTR(-ENOMEM);
9231 }
9232 - err = v9fs_init_inode(v9ses, inode, mode);
9233 + err = v9fs_init_inode(v9ses, inode, mode, rdev);
9234 if (err) {
9235 iput(inode);
9236 return ERR_PTR(err);
9237 @@ -433,17 +451,62 @@ void v9fs_evict_inode(struct inode *inode)
9238 }
9239 }
9240
9241 +static int v9fs_test_inode(struct inode *inode, void *data)
9242 +{
9243 + int umode;
9244 + dev_t rdev;
9245 + struct v9fs_inode *v9inode = V9FS_I(inode);
9246 + struct p9_wstat *st = (struct p9_wstat *)data;
9247 + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
9248 +
9249 + umode = p9mode2unixmode(v9ses, st, &rdev);
9250 + /* don't match inode of different type */
9251 + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
9252 + return 0;
9253 +
9254 + /* compare qid details */
9255 + if (memcmp(&v9inode->qid.version,
9256 + &st->qid.version, sizeof(v9inode->qid.version)))
9257 + return 0;
9258 +
9259 + if (v9inode->qid.type != st->qid.type)
9260 + return 0;
9261 + return 1;
9262 +}
9263 +
9264 +static int v9fs_test_new_inode(struct inode *inode, void *data)
9265 +{
9266 + return 0;
9267 +}
9268 +
9269 +static int v9fs_set_inode(struct inode *inode, void *data)
9270 +{
9271 + struct v9fs_inode *v9inode = V9FS_I(inode);
9272 + struct p9_wstat *st = (struct p9_wstat *)data;
9273 +
9274 + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
9275 + return 0;
9276 +}
9277 +
9278 static struct inode *v9fs_qid_iget(struct super_block *sb,
9279 struct p9_qid *qid,
9280 - struct p9_wstat *st)
9281 + struct p9_wstat *st,
9282 + int new)
9283 {
9284 + dev_t rdev;
9285 int retval, umode;
9286 unsigned long i_ino;
9287 struct inode *inode;
9288 struct v9fs_session_info *v9ses = sb->s_fs_info;
9289 + int (*test)(struct inode *, void *);
9290 +
9291 + if (new)
9292 + test = v9fs_test_new_inode;
9293 + else
9294 + test = v9fs_test_inode;
9295
9296 i_ino = v9fs_qid2ino(qid);
9297 - inode = iget_locked(sb, i_ino);
9298 + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st);
9299 if (!inode)
9300 return ERR_PTR(-ENOMEM);
9301 if (!(inode->i_state & I_NEW))
9302 @@ -453,14 +516,14 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
9303 * FIXME!! we may need support for stale inodes
9304 * later.
9305 */
9306 - umode = p9mode2unixmode(v9ses, st->mode);
9307 - retval = v9fs_init_inode(v9ses, inode, umode);
9308 + inode->i_ino = i_ino;
9309 + umode = p9mode2unixmode(v9ses, st, &rdev);
9310 + retval = v9fs_init_inode(v9ses, inode, umode, rdev);
9311 if (retval)
9312 goto error;
9313
9314 v9fs_stat2inode(st, inode, sb);
9315 #ifdef CONFIG_9P_FSCACHE
9316 - v9fs_fscache_set_key(inode, &st->qid);
9317 v9fs_cache_inode_get_cookie(inode);
9318 #endif
9319 unlock_new_inode(inode);
9320 @@ -474,7 +537,7 @@ error:
9321
9322 struct inode *
9323 v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
9324 - struct super_block *sb)
9325 + struct super_block *sb, int new)
9326 {
9327 struct p9_wstat *st;
9328 struct inode *inode = NULL;
9329 @@ -483,7 +546,7 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
9330 if (IS_ERR(st))
9331 return ERR_CAST(st);
9332
9333 - inode = v9fs_qid_iget(sb, &st->qid, st);
9334 + inode = v9fs_qid_iget(sb, &st->qid, st, new);
9335 p9stat_free(st);
9336 kfree(st);
9337 return inode;
9338 @@ -585,19 +648,17 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
9339 }
9340
9341 /* instantiate inode and assign the unopened fid to the dentry */
9342 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9343 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9344 if (IS_ERR(inode)) {
9345 err = PTR_ERR(inode);
9346 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
9347 goto error;
9348 }
9349 - d_instantiate(dentry, inode);
9350 err = v9fs_fid_add(dentry, fid);
9351 if (err < 0)
9352 goto error;
9353 -
9354 + d_instantiate(dentry, inode);
9355 return ofid;
9356 -
9357 error:
9358 if (ofid)
9359 p9_client_clunk(ofid);
9360 @@ -738,6 +799,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
9361 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
9362 struct nameidata *nameidata)
9363 {
9364 + struct dentry *res;
9365 struct super_block *sb;
9366 struct v9fs_session_info *v9ses;
9367 struct p9_fid *dfid, *fid;
9368 @@ -769,22 +831,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
9369
9370 return ERR_PTR(result);
9371 }
9372 -
9373 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9374 + /*
9375 + * Make sure we don't use a wrong inode due to parallel
9376 + * unlink. For cached mode create calls request for new
9377 + * inode. But with cache disabled, lookup should do this.
9378 + */
9379 + if (v9ses->cache)
9380 + inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9381 + else
9382 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9383 if (IS_ERR(inode)) {
9384 result = PTR_ERR(inode);
9385 inode = NULL;
9386 goto error;
9387 }
9388 -
9389 result = v9fs_fid_add(dentry, fid);
9390 if (result < 0)
9391 goto error_iput;
9392 -
9393 inst_out:
9394 - d_add(dentry, inode);
9395 - return NULL;
9396 -
9397 + /*
9398 + * If we had a rename on the server and a parallel lookup
9399 + * for the new name, then make sure we instantiate with
9400 + * the new name. ie look up for a/b, while on server somebody
9401 + * moved b under k and client parallely did a lookup for
9402 + * k/b.
9403 + */
9404 + res = d_materialise_unique(dentry, inode);
9405 + if (!IS_ERR(res))
9406 + return res;
9407 + result = PTR_ERR(res);
9408 error_iput:
9409 iput(inode);
9410 error:
9411 @@ -950,7 +1025,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
9412 return PTR_ERR(st);
9413
9414 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
9415 - generic_fillattr(dentry->d_inode, stat);
9416 + generic_fillattr(dentry->d_inode, stat);
9417
9418 p9stat_free(st);
9419 kfree(st);
9420 @@ -1034,6 +1109,7 @@ void
9421 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
9422 struct super_block *sb)
9423 {
9424 + mode_t mode;
9425 char ext[32];
9426 char tag_name[14];
9427 unsigned int i_nlink;
9428 @@ -1069,31 +1145,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
9429 inode->i_nlink = i_nlink;
9430 }
9431 }
9432 - inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
9433 - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
9434 - char type = 0;
9435 - int major = -1;
9436 - int minor = -1;
9437 -
9438 - strncpy(ext, stat->extension, sizeof(ext));
9439 - sscanf(ext, "%c %u %u", &type, &major, &minor);
9440 - switch (type) {
9441 - case 'c':
9442 - inode->i_mode &= ~S_IFBLK;
9443 - inode->i_mode |= S_IFCHR;
9444 - break;
9445 - case 'b':
9446 - break;
9447 - default:
9448 - P9_DPRINTK(P9_DEBUG_ERROR,
9449 - "Unknown special type %c %s\n", type,
9450 - stat->extension);
9451 - };
9452 - inode->i_rdev = MKDEV(major, minor);
9453 - init_special_inode(inode, inode->i_mode, inode->i_rdev);
9454 - } else
9455 - inode->i_rdev = 0;
9456 -
9457 + mode = stat->mode & S_IALLUGO;
9458 + mode |= inode->i_mode & ~S_IALLUGO;
9459 + inode->i_mode = mode;
9460 i_size_write(inode, stat->length);
9461
9462 /* not real number of blocks, but 512 byte ones ... */
9463 @@ -1359,6 +1413,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
9464
9465 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
9466 {
9467 + int umode;
9468 + dev_t rdev;
9469 loff_t i_size;
9470 struct p9_wstat *st;
9471 struct v9fs_session_info *v9ses;
9472 @@ -1367,6 +1423,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
9473 st = p9_client_stat(fid);
9474 if (IS_ERR(st))
9475 return PTR_ERR(st);
9476 + /*
9477 + * Don't update inode if the file type is different
9478 + */
9479 + umode = p9mode2unixmode(v9ses, st, &rdev);
9480 + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
9481 + goto out;
9482
9483 spin_lock(&inode->i_lock);
9484 /*
9485 @@ -1378,6 +1440,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
9486 if (v9ses->cache)
9487 inode->i_size = i_size;
9488 spin_unlock(&inode->i_lock);
9489 +out:
9490 p9stat_free(st);
9491 kfree(st);
9492 return 0;
9493 diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
9494 index 691c78f..c873172 100644
9495 --- a/fs/9p/vfs_inode_dotl.c
9496 +++ b/fs/9p/vfs_inode_dotl.c
9497 @@ -86,18 +86,63 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
9498 return dentry;
9499 }
9500
9501 +static int v9fs_test_inode_dotl(struct inode *inode, void *data)
9502 +{
9503 + struct v9fs_inode *v9inode = V9FS_I(inode);
9504 + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
9505 +
9506 + /* don't match inode of different type */
9507 + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
9508 + return 0;
9509 +
9510 + if (inode->i_generation != st->st_gen)
9511 + return 0;
9512 +
9513 + /* compare qid details */
9514 + if (memcmp(&v9inode->qid.version,
9515 + &st->qid.version, sizeof(v9inode->qid.version)))
9516 + return 0;
9517 +
9518 + if (v9inode->qid.type != st->qid.type)
9519 + return 0;
9520 + return 1;
9521 +}
9522 +
9523 +/* Always get a new inode */
9524 +static int v9fs_test_new_inode_dotl(struct inode *inode, void *data)
9525 +{
9526 + return 0;
9527 +}
9528 +
9529 +static int v9fs_set_inode_dotl(struct inode *inode, void *data)
9530 +{
9531 + struct v9fs_inode *v9inode = V9FS_I(inode);
9532 + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data;
9533 +
9534 + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
9535 + inode->i_generation = st->st_gen;
9536 + return 0;
9537 +}
9538 +
9539 static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
9540 struct p9_qid *qid,
9541 struct p9_fid *fid,
9542 - struct p9_stat_dotl *st)
9543 + struct p9_stat_dotl *st,
9544 + int new)
9545 {
9546 int retval;
9547 unsigned long i_ino;
9548 struct inode *inode;
9549 struct v9fs_session_info *v9ses = sb->s_fs_info;
9550 + int (*test)(struct inode *, void *);
9551 +
9552 + if (new)
9553 + test = v9fs_test_new_inode_dotl;
9554 + else
9555 + test = v9fs_test_inode_dotl;
9556
9557 i_ino = v9fs_qid2ino(qid);
9558 - inode = iget_locked(sb, i_ino);
9559 + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st);
9560 if (!inode)
9561 return ERR_PTR(-ENOMEM);
9562 if (!(inode->i_state & I_NEW))
9563 @@ -107,13 +152,14 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
9564 * FIXME!! we may need support for stale inodes
9565 * later.
9566 */
9567 - retval = v9fs_init_inode(v9ses, inode, st->st_mode);
9568 + inode->i_ino = i_ino;
9569 + retval = v9fs_init_inode(v9ses, inode,
9570 + st->st_mode, new_decode_dev(st->st_rdev));
9571 if (retval)
9572 goto error;
9573
9574 v9fs_stat2inode_dotl(st, inode);
9575 #ifdef CONFIG_9P_FSCACHE
9576 - v9fs_fscache_set_key(inode, &st->qid);
9577 v9fs_cache_inode_get_cookie(inode);
9578 #endif
9579 retval = v9fs_get_acl(inode, fid);
9580 @@ -131,20 +177,72 @@ error:
9581
9582 struct inode *
9583 v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
9584 - struct super_block *sb)
9585 + struct super_block *sb, int new)
9586 {
9587 struct p9_stat_dotl *st;
9588 struct inode *inode = NULL;
9589
9590 - st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
9591 + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN);
9592 if (IS_ERR(st))
9593 return ERR_CAST(st);
9594
9595 - inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st);
9596 + inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new);
9597 kfree(st);
9598 return inode;
9599 }
9600
9601 +struct dotl_openflag_map {
9602 + int open_flag;
9603 + int dotl_flag;
9604 +};
9605 +
9606 +static int v9fs_mapped_dotl_flags(int flags)
9607 +{
9608 + int i;
9609 + int rflags = 0;
9610 + struct dotl_openflag_map dotl_oflag_map[] = {
9611 + { O_CREAT, P9_DOTL_CREATE },
9612 + { O_EXCL, P9_DOTL_EXCL },
9613 + { O_NOCTTY, P9_DOTL_NOCTTY },
9614 + { O_TRUNC, P9_DOTL_TRUNC },
9615 + { O_APPEND, P9_DOTL_APPEND },
9616 + { O_NONBLOCK, P9_DOTL_NONBLOCK },
9617 + { O_DSYNC, P9_DOTL_DSYNC },
9618 + { FASYNC, P9_DOTL_FASYNC },
9619 + { O_DIRECT, P9_DOTL_DIRECT },
9620 + { O_LARGEFILE, P9_DOTL_LARGEFILE },
9621 + { O_DIRECTORY, P9_DOTL_DIRECTORY },
9622 + { O_NOFOLLOW, P9_DOTL_NOFOLLOW },
9623 + { O_NOATIME, P9_DOTL_NOATIME },
9624 + { O_CLOEXEC, P9_DOTL_CLOEXEC },
9625 + { O_SYNC, P9_DOTL_SYNC},
9626 + };
9627 + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
9628 + if (flags & dotl_oflag_map[i].open_flag)
9629 + rflags |= dotl_oflag_map[i].dotl_flag;
9630 + }
9631 + return rflags;
9632 +}
9633 +
9634 +/**
9635 + * v9fs_open_to_dotl_flags- convert Linux specific open flags to
9636 + * plan 9 open flag.
9637 + * @flags: flags to convert
9638 + */
9639 +int v9fs_open_to_dotl_flags(int flags)
9640 +{
9641 + int rflags = 0;
9642 +
9643 + /*
9644 + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
9645 + * and P9_DOTL_NOACCESS
9646 + */
9647 + rflags |= flags & O_ACCMODE;
9648 + rflags |= v9fs_mapped_dotl_flags(flags);
9649 +
9650 + return rflags;
9651 +}
9652 +
9653 /**
9654 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
9655 * @dir: directory inode that is being created
9656 @@ -213,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
9657 "Failed to get acl values in creat %d\n", err);
9658 goto error;
9659 }
9660 - err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
9661 + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
9662 + mode, gid, &qid);
9663 if (err < 0) {
9664 P9_DPRINTK(P9_DEBUG_VFS,
9665 "p9_client_open_dotl failed in creat %d\n",
9666 @@ -230,19 +329,19 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
9667 fid = NULL;
9668 goto error;
9669 }
9670 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9671 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9672 if (IS_ERR(inode)) {
9673 err = PTR_ERR(inode);
9674 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
9675 goto error;
9676 }
9677 - d_instantiate(dentry, inode);
9678 err = v9fs_fid_add(dentry, fid);
9679 if (err < 0)
9680 goto error;
9681 + d_instantiate(dentry, inode);
9682
9683 /* Now set the ACL based on the default value */
9684 - v9fs_set_create_acl(dentry, dacl, pacl);
9685 + v9fs_set_create_acl(dentry, &dacl, &pacl);
9686
9687 v9inode = V9FS_I(inode);
9688 mutex_lock(&v9inode->v_mutex);
9689 @@ -283,6 +382,7 @@ error:
9690 err_clunk_old_fid:
9691 if (ofid)
9692 p9_client_clunk(ofid);
9693 + v9fs_set_create_acl(NULL, &dacl, &pacl);
9694 return err;
9695 }
9696
9697 @@ -350,17 +450,17 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
9698 goto error;
9699 }
9700
9701 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9702 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9703 if (IS_ERR(inode)) {
9704 err = PTR_ERR(inode);
9705 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
9706 err);
9707 goto error;
9708 }
9709 - d_instantiate(dentry, inode);
9710 err = v9fs_fid_add(dentry, fid);
9711 if (err < 0)
9712 goto error;
9713 + d_instantiate(dentry, inode);
9714 fid = NULL;
9715 } else {
9716 /*
9717 @@ -368,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
9718 * inode with stat. We need to get an inode
9719 * so that we can set the acl with dentry
9720 */
9721 - inode = v9fs_get_inode(dir->i_sb, mode);
9722 + inode = v9fs_get_inode(dir->i_sb, mode, 0);
9723 if (IS_ERR(inode)) {
9724 err = PTR_ERR(inode);
9725 goto error;
9726 @@ -376,12 +476,13 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
9727 d_instantiate(dentry, inode);
9728 }
9729 /* Now set the ACL based on the default value */
9730 - v9fs_set_create_acl(dentry, dacl, pacl);
9731 + v9fs_set_create_acl(dentry, &dacl, &pacl);
9732 inc_nlink(dir);
9733 v9fs_invalidate_inode_attr(dir);
9734 error:
9735 if (fid)
9736 p9_client_clunk(fid);
9737 + v9fs_set_create_acl(NULL, &dacl, &pacl);
9738 return err;
9739 }
9740
9741 @@ -493,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
9742 void
9743 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
9744 {
9745 + mode_t mode;
9746 struct v9fs_inode *v9inode = V9FS_I(inode);
9747
9748 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
9749 @@ -505,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
9750 inode->i_uid = stat->st_uid;
9751 inode->i_gid = stat->st_gid;
9752 inode->i_nlink = stat->st_nlink;
9753 - inode->i_mode = stat->st_mode;
9754 - inode->i_rdev = new_decode_dev(stat->st_rdev);
9755
9756 - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
9757 - init_special_inode(inode, inode->i_mode, inode->i_rdev);
9758 + mode = stat->st_mode & S_IALLUGO;
9759 + mode |= inode->i_mode & ~S_IALLUGO;
9760 + inode->i_mode = mode;
9761
9762 i_size_write(inode, stat->st_size);
9763 inode->i_blocks = stat->st_blocks;
9764 @@ -547,7 +648,7 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
9765 inode->i_blocks = stat->st_blocks;
9766 }
9767 if (stat->st_result_mask & P9_STATS_GEN)
9768 - inode->i_generation = stat->st_gen;
9769 + inode->i_generation = stat->st_gen;
9770
9771 /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
9772 * because the inode structure does not have fields for them.
9773 @@ -603,21 +704,21 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
9774 }
9775
9776 /* instantiate inode and assign the unopened fid to dentry */
9777 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9778 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9779 if (IS_ERR(inode)) {
9780 err = PTR_ERR(inode);
9781 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
9782 err);
9783 goto error;
9784 }
9785 - d_instantiate(dentry, inode);
9786 err = v9fs_fid_add(dentry, fid);
9787 if (err < 0)
9788 goto error;
9789 + d_instantiate(dentry, inode);
9790 fid = NULL;
9791 } else {
9792 /* Not in cached mode. No need to populate inode with stat */
9793 - inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
9794 + inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
9795 if (IS_ERR(inode)) {
9796 err = PTR_ERR(inode);
9797 goto error;
9798 @@ -756,24 +857,24 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
9799 goto error;
9800 }
9801
9802 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
9803 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
9804 if (IS_ERR(inode)) {
9805 err = PTR_ERR(inode);
9806 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
9807 err);
9808 goto error;
9809 }
9810 - d_instantiate(dentry, inode);
9811 err = v9fs_fid_add(dentry, fid);
9812 if (err < 0)
9813 goto error;
9814 + d_instantiate(dentry, inode);
9815 fid = NULL;
9816 } else {
9817 /*
9818 * Not in cached mode. No need to populate inode with stat.
9819 * socket syscall returns a fd, so we need instantiate
9820 */
9821 - inode = v9fs_get_inode(dir->i_sb, mode);
9822 + inode = v9fs_get_inode(dir->i_sb, mode, rdev);
9823 if (IS_ERR(inode)) {
9824 err = PTR_ERR(inode);
9825 goto error;
9826 @@ -781,10 +882,11 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
9827 d_instantiate(dentry, inode);
9828 }
9829 /* Now set the ACL based on the default value */
9830 - v9fs_set_create_acl(dentry, dacl, pacl);
9831 + v9fs_set_create_acl(dentry, &dacl, &pacl);
9832 error:
9833 if (fid)
9834 p9_client_clunk(fid);
9835 + v9fs_set_create_acl(NULL, &dacl, &pacl);
9836 return err;
9837 }
9838
9839 @@ -838,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
9840 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
9841 if (IS_ERR(st))
9842 return PTR_ERR(st);
9843 + /*
9844 + * Don't update inode if the file type is different
9845 + */
9846 + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
9847 + goto out;
9848
9849 spin_lock(&inode->i_lock);
9850 /*
9851 @@ -849,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
9852 if (v9ses->cache)
9853 inode->i_size = i_size;
9854 spin_unlock(&inode->i_lock);
9855 +out:
9856 kfree(st);
9857 return 0;
9858 }
9859 diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
9860 index feef6cd..c70251d 100644
9861 --- a/fs/9p/vfs_super.c
9862 +++ b/fs/9p/vfs_super.c
9863 @@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
9864 else
9865 sb->s_d_op = &v9fs_dentry_operations;
9866
9867 - inode = v9fs_get_inode(sb, S_IFDIR | mode);
9868 + inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
9869 if (IS_ERR(inode)) {
9870 retval = PTR_ERR(inode);
9871 goto release_sb;
9872 diff --git a/fs/block_dev.c b/fs/block_dev.c
9873 index 610e8e0..194cf66 100644
9874 --- a/fs/block_dev.c
9875 +++ b/fs/block_dev.c
9876 @@ -1419,6 +1419,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
9877 WARN_ON_ONCE(bdev->bd_holders);
9878 sync_blockdev(bdev);
9879 kill_bdev(bdev);
9880 + /* ->release can cause the old bdi to disappear,
9881 + * so must switch it out first
9882 + */
9883 + bdev_inode_switch_bdi(bdev->bd_inode,
9884 + &default_backing_dev_info);
9885 }
9886 if (bdev->bd_contains == bdev) {
9887 if (disk->fops->release)
9888 @@ -1432,8 +1437,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
9889 disk_put_part(bdev->bd_part);
9890 bdev->bd_part = NULL;
9891 bdev->bd_disk = NULL;
9892 - bdev_inode_switch_bdi(bdev->bd_inode,
9893 - &default_backing_dev_info);
9894 if (bdev != bdev->bd_contains)
9895 victim = bdev->bd_contains;
9896 bdev->bd_contains = NULL;
9897 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
9898 index 3601f0a..d42e6bf 100644
9899 --- a/fs/btrfs/inode.c
9900 +++ b/fs/btrfs/inode.c
9901 @@ -4124,7 +4124,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
9902
9903 /* special case for "." */
9904 if (filp->f_pos == 0) {
9905 - over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
9906 + over = filldir(dirent, ".", 1,
9907 + filp->f_pos, btrfs_ino(inode), DT_DIR);
9908 if (over)
9909 return 0;
9910 filp->f_pos = 1;
9911 @@ -4133,7 +4134,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
9912 if (filp->f_pos == 1) {
9913 u64 pino = parent_ino(filp->f_path.dentry);
9914 over = filldir(dirent, "..", 2,
9915 - 2, pino, DT_DIR);
9916 + filp->f_pos, pino, DT_DIR);
9917 if (over)
9918 return 0;
9919 filp->f_pos = 2;
9920 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
9921 index 1a9fe7f..07132c4 100644
9922 --- a/fs/cifs/cifssmb.c
9923 +++ b/fs/cifs/cifssmb.c
9924 @@ -4079,7 +4079,8 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
9925 T2_FNEXT_RSP_PARMS *parms;
9926 char *response_data;
9927 int rc = 0;
9928 - int bytes_returned, name_len;
9929 + int bytes_returned;
9930 + unsigned int name_len;
9931 __u16 params, byte_count;
9932
9933 cFYI(1, "In FindNext");
9934 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
9935 index e0ea721..2451627 100644
9936 --- a/fs/cifs/connect.c
9937 +++ b/fs/cifs/connect.c
9938 @@ -1258,7 +1258,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
9939 /* ignore */
9940 } else if (strnicmp(data, "guest", 5) == 0) {
9941 /* ignore */
9942 - } else if (strnicmp(data, "rw", 2) == 0) {
9943 + } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
9944 /* ignore */
9945 } else if (strnicmp(data, "ro", 2) == 0) {
9946 /* ignore */
9947 @@ -1361,7 +1361,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
9948 vol->server_ino = 1;
9949 } else if (strnicmp(data, "noserverino", 9) == 0) {
9950 vol->server_ino = 0;
9951 - } else if (strnicmp(data, "rwpidforward", 4) == 0) {
9952 + } else if (strnicmp(data, "rwpidforward", 12) == 0) {
9953 vol->rwpidforward = 1;
9954 } else if (strnicmp(data, "cifsacl", 7) == 0) {
9955 vol->cifs_acl = 1;
9956 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
9957 index b864839..c94774c 100644
9958 --- a/fs/ext4/inode.c
9959 +++ b/fs/ext4/inode.c
9960 @@ -2756,7 +2756,7 @@ static int write_cache_pages_da(struct address_space *mapping,
9961 index = wbc->range_start >> PAGE_CACHE_SHIFT;
9962 end = wbc->range_end >> PAGE_CACHE_SHIFT;
9963
9964 - if (wbc->sync_mode == WB_SYNC_ALL)
9965 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
9966 tag = PAGECACHE_TAG_TOWRITE;
9967 else
9968 tag = PAGECACHE_TAG_DIRTY;
9969 @@ -2988,7 +2988,7 @@ static int ext4_da_writepages(struct address_space *mapping,
9970 }
9971
9972 retry:
9973 - if (wbc->sync_mode == WB_SYNC_ALL)
9974 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
9975 tag_pages_for_writeback(mapping, index, end);
9976
9977 while (!ret && wbc->nr_to_write > 0) {
9978 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
9979 index 0f015a0..fe190a8 100644
9980 --- a/fs/fs-writeback.c
9981 +++ b/fs/fs-writeback.c
9982 @@ -36,6 +36,7 @@ struct wb_writeback_work {
9983 long nr_pages;
9984 struct super_block *sb;
9985 enum writeback_sync_modes sync_mode;
9986 + unsigned int tagged_writepages:1;
9987 unsigned int for_kupdate:1;
9988 unsigned int range_cyclic:1;
9989 unsigned int for_background:1;
9990 @@ -418,6 +419,15 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
9991 spin_lock(&inode->i_lock);
9992 inode->i_state &= ~I_SYNC;
9993 if (!(inode->i_state & I_FREEING)) {
9994 + /*
9995 + * Sync livelock prevention. Each inode is tagged and synced in
9996 + * one shot. If still dirty, it will be redirty_tail()'ed below.
9997 + * Update the dirty time to prevent enqueue and sync it again.
9998 + */
9999 + if ((inode->i_state & I_DIRTY) &&
10000 + (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
10001 + inode->dirtied_when = jiffies;
10002 +
10003 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
10004 /*
10005 * We didn't write back all the pages. nfs_writepages()
10006 @@ -650,6 +660,7 @@ static long wb_writeback(struct bdi_writeback *wb,
10007 {
10008 struct writeback_control wbc = {
10009 .sync_mode = work->sync_mode,
10010 + .tagged_writepages = work->tagged_writepages,
10011 .older_than_this = NULL,
10012 .for_kupdate = work->for_kupdate,
10013 .for_background = work->for_background,
10014 @@ -657,7 +668,7 @@ static long wb_writeback(struct bdi_writeback *wb,
10015 };
10016 unsigned long oldest_jif;
10017 long wrote = 0;
10018 - long write_chunk;
10019 + long write_chunk = MAX_WRITEBACK_PAGES;
10020 struct inode *inode;
10021
10022 if (wbc.for_kupdate) {
10023 @@ -683,9 +694,7 @@ static long wb_writeback(struct bdi_writeback *wb,
10024 * (quickly) tag currently dirty pages
10025 * (maybe slowly) sync all tagged pages
10026 */
10027 - if (wbc.sync_mode == WB_SYNC_NONE)
10028 - write_chunk = MAX_WRITEBACK_PAGES;
10029 - else
10030 + if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
10031 write_chunk = LONG_MAX;
10032
10033 wbc.wb_start = jiffies; /* livelock avoidance */
10034 @@ -1188,10 +1197,11 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
10035 {
10036 DECLARE_COMPLETION_ONSTACK(done);
10037 struct wb_writeback_work work = {
10038 - .sb = sb,
10039 - .sync_mode = WB_SYNC_NONE,
10040 - .done = &done,
10041 - .nr_pages = nr,
10042 + .sb = sb,
10043 + .sync_mode = WB_SYNC_NONE,
10044 + .tagged_writepages = 1,
10045 + .done = &done,
10046 + .nr_pages = nr,
10047 };
10048
10049 WARN_ON(!rwsem_is_locked(&sb->s_umount));
10050 diff --git a/fs/namei.c b/fs/namei.c
10051 index 14ab8d3..b456c7a 100644
10052 --- a/fs/namei.c
10053 +++ b/fs/namei.c
10054 @@ -2582,6 +2582,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
10055 if (!dir->i_op->rmdir)
10056 return -EPERM;
10057
10058 + dget(dentry);
10059 mutex_lock(&dentry->d_inode->i_mutex);
10060
10061 error = -EBUSY;
10062 @@ -2602,6 +2603,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
10063
10064 out:
10065 mutex_unlock(&dentry->d_inode->i_mutex);
10066 + dput(dentry);
10067 if (!error)
10068 d_delete(dentry);
10069 return error;
10070 @@ -3005,6 +3007,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
10071 if (error)
10072 return error;
10073
10074 + dget(new_dentry);
10075 if (target)
10076 mutex_lock(&target->i_mutex);
10077
10078 @@ -3025,6 +3028,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
10079 out:
10080 if (target)
10081 mutex_unlock(&target->i_mutex);
10082 + dput(new_dentry);
10083 if (!error)
10084 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
10085 d_move(old_dentry,new_dentry);
10086 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
10087 index 25b6a88..5afaa58 100644
10088 --- a/fs/proc/task_mmu.c
10089 +++ b/fs/proc/task_mmu.c
10090 @@ -877,30 +877,54 @@ struct numa_maps_private {
10091 struct numa_maps md;
10092 };
10093
10094 -static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
10095 +static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
10096 + unsigned long nr_pages)
10097 {
10098 int count = page_mapcount(page);
10099
10100 - md->pages++;
10101 + md->pages += nr_pages;
10102 if (pte_dirty || PageDirty(page))
10103 - md->dirty++;
10104 + md->dirty += nr_pages;
10105
10106 if (PageSwapCache(page))
10107 - md->swapcache++;
10108 + md->swapcache += nr_pages;
10109
10110 if (PageActive(page) || PageUnevictable(page))
10111 - md->active++;
10112 + md->active += nr_pages;
10113
10114 if (PageWriteback(page))
10115 - md->writeback++;
10116 + md->writeback += nr_pages;
10117
10118 if (PageAnon(page))
10119 - md->anon++;
10120 + md->anon += nr_pages;
10121
10122 if (count > md->mapcount_max)
10123 md->mapcount_max = count;
10124
10125 - md->node[page_to_nid(page)]++;
10126 + md->node[page_to_nid(page)] += nr_pages;
10127 +}
10128 +
10129 +static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
10130 + unsigned long addr)
10131 +{
10132 + struct page *page;
10133 + int nid;
10134 +
10135 + if (!pte_present(pte))
10136 + return NULL;
10137 +
10138 + page = vm_normal_page(vma, addr, pte);
10139 + if (!page)
10140 + return NULL;
10141 +
10142 + if (PageReserved(page))
10143 + return NULL;
10144 +
10145 + nid = page_to_nid(page);
10146 + if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
10147 + return NULL;
10148 +
10149 + return page;
10150 }
10151
10152 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
10153 @@ -912,26 +936,32 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
10154 pte_t *pte;
10155
10156 md = walk->private;
10157 - orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
10158 - do {
10159 - struct page *page;
10160 - int nid;
10161 + spin_lock(&walk->mm->page_table_lock);
10162 + if (pmd_trans_huge(*pmd)) {
10163 + if (pmd_trans_splitting(*pmd)) {
10164 + spin_unlock(&walk->mm->page_table_lock);
10165 + wait_split_huge_page(md->vma->anon_vma, pmd);
10166 + } else {
10167 + pte_t huge_pte = *(pte_t *)pmd;
10168 + struct page *page;
10169
10170 - if (!pte_present(*pte))
10171 - continue;
10172 + page = can_gather_numa_stats(huge_pte, md->vma, addr);
10173 + if (page)
10174 + gather_stats(page, md, pte_dirty(huge_pte),
10175 + HPAGE_PMD_SIZE/PAGE_SIZE);
10176 + spin_unlock(&walk->mm->page_table_lock);
10177 + return 0;
10178 + }
10179 + } else {
10180 + spin_unlock(&walk->mm->page_table_lock);
10181 + }
10182
10183 - page = vm_normal_page(md->vma, addr, *pte);
10184 + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
10185 + do {
10186 + struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
10187 if (!page)
10188 continue;
10189 -
10190 - if (PageReserved(page))
10191 - continue;
10192 -
10193 - nid = page_to_nid(page);
10194 - if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
10195 - continue;
10196 -
10197 - gather_stats(page, md, pte_dirty(*pte));
10198 + gather_stats(page, md, pte_dirty(*pte), 1);
10199
10200 } while (pte++, addr += PAGE_SIZE, addr != end);
10201 pte_unmap_unlock(orig_pte, ptl);
10202 @@ -952,7 +982,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
10203 return 0;
10204
10205 md = walk->private;
10206 - gather_stats(page, md, pte_dirty(*pte));
10207 + gather_stats(page, md, pte_dirty(*pte), 1);
10208 return 0;
10209 }
10210
10211 diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
10212 index d12f8d6..97cf4f2 100644
10213 --- a/include/linux/mfd/wm8994/pdata.h
10214 +++ b/include/linux/mfd/wm8994/pdata.h
10215 @@ -26,7 +26,7 @@ struct wm8994_ldo_pdata {
10216 struct regulator_init_data *init_data;
10217 };
10218
10219 -#define WM8994_CONFIGURE_GPIO 0x8000
10220 +#define WM8994_CONFIGURE_GPIO 0x10000
10221
10222 #define WM8994_DRC_REGS 5
10223 #define WM8994_EQ_REGS 20
10224 diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
10225 index 9026b30..218168a 100644
10226 --- a/include/linux/rio_regs.h
10227 +++ b/include/linux/rio_regs.h
10228 @@ -36,12 +36,12 @@
10229 #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */
10230 #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */
10231 #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */
10232 -#define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */
10233 -#define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */
10234 -#define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */
10235 -#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */
10236 -#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */
10237 -#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */
10238 +#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */
10239 +#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */
10240 +#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */
10241 +#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */
10242 +#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */
10243 +#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */
10244 #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
10245 #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
10246 #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
10247 @@ -102,7 +102,7 @@
10248 #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */
10249 #define RIO_RT_MAX_DESTID 0x0000ffff
10250
10251 -#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */
10252 +#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */
10253 #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */
10254 #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */
10255 #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */
10256 @@ -128,8 +128,8 @@
10257 #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */
10258 #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */
10259
10260 -#define RIO_WRITE_PORT_CSR 0x44 /* [I] Write Port CSR */
10261 -#define RIO_DOORBELL_CSR 0x44 /* [II] Doorbell CSR */
10262 +#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */
10263 +#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */
10264 #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */
10265 #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */
10266 #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */
10267 diff --git a/include/linux/rtc.h b/include/linux/rtc.h
10268 index b27ebea..93f4d03 100644
10269 --- a/include/linux/rtc.h
10270 +++ b/include/linux/rtc.h
10271 @@ -97,6 +97,9 @@ struct rtc_pll_info {
10272 #define RTC_AF 0x20 /* Alarm interrupt */
10273 #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
10274
10275 +
10276 +#define RTC_MAX_FREQ 8192
10277 +
10278 #ifdef __KERNEL__
10279
10280 #include <linux/types.h>
10281 diff --git a/include/linux/tty.h b/include/linux/tty.h
10282 index d6f0529..6660c41 100644
10283 --- a/include/linux/tty.h
10284 +++ b/include/linux/tty.h
10285 @@ -420,6 +420,8 @@ extern void tty_driver_flush_buffer(struct tty_struct *tty);
10286 extern void tty_throttle(struct tty_struct *tty);
10287 extern void tty_unthrottle(struct tty_struct *tty);
10288 extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
10289 +extern void tty_driver_remove_tty(struct tty_driver *driver,
10290 + struct tty_struct *tty);
10291 extern void tty_shutdown(struct tty_struct *tty);
10292 extern void tty_free_termios(struct tty_struct *tty);
10293 extern int is_current_pgrp_orphaned(void);
10294 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
10295 index 9deeac8..ecdaeb9 100644
10296 --- a/include/linux/tty_driver.h
10297 +++ b/include/linux/tty_driver.h
10298 @@ -47,6 +47,9 @@
10299 *
10300 * This routine is called synchronously when a particular tty device
10301 * is closed for the last time freeing up the resources.
10302 + * Note that tty_shutdown() is not called if ops->shutdown is defined.
10303 + * This means one is responsible to take care of calling ops->remove (e.g.
10304 + * via tty_driver_remove_tty) and releasing tty->termios.
10305 *
10306 *
10307 * void (*cleanup)(struct tty_struct * tty);
10308 diff --git a/include/linux/writeback.h b/include/linux/writeback.h
10309 index 17e7ccc..3f6542c 100644
10310 --- a/include/linux/writeback.h
10311 +++ b/include/linux/writeback.h
10312 @@ -47,6 +47,7 @@ struct writeback_control {
10313 unsigned encountered_congestion:1; /* An output: a queue is full */
10314 unsigned for_kupdate:1; /* A kupdate writeback */
10315 unsigned for_background:1; /* A background writeback */
10316 + unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
10317 unsigned for_reclaim:1; /* Invoked from the page allocator */
10318 unsigned range_cyclic:1; /* range_start is cyclic */
10319 unsigned more_io:1; /* more io to be dispatched */
10320 diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
10321 index 008711e..32f67c3 100644
10322 --- a/include/net/9p/9p.h
10323 +++ b/include/net/9p/9p.h
10324 @@ -278,6 +278,30 @@ enum p9_perm_t {
10325 P9_DMSETVTX = 0x00010000,
10326 };
10327
10328 +/* 9p2000.L open flags */
10329 +#define P9_DOTL_RDONLY 00000000
10330 +#define P9_DOTL_WRONLY 00000001
10331 +#define P9_DOTL_RDWR 00000002
10332 +#define P9_DOTL_NOACCESS 00000003
10333 +#define P9_DOTL_CREATE 00000100
10334 +#define P9_DOTL_EXCL 00000200
10335 +#define P9_DOTL_NOCTTY 00000400
10336 +#define P9_DOTL_TRUNC 00001000
10337 +#define P9_DOTL_APPEND 00002000
10338 +#define P9_DOTL_NONBLOCK 00004000
10339 +#define P9_DOTL_DSYNC 00010000
10340 +#define P9_DOTL_FASYNC 00020000
10341 +#define P9_DOTL_DIRECT 00040000
10342 +#define P9_DOTL_LARGEFILE 00100000
10343 +#define P9_DOTL_DIRECTORY 00200000
10344 +#define P9_DOTL_NOFOLLOW 00400000
10345 +#define P9_DOTL_NOATIME 01000000
10346 +#define P9_DOTL_CLOEXEC 02000000
10347 +#define P9_DOTL_SYNC 04000000
10348 +
10349 +/* 9p2000.L at flags */
10350 +#define P9_DOTL_AT_REMOVEDIR 0x200
10351 +
10352 /**
10353 * enum p9_qid_t - QID types
10354 * @P9_QTDIR: directory
10355 @@ -320,6 +344,11 @@ enum p9_qid_t {
10356 /* Room for readdir header */
10357 #define P9_READDIRHDRSZ 24
10358
10359 +/* 9p2000.L lock type */
10360 +#define P9_LOCK_TYPE_RDLCK 0
10361 +#define P9_LOCK_TYPE_WRLCK 1
10362 +#define P9_LOCK_TYPE_UNLCK 2
10363 +
10364 /**
10365 * struct p9_str - length prefixed string type
10366 * @len: length of the string
10367 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
10368 index 14fb6d6..ed049ea 100644
10369 --- a/ipc/mqueue.c
10370 +++ b/ipc/mqueue.c
10371 @@ -113,72 +113,75 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
10372 {
10373 struct user_struct *u = current_user();
10374 struct inode *inode;
10375 + int ret = -ENOMEM;
10376
10377 inode = new_inode(sb);
10378 - if (inode) {
10379 - inode->i_ino = get_next_ino();
10380 - inode->i_mode = mode;
10381 - inode->i_uid = current_fsuid();
10382 - inode->i_gid = current_fsgid();
10383 - inode->i_mtime = inode->i_ctime = inode->i_atime =
10384 - CURRENT_TIME;
10385 + if (!inode)
10386 + goto err;
10387
10388 - if (S_ISREG(mode)) {
10389 - struct mqueue_inode_info *info;
10390 - struct task_struct *p = current;
10391 - unsigned long mq_bytes, mq_msg_tblsz;
10392 + inode->i_ino = get_next_ino();
10393 + inode->i_mode = mode;
10394 + inode->i_uid = current_fsuid();
10395 + inode->i_gid = current_fsgid();
10396 + inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;
10397
10398 - inode->i_fop = &mqueue_file_operations;
10399 - inode->i_size = FILENT_SIZE;
10400 - /* mqueue specific info */
10401 - info = MQUEUE_I(inode);
10402 - spin_lock_init(&info->lock);
10403 - init_waitqueue_head(&info->wait_q);
10404 - INIT_LIST_HEAD(&info->e_wait_q[0].list);
10405 - INIT_LIST_HEAD(&info->e_wait_q[1].list);
10406 - info->notify_owner = NULL;
10407 - info->qsize = 0;
10408 - info->user = NULL; /* set when all is ok */
10409 - memset(&info->attr, 0, sizeof(info->attr));
10410 - info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
10411 - info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
10412 - if (attr) {
10413 - info->attr.mq_maxmsg = attr->mq_maxmsg;
10414 - info->attr.mq_msgsize = attr->mq_msgsize;
10415 - }
10416 - mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
10417 - info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
10418 - if (!info->messages)
10419 - goto out_inode;
10420 + if (S_ISREG(mode)) {
10421 + struct mqueue_inode_info *info;
10422 + struct task_struct *p = current;
10423 + unsigned long mq_bytes, mq_msg_tblsz;
10424
10425 - mq_bytes = (mq_msg_tblsz +
10426 - (info->attr.mq_maxmsg * info->attr.mq_msgsize));
10427 + inode->i_fop = &mqueue_file_operations;
10428 + inode->i_size = FILENT_SIZE;
10429 + /* mqueue specific info */
10430 + info = MQUEUE_I(inode);
10431 + spin_lock_init(&info->lock);
10432 + init_waitqueue_head(&info->wait_q);
10433 + INIT_LIST_HEAD(&info->e_wait_q[0].list);
10434 + INIT_LIST_HEAD(&info->e_wait_q[1].list);
10435 + info->notify_owner = NULL;
10436 + info->qsize = 0;
10437 + info->user = NULL; /* set when all is ok */
10438 + memset(&info->attr, 0, sizeof(info->attr));
10439 + info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
10440 + info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
10441 + if (attr) {
10442 + info->attr.mq_maxmsg = attr->mq_maxmsg;
10443 + info->attr.mq_msgsize = attr->mq_msgsize;
10444 + }
10445 + mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
10446 + info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
10447 + if (!info->messages)
10448 + goto out_inode;
10449
10450 - spin_lock(&mq_lock);
10451 - if (u->mq_bytes + mq_bytes < u->mq_bytes ||
10452 - u->mq_bytes + mq_bytes >
10453 - task_rlimit(p, RLIMIT_MSGQUEUE)) {
10454 - spin_unlock(&mq_lock);
10455 - /* mqueue_evict_inode() releases info->messages */
10456 - goto out_inode;
10457 - }
10458 - u->mq_bytes += mq_bytes;
10459 - spin_unlock(&mq_lock);
10460 + mq_bytes = (mq_msg_tblsz +
10461 + (info->attr.mq_maxmsg * info->attr.mq_msgsize));
10462
10463 - /* all is ok */
10464 - info->user = get_uid(u);
10465 - } else if (S_ISDIR(mode)) {
10466 - inc_nlink(inode);
10467 - /* Some things misbehave if size == 0 on a directory */
10468 - inode->i_size = 2 * DIRENT_SIZE;
10469 - inode->i_op = &mqueue_dir_inode_operations;
10470 - inode->i_fop = &simple_dir_operations;
10471 + spin_lock(&mq_lock);
10472 + if (u->mq_bytes + mq_bytes < u->mq_bytes ||
10473 + u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
10474 + spin_unlock(&mq_lock);
10475 + /* mqueue_evict_inode() releases info->messages */
10476 + ret = -EMFILE;
10477 + goto out_inode;
10478 }
10479 + u->mq_bytes += mq_bytes;
10480 + spin_unlock(&mq_lock);
10481 +
10482 + /* all is ok */
10483 + info->user = get_uid(u);
10484 + } else if (S_ISDIR(mode)) {
10485 + inc_nlink(inode);
10486 + /* Some things misbehave if size == 0 on a directory */
10487 + inode->i_size = 2 * DIRENT_SIZE;
10488 + inode->i_op = &mqueue_dir_inode_operations;
10489 + inode->i_fop = &simple_dir_operations;
10490 }
10491 +
10492 return inode;
10493 out_inode:
10494 iput(inode);
10495 - return NULL;
10496 +err:
10497 + return ERR_PTR(ret);
10498 }
10499
10500 static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
10501 @@ -194,8 +197,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
10502
10503 inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
10504 NULL);
10505 - if (!inode) {
10506 - error = -ENOMEM;
10507 + if (IS_ERR(inode)) {
10508 + error = PTR_ERR(inode);
10509 goto out;
10510 }
10511
10512 @@ -315,8 +318,8 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
10513 spin_unlock(&mq_lock);
10514
10515 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
10516 - if (!inode) {
10517 - error = -ENOMEM;
10518 + if (IS_ERR(inode)) {
10519 + error = PTR_ERR(inode);
10520 spin_lock(&mq_lock);
10521 ipc_ns->mq_queues_count--;
10522 goto out_unlock;
10523 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
10524 index d5a3009..dc5114b 100644
10525 --- a/kernel/irq/chip.c
10526 +++ b/kernel/irq/chip.c
10527 @@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
10528 desc->depth = 1;
10529 if (desc->irq_data.chip->irq_shutdown)
10530 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
10531 - if (desc->irq_data.chip->irq_disable)
10532 + else if (desc->irq_data.chip->irq_disable)
10533 desc->irq_data.chip->irq_disable(&desc->irq_data);
10534 else
10535 desc->irq_data.chip->irq_mask(&desc->irq_data);
10536 diff --git a/kernel/printk.c b/kernel/printk.c
10537 index 3518539..084982f 100644
10538 --- a/kernel/printk.c
10539 +++ b/kernel/printk.c
10540 @@ -1584,7 +1584,7 @@ static int __init printk_late_init(void)
10541 struct console *con;
10542
10543 for_each_console(con) {
10544 - if (con->flags & CON_BOOT) {
10545 + if (!keep_bootcon && con->flags & CON_BOOT) {
10546 printk(KERN_INFO "turn off boot console %s%d\n",
10547 con->name, con->index);
10548 unregister_console(con);
10549 diff --git a/kernel/sched.c b/kernel/sched.c
10550 index fde6ff9..8b37360 100644
10551 --- a/kernel/sched.c
10552 +++ b/kernel/sched.c
10553 @@ -4242,9 +4242,9 @@ pick_next_task(struct rq *rq)
10554 }
10555
10556 /*
10557 - * schedule() is the main scheduler function.
10558 + * __schedule() is the main scheduler function.
10559 */
10560 -asmlinkage void __sched schedule(void)
10561 +static void __sched __schedule(void)
10562 {
10563 struct task_struct *prev, *next;
10564 unsigned long *switch_count;
10565 @@ -4285,16 +4285,6 @@ need_resched:
10566 if (to_wakeup)
10567 try_to_wake_up_local(to_wakeup);
10568 }
10569 -
10570 - /*
10571 - * If we are going to sleep and we have plugged IO
10572 - * queued, make sure to submit it to avoid deadlocks.
10573 - */
10574 - if (blk_needs_flush_plug(prev)) {
10575 - raw_spin_unlock(&rq->lock);
10576 - blk_schedule_flush_plug(prev);
10577 - raw_spin_lock(&rq->lock);
10578 - }
10579 }
10580 switch_count = &prev->nvcsw;
10581 }
10582 @@ -4332,6 +4322,26 @@ need_resched:
10583 if (need_resched())
10584 goto need_resched;
10585 }
10586 +
10587 +static inline void sched_submit_work(struct task_struct *tsk)
10588 +{
10589 + if (!tsk->state)
10590 + return;
10591 + /*
10592 + * If we are going to sleep and we have plugged IO queued,
10593 + * make sure to submit it to avoid deadlocks.
10594 + */
10595 + if (blk_needs_flush_plug(tsk))
10596 + blk_schedule_flush_plug(tsk);
10597 +}
10598 +
10599 +asmlinkage void schedule(void)
10600 +{
10601 + struct task_struct *tsk = current;
10602 +
10603 + sched_submit_work(tsk);
10604 + __schedule();
10605 +}
10606 EXPORT_SYMBOL(schedule);
10607
10608 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
10609 @@ -4405,7 +4415,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
10610
10611 do {
10612 add_preempt_count_notrace(PREEMPT_ACTIVE);
10613 - schedule();
10614 + __schedule();
10615 sub_preempt_count_notrace(PREEMPT_ACTIVE);
10616
10617 /*
10618 @@ -4433,7 +4443,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
10619 do {
10620 add_preempt_count(PREEMPT_ACTIVE);
10621 local_irq_enable();
10622 - schedule();
10623 + __schedule();
10624 local_irq_disable();
10625 sub_preempt_count(PREEMPT_ACTIVE);
10626
10627 @@ -5558,7 +5568,7 @@ static inline int should_resched(void)
10628 static void __cond_resched(void)
10629 {
10630 add_preempt_count(PREEMPT_ACTIVE);
10631 - schedule();
10632 + __schedule();
10633 sub_preempt_count(PREEMPT_ACTIVE);
10634 }
10635
10636 @@ -7413,6 +7423,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
10637 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
10638 if (sd && (sd->flags & SD_OVERLAP))
10639 free_sched_groups(sd->groups, 0);
10640 + kfree(*per_cpu_ptr(sdd->sd, j));
10641 kfree(*per_cpu_ptr(sdd->sg, j));
10642 kfree(*per_cpu_ptr(sdd->sgp, j));
10643 }
10644 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
10645 index 59f369f..ea5e1a9 100644
10646 --- a/kernel/time/alarmtimer.c
10647 +++ b/kernel/time/alarmtimer.c
10648 @@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
10649 static void alarm_timer_get(struct k_itimer *timr,
10650 struct itimerspec *cur_setting)
10651 {
10652 + memset(cur_setting, 0, sizeof(struct itimerspec));
10653 +
10654 cur_setting->it_interval =
10655 ktime_to_timespec(timr->it.alarmtimer.period);
10656 cur_setting->it_value =
10657 @@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
10658 if (!rtcdev)
10659 return -ENOTSUPP;
10660
10661 - /* Save old values */
10662 - old_setting->it_interval =
10663 - ktime_to_timespec(timr->it.alarmtimer.period);
10664 - old_setting->it_value =
10665 - ktime_to_timespec(timr->it.alarmtimer.node.expires);
10666 + /*
10667 + * XXX HACK! Currently we can DOS a system if the interval
10668 + * period on alarmtimers is too small. Cap the interval here
10669 + * to 100us and solve this properly in a future patch! -jstultz
10670 + */
10671 + if ((new_setting->it_interval.tv_sec == 0) &&
10672 + (new_setting->it_interval.tv_nsec < 100000))
10673 + new_setting->it_interval.tv_nsec = 100000;
10674 +
10675 + if (old_setting)
10676 + alarm_timer_get(timr, old_setting);
10677
10678 /* If the timer was already set, cancel it */
10679 alarm_cancel(&timr->it.alarmtimer);
10680 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
10681 index 0400553..aec02b6 100644
10682 --- a/kernel/workqueue.c
10683 +++ b/kernel/workqueue.c
10684 @@ -3026,8 +3026,13 @@ reflush:
10685
10686 for_each_cwq_cpu(cpu, wq) {
10687 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
10688 + bool drained;
10689
10690 - if (!cwq->nr_active && list_empty(&cwq->delayed_works))
10691 + spin_lock_irq(&cwq->gcwq->lock);
10692 + drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
10693 + spin_unlock_irq(&cwq->gcwq->lock);
10694 +
10695 + if (drained)
10696 continue;
10697
10698 if (++flush_cnt == 10 ||
10699 diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
10700 index e51e255..a768e6d 100644
10701 --- a/lib/xz/xz_dec_bcj.c
10702 +++ b/lib/xz/xz_dec_bcj.c
10703 @@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
10704 * next filter in the chain. Apply the BCJ filter on the new data
10705 * in the output buffer. If everything cannot be filtered, copy it
10706 * to temp and rewind the output buffer position accordingly.
10707 + *
10708 + * This needs to be always run when temp.size == 0 to handle a special
10709 + * case where the output buffer is full and the next filter has no
10710 + * more output coming but hasn't returned XZ_STREAM_END yet.
10711 */
10712 - if (s->temp.size < b->out_size - b->out_pos) {
10713 + if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
10714 out_start = b->out_pos;
10715 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
10716 b->out_pos += s->temp.size;
10717 @@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
10718 s->temp.size = b->out_pos - out_start;
10719 b->out_pos -= s->temp.size;
10720 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
10721 +
10722 + /*
10723 + * If there wasn't enough input to the next filter to fill
10724 + * the output buffer with unfiltered data, there's no point
10725 + * to try decoding more data to temp.
10726 + */
10727 + if (b->out_pos + s->temp.size < b->out_size)
10728 + return XZ_OK;
10729 }
10730
10731 /*
10732 - * If we have unfiltered data in temp, try to fill by decoding more
10733 - * data from the next filter. Apply the BCJ filter on temp. Then we
10734 - * hopefully can fill the actual output buffer by copying filtered
10735 - * data from temp. A mix of filtered and unfiltered data may be left
10736 - * in temp; it will be taken care on the next call to this function.
10737 + * We have unfiltered data in temp. If the output buffer isn't full
10738 + * yet, try to fill the temp buffer by decoding more data from the
10739 + * next filter. Apply the BCJ filter on temp. Then we hopefully can
10740 + * fill the actual output buffer by copying filtered data from temp.
10741 + * A mix of filtered and unfiltered data may be left in temp; it will
10742 + * be taken care on the next call to this function.
10743 */
10744 - if (s->temp.size > 0) {
10745 + if (b->out_pos < b->out_size) {
10746 /* Make b->out{,_pos,_size} temporarily point to s->temp. */
10747 s->out = b->out;
10748 s->out_pos = b->out_pos;
10749 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
10750 index 31f6988..955fe35 100644
10751 --- a/mm/page-writeback.c
10752 +++ b/mm/page-writeback.c
10753 @@ -892,12 +892,12 @@ int write_cache_pages(struct address_space *mapping,
10754 range_whole = 1;
10755 cycled = 1; /* ignore range_cyclic tests */
10756 }
10757 - if (wbc->sync_mode == WB_SYNC_ALL)
10758 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
10759 tag = PAGECACHE_TAG_TOWRITE;
10760 else
10761 tag = PAGECACHE_TAG_DIRTY;
10762 retry:
10763 - if (wbc->sync_mode == WB_SYNC_ALL)
10764 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
10765 tag_pages_for_writeback(mapping, index, end);
10766 done_index = index;
10767 while (!done && (index <= end)) {
10768 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
10769 index 4e8985a..0f50cdb 100644
10770 --- a/mm/page_alloc.c
10771 +++ b/mm/page_alloc.c
10772 @@ -1616,6 +1616,21 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
10773 set_bit(i, zlc->fullzones);
10774 }
10775
10776 +/*
10777 + * clear all zones full, called after direct reclaim makes progress so that
10778 + * a zone that was recently full is not skipped over for up to a second
10779 + */
10780 +static void zlc_clear_zones_full(struct zonelist *zonelist)
10781 +{
10782 + struct zonelist_cache *zlc; /* cached zonelist speedup info */
10783 +
10784 + zlc = zonelist->zlcache_ptr;
10785 + if (!zlc)
10786 + return;
10787 +
10788 + bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
10789 +}
10790 +
10791 #else /* CONFIG_NUMA */
10792
10793 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
10794 @@ -1632,6 +1647,10 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
10795 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
10796 {
10797 }
10798 +
10799 +static void zlc_clear_zones_full(struct zonelist *zonelist)
10800 +{
10801 +}
10802 #endif /* CONFIG_NUMA */
10803
10804 /*
10805 @@ -1664,7 +1683,7 @@ zonelist_scan:
10806 continue;
10807 if ((alloc_flags & ALLOC_CPUSET) &&
10808 !cpuset_zone_allowed_softwall(zone, gfp_mask))
10809 - goto try_next_zone;
10810 + continue;
10811
10812 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
10813 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
10814 @@ -1676,17 +1695,36 @@ zonelist_scan:
10815 classzone_idx, alloc_flags))
10816 goto try_this_zone;
10817
10818 + if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
10819 + /*
10820 + * we do zlc_setup if there are multiple nodes
10821 + * and before considering the first zone allowed
10822 + * by the cpuset.
10823 + */
10824 + allowednodes = zlc_setup(zonelist, alloc_flags);
10825 + zlc_active = 1;
10826 + did_zlc_setup = 1;
10827 + }
10828 +
10829 if (zone_reclaim_mode == 0)
10830 goto this_zone_full;
10831
10832 + /*
10833 + * As we may have just activated ZLC, check if the first
10834 + * eligible zone has failed zone_reclaim recently.
10835 + */
10836 + if (NUMA_BUILD && zlc_active &&
10837 + !zlc_zone_worth_trying(zonelist, z, allowednodes))
10838 + continue;
10839 +
10840 ret = zone_reclaim(zone, gfp_mask, order);
10841 switch (ret) {
10842 case ZONE_RECLAIM_NOSCAN:
10843 /* did not scan */
10844 - goto try_next_zone;
10845 + continue;
10846 case ZONE_RECLAIM_FULL:
10847 /* scanned but unreclaimable */
10848 - goto this_zone_full;
10849 + continue;
10850 default:
10851 /* did we reclaim enough */
10852 if (!zone_watermark_ok(zone, order, mark,
10853 @@ -1703,16 +1741,6 @@ try_this_zone:
10854 this_zone_full:
10855 if (NUMA_BUILD)
10856 zlc_mark_zone_full(zonelist, z);
10857 -try_next_zone:
10858 - if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
10859 - /*
10860 - * we do zlc_setup after the first zone is tried but only
10861 - * if there are multiple nodes make it worthwhile
10862 - */
10863 - allowednodes = zlc_setup(zonelist, alloc_flags);
10864 - zlc_active = 1;
10865 - did_zlc_setup = 1;
10866 - }
10867 }
10868
10869 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
10870 @@ -1954,6 +1982,10 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
10871 if (unlikely(!(*did_some_progress)))
10872 return NULL;
10873
10874 + /* After successful reclaim, reconsider all zones for allocation */
10875 + if (NUMA_BUILD)
10876 + zlc_clear_zones_full(zonelist);
10877 +
10878 retry:
10879 page = get_page_from_freelist(gfp_mask, nodemask, order,
10880 zonelist, high_zoneidx,
10881 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
10882 index d3d451b..45ece89 100644
10883 --- a/mm/vmalloc.c
10884 +++ b/mm/vmalloc.c
10885 @@ -2154,6 +2154,14 @@ struct vm_struct *alloc_vm_area(size_t size)
10886 return NULL;
10887 }
10888
10889 + /*
10890 + * If the allocated address space is passed to a hypercall
10891 + * before being used then we cannot rely on a page fault to
10892 + * trigger an update of the page tables. So sync all the page
10893 + * tables here.
10894 + */
10895 + vmalloc_sync_all();
10896 +
10897 return area;
10898 }
10899 EXPORT_SYMBOL_GPL(alloc_vm_area);
10900 diff --git a/mm/vmscan.c b/mm/vmscan.c
10901 index d036e59..6072d74 100644
10902 --- a/mm/vmscan.c
10903 +++ b/mm/vmscan.c
10904 @@ -1748,6 +1748,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
10905 enum lru_list l;
10906 int noswap = 0;
10907 int force_scan = 0;
10908 + unsigned long nr_force_scan[2];
10909
10910
10911 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
10912 @@ -1770,6 +1771,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
10913 fraction[0] = 0;
10914 fraction[1] = 1;
10915 denominator = 1;
10916 + nr_force_scan[0] = 0;
10917 + nr_force_scan[1] = SWAP_CLUSTER_MAX;
10918 goto out;
10919 }
10920
10921 @@ -1781,6 +1784,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
10922 fraction[0] = 1;
10923 fraction[1] = 0;
10924 denominator = 1;
10925 + nr_force_scan[0] = SWAP_CLUSTER_MAX;
10926 + nr_force_scan[1] = 0;
10927 goto out;
10928 }
10929 }
10930 @@ -1829,6 +1834,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
10931 fraction[0] = ap;
10932 fraction[1] = fp;
10933 denominator = ap + fp + 1;
10934 + if (force_scan) {
10935 + unsigned long scan = SWAP_CLUSTER_MAX;
10936 + nr_force_scan[0] = div64_u64(scan * ap, denominator);
10937 + nr_force_scan[1] = div64_u64(scan * fp, denominator);
10938 + }
10939 out:
10940 for_each_evictable_lru(l) {
10941 int file = is_file_lru(l);
10942 @@ -1849,12 +1859,8 @@ out:
10943 * memcg, priority drop can cause big latency. So, it's better
10944 * to scan small amount. See may_noscan above.
10945 */
10946 - if (!scan && force_scan) {
10947 - if (file)
10948 - scan = SWAP_CLUSTER_MAX;
10949 - else if (!noswap)
10950 - scan = SWAP_CLUSTER_MAX;
10951 - }
10952 + if (!scan && force_scan)
10953 + scan = nr_force_scan[file];
10954 nr[l] = scan;
10955 }
10956 }
10957 diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
10958 index fcc6846..27263fb 100644
10959 --- a/net/8021q/vlan_core.c
10960 +++ b/net/8021q/vlan_core.c
10961 @@ -171,6 +171,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
10962 if (unlikely(!skb))
10963 goto err_free;
10964
10965 + skb_reset_network_header(skb);
10966 + skb_reset_transport_header(skb);
10967 return skb;
10968
10969 err_free:
10970 diff --git a/net/9p/client.c b/net/9p/client.c
10971 index 9e3b0e6..5532710 100644
10972 --- a/net/9p/client.c
10973 +++ b/net/9p/client.c
10974 @@ -280,7 +280,8 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
10975 * buffer to read the data into */
10976 tag++;
10977
10978 - BUG_ON(tag >= c->max_tag);
10979 + if(tag >= c->max_tag)
10980 + return NULL;
10981
10982 row = tag / P9_ROW_MAXTAG;
10983 col = tag % P9_ROW_MAXTAG;
10984 @@ -821,8 +822,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
10985 if (err)
10986 goto destroy_fidpool;
10987
10988 - if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
10989 - clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
10990 + if (clnt->msize > clnt->trans_mod->maxsize)
10991 + clnt->msize = clnt->trans_mod->maxsize;
10992
10993 err = p9_client_version(clnt);
10994 if (err)
10995 @@ -1249,9 +1250,11 @@ int p9_client_clunk(struct p9_fid *fid)
10996 P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
10997
10998 p9_free_req(clnt, req);
10999 - p9_fid_destroy(fid);
11000 -
11001 error:
11002 + /*
11003 + * Fid is not valid even after a failed clunk
11004 + */
11005 + p9_fid_destroy(fid);
11006 return err;
11007 }
11008 EXPORT_SYMBOL(p9_client_clunk);
11009 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
11010 index 244e707..e317583 100644
11011 --- a/net/9p/trans_virtio.c
11012 +++ b/net/9p/trans_virtio.c
11013 @@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
11014 {
11015 int in, out, inp, outp;
11016 struct virtio_chan *chan = client->trans;
11017 - char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
11018 unsigned long flags;
11019 size_t pdata_off = 0;
11020 struct trans_rpage_info *rpinfo = NULL;
11021 @@ -346,7 +345,8 @@ req_retry_pinned:
11022 * Arrange in such a way that server places header in the
11023 * alloced memory and payload onto the user buffer.
11024 */
11025 - inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
11026 + inp = pack_sg_list(chan->sg, out,
11027 + VIRTQUEUE_NUM, req->rc->sdata, 11);
11028 /*
11029 * Running executables in the filesystem may result in
11030 * a read request with kernel buffer as opposed to user buffer.
11031 @@ -366,8 +366,8 @@ req_retry_pinned:
11032 }
11033 in += inp;
11034 } else {
11035 - in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
11036 - client->msize);
11037 + in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
11038 + req->rc->sdata, req->rc->capacity);
11039 }
11040
11041 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
11042 @@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
11043 .close = p9_virtio_close,
11044 .request = p9_virtio_request,
11045 .cancel = p9_virtio_cancel,
11046 - .maxsize = PAGE_SIZE*16,
11047 +
11048 + /*
11049 + * We leave one entry for input and one entry for response
11050 + * headers. We also skip one more entry to accomodate, address
11051 + * that are not at page boundary, that can result in an extra
11052 + * page in zero copy.
11053 + */
11054 + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
11055 .pref = P9_TRANS_PREF_PAYLOAD_SEP,
11056 .def = 0,
11057 .owner = THIS_MODULE,
11058 diff --git a/net/atm/br2684.c b/net/atm/br2684.c
11059 index 52cfd0c..d07223c 100644
11060 --- a/net/atm/br2684.c
11061 +++ b/net/atm/br2684.c
11062 @@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
11063 spin_unlock_irqrestore(&rq->lock, flags);
11064
11065 skb_queue_walk_safe(&queue, skb, tmp) {
11066 - struct net_device *dev = skb->dev;
11067 + struct net_device *dev;
11068 +
11069 + br2684_push(atmvcc, skb);
11070 + dev = skb->dev;
11071
11072 dev->stats.rx_bytes -= skb->len;
11073 dev->stats.rx_packets--;
11074 -
11075 - br2684_push(atmvcc, skb);
11076 }
11077
11078 /* initialize netdev carrier state */
11079 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
11080 index 77930aa..01aa7e7 100644
11081 --- a/net/bluetooth/hci_event.c
11082 +++ b/net/bluetooth/hci_event.c
11083 @@ -56,8 +56,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
11084 if (status)
11085 return;
11086
11087 - if (test_bit(HCI_MGMT, &hdev->flags) &&
11088 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
11089 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
11090 + test_bit(HCI_MGMT, &hdev->flags))
11091 mgmt_discovering(hdev->id, 0);
11092
11093 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
11094 @@ -74,8 +74,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
11095 if (status)
11096 return;
11097
11098 - if (test_bit(HCI_MGMT, &hdev->flags) &&
11099 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
11100 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
11101 + test_bit(HCI_MGMT, &hdev->flags))
11102 mgmt_discovering(hdev->id, 0);
11103
11104 hci_conn_check_pending(hdev);
11105 @@ -851,9 +851,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
11106 return;
11107 }
11108
11109 - if (test_bit(HCI_MGMT, &hdev->flags) &&
11110 - !test_and_set_bit(HCI_INQUIRY,
11111 - &hdev->flags))
11112 + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
11113 + test_bit(HCI_MGMT, &hdev->flags))
11114 mgmt_discovering(hdev->id, 1);
11115 }
11116
11117 @@ -1225,8 +1224,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
11118
11119 BT_DBG("%s status %d", hdev->name, status);
11120
11121 - if (test_bit(HCI_MGMT, &hdev->flags) &&
11122 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
11123 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
11124 + test_bit(HCI_MGMT, &hdev->flags))
11125 mgmt_discovering(hdev->id, 0);
11126
11127 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
11128 diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
11129 index 1bacca4..6f156c1 100644
11130 --- a/net/bridge/br_if.c
11131 +++ b/net/bridge/br_if.c
11132 @@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
11133 int br_add_bridge(struct net *net, const char *name)
11134 {
11135 struct net_device *dev;
11136 + int res;
11137
11138 dev = alloc_netdev(sizeof(struct net_bridge), name,
11139 br_dev_setup);
11140 @@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name)
11141
11142 dev_net_set(dev, net);
11143
11144 - return register_netdev(dev);
11145 + res = register_netdev(dev);
11146 + if (res)
11147 + free_netdev(dev);
11148 + return res;
11149 }
11150
11151 int br_del_bridge(struct net *net, const char *name)
11152 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
11153 index 2d85ca7..995cbe0 100644
11154 --- a/net/bridge/br_multicast.c
11155 +++ b/net/bridge/br_multicast.c
11156 @@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
11157 {
11158 struct sk_buff *skb2;
11159 const struct ipv6hdr *ip6h;
11160 - struct icmp6hdr *icmp6h;
11161 + u8 icmp6_type;
11162 u8 nexthdr;
11163 unsigned len;
11164 int offset;
11165 @@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
11166 __skb_pull(skb2, offset);
11167 skb_reset_transport_header(skb2);
11168
11169 - icmp6h = icmp6_hdr(skb2);
11170 + icmp6_type = icmp6_hdr(skb2)->icmp6_type;
11171
11172 - switch (icmp6h->icmp6_type) {
11173 + switch (icmp6_type) {
11174 case ICMPV6_MGM_QUERY:
11175 case ICMPV6_MGM_REPORT:
11176 case ICMPV6_MGM_REDUCTION:
11177 @@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
11178 err = pskb_trim_rcsum(skb2, len);
11179 if (err)
11180 goto out;
11181 + err = -EINVAL;
11182 }
11183
11184 + ip6h = ipv6_hdr(skb2);
11185 +
11186 switch (skb2->ip_summed) {
11187 case CHECKSUM_COMPLETE:
11188 - if (!csum_fold(skb2->csum))
11189 + if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
11190 + IPPROTO_ICMPV6, skb2->csum))
11191 break;
11192 /*FALLTHROUGH*/
11193 case CHECKSUM_NONE:
11194 - skb2->csum = 0;
11195 - if (skb_checksum_complete(skb2))
11196 + skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
11197 + &ip6h->daddr,
11198 + skb2->len,
11199 + IPPROTO_ICMPV6, 0));
11200 + if (__skb_checksum_complete(skb2))
11201 goto out;
11202 }
11203
11204 @@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
11205
11206 BR_INPUT_SKB_CB(skb)->igmp = 1;
11207
11208 - switch (icmp6h->icmp6_type) {
11209 + switch (icmp6_type) {
11210 case ICMPV6_MGM_REPORT:
11211 {
11212 struct mld_msg *mld;
11213 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
11214 index 008dc70..f39ef5c 100644
11215 --- a/net/core/fib_rules.c
11216 +++ b/net/core/fib_rules.c
11217 @@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
11218 */
11219 list_for_each_entry(r, &ops->rules_list, list) {
11220 if (r->action == FR_ACT_GOTO &&
11221 - r->target == rule->pref) {
11222 - BUG_ON(rtnl_dereference(r->ctarget) != NULL);
11223 + r->target == rule->pref &&
11224 + rtnl_dereference(r->ctarget) == NULL) {
11225 rcu_assign_pointer(r->ctarget, rule);
11226 if (--ops->unresolved_rules == 0)
11227 break;
11228 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
11229 index 799f06e..16db887 100644
11230 --- a/net/core/neighbour.c
11231 +++ b/net/core/neighbour.c
11232 @@ -1383,11 +1383,15 @@ static void neigh_proxy_process(unsigned long arg)
11233
11234 if (tdif <= 0) {
11235 struct net_device *dev = skb->dev;
11236 +
11237 __skb_unlink(skb, &tbl->proxy_queue);
11238 - if (tbl->proxy_redo && netif_running(dev))
11239 + if (tbl->proxy_redo && netif_running(dev)) {
11240 + rcu_read_lock();
11241 tbl->proxy_redo(skb);
11242 - else
11243 + rcu_read_unlock();
11244 + } else {
11245 kfree_skb(skb);
11246 + }
11247
11248 dev_put(dev);
11249 } else if (!sched_next || tdif < sched_next)
11250 diff --git a/net/core/scm.c b/net/core/scm.c
11251 index 4c1ef02..811b53f 100644
11252 --- a/net/core/scm.c
11253 +++ b/net/core/scm.c
11254 @@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
11255 goto error;
11256
11257 cred->uid = cred->euid = p->creds.uid;
11258 - cred->gid = cred->egid = p->creds.uid;
11259 + cred->gid = cred->egid = p->creds.gid;
11260 put_cred(p->cred);
11261 p->cred = cred;
11262 }
11263 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
11264 index 283c0a2..d577199 100644
11265 --- a/net/ipv4/igmp.c
11266 +++ b/net/ipv4/igmp.c
11267 @@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
11268 break;
11269 for (i=0; i<nsrcs; i++) {
11270 /* skip inactive filters */
11271 - if (pmc->sfcount[MCAST_INCLUDE] ||
11272 + if (psf->sf_count[MCAST_INCLUDE] ||
11273 pmc->sfcount[MCAST_EXCLUDE] !=
11274 psf->sf_count[MCAST_EXCLUDE])
11275 continue;
11276 diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
11277 index 2e97e3e..929b27b 100644
11278 --- a/net/ipv4/netfilter.c
11279 +++ b/net/ipv4/netfilter.c
11280 @@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
11281 struct rtable *rt;
11282 struct flowi4 fl4 = {};
11283 __be32 saddr = iph->saddr;
11284 - __u8 flags = 0;
11285 + __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
11286 unsigned int hh_len;
11287
11288 - if (!skb->sk && addr_type != RTN_LOCAL) {
11289 - if (addr_type == RTN_UNSPEC)
11290 - addr_type = inet_addr_type(net, saddr);
11291 - if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
11292 - flags |= FLOWI_FLAG_ANYSRC;
11293 - else
11294 - saddr = 0;
11295 - }
11296 + if (addr_type == RTN_UNSPEC)
11297 + addr_type = inet_addr_type(net, saddr);
11298 + if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
11299 + flags |= FLOWI_FLAG_ANYSRC;
11300 + else
11301 + saddr = 0;
11302
11303 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
11304 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
11305 @@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
11306 fl4.flowi4_tos = RT_TOS(iph->tos);
11307 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
11308 fl4.flowi4_mark = skb->mark;
11309 - fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
11310 + fl4.flowi4_flags = flags;
11311 rt = ip_route_output_key(net, &fl4);
11312 if (IS_ERR(rt))
11313 return -1;
11314 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11315 index cdabdbf..75ef66f 100644
11316 --- a/net/ipv4/route.c
11317 +++ b/net/ipv4/route.c
11318 @@ -717,7 +717,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1,
11319 {
11320 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
11321 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
11322 - (rt1->rt_iif ^ rt2->rt_iif)) == 0);
11323 + (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
11324 }
11325
11326 static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
11327 @@ -727,8 +727,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
11328 (rt1->rt_mark ^ rt2->rt_mark) |
11329 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
11330 (rt1->rt_route_iif ^ rt2->rt_route_iif) |
11331 - (rt1->rt_oif ^ rt2->rt_oif) |
11332 - (rt1->rt_iif ^ rt2->rt_iif)) == 0;
11333 + (rt1->rt_oif ^ rt2->rt_oif)) == 0;
11334 }
11335
11336 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
11337 @@ -2282,9 +2281,8 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
11338 rth = rcu_dereference(rth->dst.rt_next)) {
11339 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
11340 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
11341 - (rth->rt_iif ^ iif) |
11342 + (rth->rt_route_iif ^ iif) |
11343 (rth->rt_key_tos ^ tos)) == 0 &&
11344 - rt_is_input_route(rth) &&
11345 rth->rt_mark == skb->mark &&
11346 net_eq(dev_net(rth->dst.dev), net) &&
11347 !rt_is_expired(rth)) {
11348 diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
11349 index 2646149..4382629 100644
11350 --- a/net/ipv4/syncookies.c
11351 +++ b/net/ipv4/syncookies.c
11352 @@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
11353 int mss;
11354 struct rtable *rt;
11355 __u8 rcv_wscale;
11356 - bool ecn_ok;
11357 + bool ecn_ok = false;
11358
11359 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
11360 goto out;
11361 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
11362 index bef9f04..b6771f9 100644
11363 --- a/net/ipv4/tcp_input.c
11364 +++ b/net/ipv4/tcp_input.c
11365 @@ -1115,7 +1115,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
11366 return 0;
11367
11368 /* ...Then it's D-SACK, and must reside below snd_una completely */
11369 - if (!after(end_seq, tp->snd_una))
11370 + if (after(end_seq, tp->snd_una))
11371 return 0;
11372
11373 if (!before(start_seq, tp->undo_marker))
11374 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
11375 index 9cb191e..147ede38 100644
11376 --- a/net/ipv6/ipv6_sockglue.c
11377 +++ b/net/ipv6/ipv6_sockglue.c
11378 @@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
11379 }
11380
11381 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
11382 - char __user *optval, int __user *optlen)
11383 + char __user *optval, int __user *optlen, unsigned flags)
11384 {
11385 struct ipv6_pinfo *np = inet6_sk(sk);
11386 int len;
11387 @@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
11388
11389 msg.msg_control = optval;
11390 msg.msg_controllen = len;
11391 - msg.msg_flags = 0;
11392 + msg.msg_flags = flags;
11393
11394 lock_sock(sk);
11395 skb = np->pktoptions;
11396 @@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
11397 if(level != SOL_IPV6)
11398 return -ENOPROTOOPT;
11399
11400 - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen);
11401 + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
11402 #ifdef CONFIG_NETFILTER
11403 /* we need to exclude all possible ENOPROTOOPTs except default case */
11404 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
11405 @@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
11406 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
11407 ipv6_getsockopt);
11408
11409 - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen);
11410 + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
11411 + MSG_CMSG_COMPAT);
11412 #ifdef CONFIG_NETFILTER
11413 /* we need to exclude all possible ENOPROTOOPTs except default case */
11414 if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
11415 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
11416 index 3e6ebcd..ee7839f 100644
11417 --- a/net/ipv6/mcast.c
11418 +++ b/net/ipv6/mcast.c
11419 @@ -1059,7 +1059,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
11420 break;
11421 for (i=0; i<nsrcs; i++) {
11422 /* skip inactive filters */
11423 - if (pmc->mca_sfcount[MCAST_INCLUDE] ||
11424 + if (psf->sf_count[MCAST_INCLUDE] ||
11425 pmc->mca_sfcount[MCAST_EXCLUDE] !=
11426 psf->sf_count[MCAST_EXCLUDE])
11427 continue;
11428 diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
11429 index 8b9644a..14b8339 100644
11430 --- a/net/ipv6/syncookies.c
11431 +++ b/net/ipv6/syncookies.c
11432 @@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
11433 int mss;
11434 struct dst_entry *dst;
11435 __u8 rcv_wscale;
11436 - bool ecn_ok;
11437 + bool ecn_ok = false;
11438
11439 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
11440 goto out;
11441 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
11442 index b83870b..ca7bf10 100644
11443 --- a/net/mac80211/sta_info.c
11444 +++ b/net/mac80211/sta_info.c
11445 @@ -669,7 +669,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
11446 BUG_ON(!sdata->bss);
11447
11448 atomic_dec(&sdata->bss->num_sta_ps);
11449 - __sta_info_clear_tim_bit(sdata->bss, sta);
11450 + sta_info_clear_tim_bit(sta);
11451 }
11452
11453 local->num_sta--;
11454 diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
11455 index 2a318f2..b5d56a2 100644
11456 --- a/net/sched/sch_prio.c
11457 +++ b/net/sched/sch_prio.c
11458 @@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
11459
11460 for (prio = 0; prio < q->bands; prio++) {
11461 struct Qdisc *qdisc = q->queues[prio];
11462 - struct sk_buff *skb = qdisc->dequeue(qdisc);
11463 + struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
11464 if (skb) {
11465 qdisc_bstats_update(sch, skb);
11466 sch->q.qlen--;
11467 diff --git a/net/socket.c b/net/socket.c
11468 index ed46dbb..1ad42d3 100644
11469 --- a/net/socket.c
11470 +++ b/net/socket.c
11471 @@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
11472 * used_address->name_len is initialized to UINT_MAX so that the first
11473 * destination address never matches.
11474 */
11475 - if (used_address && used_address->name_len == msg_sys->msg_namelen &&
11476 - !memcmp(&used_address->name, msg->msg_name,
11477 + if (used_address && msg_sys->msg_name &&
11478 + used_address->name_len == msg_sys->msg_namelen &&
11479 + !memcmp(&used_address->name, msg_sys->msg_name,
11480 used_address->name_len)) {
11481 err = sock_sendmsg_nosec(sock, msg_sys, total_len);
11482 goto out_freectl;
11483 @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
11484 */
11485 if (used_address && err >= 0) {
11486 used_address->name_len = msg_sys->msg_namelen;
11487 - memcpy(&used_address->name, msg->msg_name,
11488 - used_address->name_len);
11489 + if (msg_sys->msg_name)
11490 + memcpy(&used_address->name, msg_sys->msg_name,
11491 + used_address->name_len);
11492 }
11493
11494 out_freectl:
11495 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
11496 index cea3381..1ac9443 100644
11497 --- a/net/wireless/nl80211.c
11498 +++ b/net/wireless/nl80211.c
11499 @@ -4044,9 +4044,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
11500 if (len % sizeof(u32))
11501 return -EINVAL;
11502
11503 + if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
11504 + return -EINVAL;
11505 +
11506 memcpy(settings->akm_suites, data, len);
11507
11508 - for (i = 0; i < settings->n_ciphers_pairwise; i++)
11509 + for (i = 0; i < settings->n_akm_suites; i++)
11510 if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
11511 return -EINVAL;
11512 }
11513 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
11514 index 4453eb7..379574c 100644
11515 --- a/net/wireless/reg.c
11516 +++ b/net/wireless/reg.c
11517 @@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
11518 return;
11519 }
11520
11521 + chan->beacon_found = false;
11522 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
11523 chan->max_antenna_gain = min(chan->orig_mag,
11524 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
11525 diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
11526 index a026b0e..54a0dc2 100644
11527 --- a/net/xfrm/xfrm_input.c
11528 +++ b/net/xfrm/xfrm_input.c
11529 @@ -212,6 +212,11 @@ resume:
11530 /* only the first xfrm gets the encap type */
11531 encap_type = 0;
11532
11533 + if (async && x->repl->check(x, skb, seq)) {
11534 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
11535 + goto drop_unlock;
11536 + }
11537 +
11538 x->repl->advance(x, seq);
11539
11540 x->curlft.bytes += skb->len;
11541 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
11542 index f134130..3388442 100644
11543 --- a/sound/core/pcm_lib.c
11544 +++ b/sound/core/pcm_lib.c
11545 @@ -1758,6 +1758,10 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
11546 snd_pcm_uframes_t avail = 0;
11547 long wait_time, tout;
11548
11549 + init_waitqueue_entry(&wait, current);
11550 + set_current_state(TASK_INTERRUPTIBLE);
11551 + add_wait_queue(&runtime->tsleep, &wait);
11552 +
11553 if (runtime->no_period_wakeup)
11554 wait_time = MAX_SCHEDULE_TIMEOUT;
11555 else {
11556 @@ -1768,16 +1772,32 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
11557 }
11558 wait_time = msecs_to_jiffies(wait_time * 1000);
11559 }
11560 - init_waitqueue_entry(&wait, current);
11561 - add_wait_queue(&runtime->tsleep, &wait);
11562 +
11563 for (;;) {
11564 if (signal_pending(current)) {
11565 err = -ERESTARTSYS;
11566 break;
11567 }
11568 +
11569 + /*
11570 + * We need to check if space became available already
11571 + * (and thus the wakeup happened already) first to close
11572 + * the race of space already having become available.
11573 + * This check must happen after been added to the waitqueue
11574 + * and having current state be INTERRUPTIBLE.
11575 + */
11576 + if (is_playback)
11577 + avail = snd_pcm_playback_avail(runtime);
11578 + else
11579 + avail = snd_pcm_capture_avail(runtime);
11580 + if (avail >= runtime->twake)
11581 + break;
11582 snd_pcm_stream_unlock_irq(substream);
11583 - tout = schedule_timeout_interruptible(wait_time);
11584 +
11585 + tout = schedule_timeout(wait_time);
11586 +
11587 snd_pcm_stream_lock_irq(substream);
11588 + set_current_state(TASK_INTERRUPTIBLE);
11589 switch (runtime->status->state) {
11590 case SNDRV_PCM_STATE_SUSPENDED:
11591 err = -ESTRPIPE;
11592 @@ -1803,14 +1823,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
11593 err = -EIO;
11594 break;
11595 }
11596 - if (is_playback)
11597 - avail = snd_pcm_playback_avail(runtime);
11598 - else
11599 - avail = snd_pcm_capture_avail(runtime);
11600 - if (avail >= runtime->twake)
11601 - break;
11602 }
11603 _endloop:
11604 + set_current_state(TASK_RUNNING);
11605 remove_wait_queue(&runtime->tsleep, &wait);
11606 *availp = avail;
11607 return err;
11608 diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
11609 index a7ec703..ecce948 100644
11610 --- a/sound/pci/fm801.c
11611 +++ b/sound/pci/fm801.c
11612 @@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard.");
11613 module_param_array(tea575x_tuner, int, NULL, 0444);
11614 MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only).");
11615
11616 +#define TUNER_DISABLED (1<<3)
11617 #define TUNER_ONLY (1<<4)
11618 #define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF)
11619
11620 @@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip)
11621
11622 __end_hw:
11623 #ifdef CONFIG_SND_FM801_TEA575X_BOOL
11624 - snd_tea575x_exit(&chip->tea);
11625 + if (!(chip->tea575x_tuner & TUNER_DISABLED))
11626 + snd_tea575x_exit(&chip->tea);
11627 #endif
11628 if (chip->irq >= 0)
11629 free_irq(chip->irq, chip);
11630 @@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card,
11631 (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
11632 if (snd_tea575x_init(&chip->tea)) {
11633 snd_printk(KERN_ERR "TEA575x radio not found\n");
11634 - snd_fm801_free(chip);
11635 return -ENODEV;
11636 }
11637 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
11638 @@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card,
11639 }
11640 if (tea575x_tuner == 4) {
11641 snd_printk(KERN_ERR "TEA575x radio not found\n");
11642 - snd_fm801_free(chip);
11643 - return -ENODEV;
11644 + chip->tea575x_tuner = TUNER_DISABLED;
11645 }
11646 }
11647 - strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
11648 + if (!(chip->tea575x_tuner & TUNER_DISABLED)) {
11649 + strlcpy(chip->tea.card,
11650 + snd_fm801_tea575x_gpios[(tea575x_tuner &
11651 + TUNER_TYPE_MASK) - 1].name,
11652 + sizeof(chip->tea.card));
11653 + }
11654 #endif
11655
11656 *rchip = chip;
11657 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
11658 index 26a1521..fb6fbe4 100644
11659 --- a/sound/pci/hda/patch_cirrus.c
11660 +++ b/sound/pci/hda/patch_cirrus.c
11661 @@ -508,7 +508,7 @@ static int add_volume(struct hda_codec *codec, const char *name,
11662 int index, unsigned int pval, int dir,
11663 struct snd_kcontrol **kctlp)
11664 {
11665 - char tmp[32];
11666 + char tmp[44];
11667 struct snd_kcontrol_new knew =
11668 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
11669 knew.private_value = pval;
11670 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11671 index 524ff26..4c7cd6b 100644
11672 --- a/sound/pci/hda/patch_realtek.c
11673 +++ b/sound/pci/hda/patch_realtek.c
11674 @@ -397,7 +397,7 @@ struct alc_spec {
11675 unsigned int auto_mic:1;
11676 unsigned int automute:1; /* HP automute enabled */
11677 unsigned int detect_line:1; /* Line-out detection enabled */
11678 - unsigned int automute_lines:1; /* automute line-out as well */
11679 + unsigned int automute_lines:1; /* automute line-out as well; NOP when automute_hp_lo isn't set */
11680 unsigned int automute_hp_lo:1; /* both HP and LO available */
11681
11682 /* other flags */
11683 @@ -1161,7 +1161,7 @@ static void update_speakers(struct hda_codec *codec)
11684 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] ||
11685 spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0])
11686 return;
11687 - if (!spec->automute_lines || !spec->automute)
11688 + if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines))
11689 on = 0;
11690 else
11691 on = spec->jack_present;
11692 @@ -1494,7 +1494,7 @@ static int alc_automute_mode_get(struct snd_kcontrol *kcontrol,
11693 unsigned int val;
11694 if (!spec->automute)
11695 val = 0;
11696 - else if (!spec->automute_lines)
11697 + else if (!spec->automute_hp_lo || !spec->automute_lines)
11698 val = 1;
11699 else
11700 val = 2;
11701 @@ -1515,7 +1515,8 @@ static int alc_automute_mode_put(struct snd_kcontrol *kcontrol,
11702 spec->automute = 0;
11703 break;
11704 case 1:
11705 - if (spec->automute && !spec->automute_lines)
11706 + if (spec->automute &&
11707 + (!spec->automute_hp_lo || !spec->automute_lines))
11708 return 0;
11709 spec->automute = 1;
11710 spec->automute_lines = 0;
11711 @@ -1858,7 +1859,9 @@ do_sku:
11712 * 15 : 1 --> enable the function "Mute internal speaker
11713 * when the external headphone out jack is plugged"
11714 */
11715 - if (!spec->autocfg.hp_pins[0]) {
11716 + if (!spec->autocfg.hp_pins[0] &&
11717 + !(spec->autocfg.line_out_pins[0] &&
11718 + spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
11719 hda_nid_t nid;
11720 tmp = (ass >> 11) & 0x3; /* HP to chassis */
11721 if (tmp == 0)
11722 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
11723 index 7f81cc2..5c42f3e 100644
11724 --- a/sound/pci/hda/patch_sigmatel.c
11725 +++ b/sound/pci/hda/patch_sigmatel.c
11726 @@ -5470,6 +5470,7 @@ again:
11727 switch (codec->vendor_id) {
11728 case 0x111d76d1:
11729 case 0x111d76d9:
11730 + case 0x111d76df:
11731 case 0x111d76e5:
11732 case 0x111d7666:
11733 case 0x111d7667:
11734 @@ -6399,6 +6400,7 @@ static const struct hda_codec_preset snd_hda_preset_sigmatel[] = {
11735 { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
11736 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
11737 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
11738 + { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx},
11739 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
11740 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
11741 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
11742 diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
11743 index d6651c0..2f0f836 100644
11744 --- a/sound/soc/blackfin/bf5xx-ad193x.c
11745 +++ b/sound/soc/blackfin/bf5xx-ad193x.c
11746 @@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
11747 .cpu_dai_name = "bfin-tdm.0",
11748 .codec_dai_name ="ad193x-hifi",
11749 .platform_name = "bfin-tdm-pcm-audio",
11750 - .codec_name = "ad193x.5",
11751 + .codec_name = "spi0.5",
11752 .ops = &bf5xx_ad193x_ops,
11753 },
11754 {
11755 @@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
11756 .cpu_dai_name = "bfin-tdm.1",
11757 .codec_dai_name ="ad193x-hifi",
11758 .platform_name = "bfin-tdm-pcm-audio",
11759 - .codec_name = "ad193x.5",
11760 + .codec_name = "spi0.5",
11761 .ops = &bf5xx_ad193x_ops,
11762 },
11763 };
11764 diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
11765 index 2374ca5..f1a8be5 100644
11766 --- a/sound/soc/codecs/ad193x.c
11767 +++ b/sound/soc/codecs/ad193x.c
11768 @@ -307,7 +307,8 @@ static int ad193x_hw_params(struct snd_pcm_substream *substream,
11769 snd_soc_write(codec, AD193X_PLL_CLK_CTRL0, reg);
11770
11771 reg = snd_soc_read(codec, AD193X_DAC_CTRL2);
11772 - reg = (reg & (~AD193X_DAC_WORD_LEN_MASK)) | word_len;
11773 + reg = (reg & (~AD193X_DAC_WORD_LEN_MASK))
11774 + | (word_len << AD193X_DAC_WORD_LEN_SHFT);
11775 snd_soc_write(codec, AD193X_DAC_CTRL2, reg);
11776
11777 reg = snd_soc_read(codec, AD193X_ADC_CTRL1);
11778 diff --git a/sound/soc/codecs/ad193x.h b/sound/soc/codecs/ad193x.h
11779 index 9747b54..cccc2e8 100644
11780 --- a/sound/soc/codecs/ad193x.h
11781 +++ b/sound/soc/codecs/ad193x.h
11782 @@ -34,7 +34,8 @@
11783 #define AD193X_DAC_LEFT_HIGH (1 << 3)
11784 #define AD193X_DAC_BCLK_INV (1 << 7)
11785 #define AD193X_DAC_CTRL2 0x804
11786 -#define AD193X_DAC_WORD_LEN_MASK 0xC
11787 +#define AD193X_DAC_WORD_LEN_SHFT 3
11788 +#define AD193X_DAC_WORD_LEN_MASK 0x18
11789 #define AD193X_DAC_MASTER_MUTE 1
11790 #define AD193X_DAC_CHNL_MUTE 0x805
11791 #define AD193X_DACL1_MUTE 0
11792 @@ -63,7 +64,7 @@
11793 #define AD193X_ADC_CTRL1 0x80f
11794 #define AD193X_ADC_SERFMT_MASK 0x60
11795 #define AD193X_ADC_SERFMT_STEREO (0 << 5)
11796 -#define AD193X_ADC_SERFMT_TDM (1 << 2)
11797 +#define AD193X_ADC_SERFMT_TDM (1 << 5)
11798 #define AD193X_ADC_SERFMT_AUX (2 << 5)
11799 #define AD193X_ADC_WORD_LEN_MASK 0x3
11800 #define AD193X_ADC_CTRL2 0x810
11801 diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
11802 index 84f4ad5..9801cd7 100644
11803 --- a/sound/soc/codecs/ssm2602.c
11804 +++ b/sound/soc/codecs/ssm2602.c
11805 @@ -431,7 +431,8 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai,
11806 static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
11807 enum snd_soc_bias_level level)
11808 {
11809 - u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f;
11810 + u16 reg = snd_soc_read(codec, SSM2602_PWR);
11811 + reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN);
11812
11813 switch (level) {
11814 case SND_SOC_BIAS_ON:
11815 diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
11816 index fff695c..cbaf8b7 100644
11817 --- a/sound/soc/fsl/mpc5200_dma.c
11818 +++ b/sound/soc/fsl/mpc5200_dma.c
11819 @@ -368,7 +368,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = {
11820 .pcm_free = &psc_dma_free,
11821 };
11822
11823 -static int mpc5200_hpcd_probe(struct of_device *op)
11824 +static int mpc5200_hpcd_probe(struct platform_device *op)
11825 {
11826 phys_addr_t fifo;
11827 struct psc_dma *psc_dma;
11828 @@ -486,7 +486,7 @@ out_unmap:
11829 return ret;
11830 }
11831
11832 -static int mpc5200_hpcd_remove(struct of_device *op)
11833 +static int mpc5200_hpcd_remove(struct platform_device *op)
11834 {
11835 struct psc_dma *psc_dma = dev_get_drvdata(&op->dev);
11836
11837 @@ -518,7 +518,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match);
11838 static struct platform_driver mpc5200_hpcd_of_driver = {
11839 .probe = mpc5200_hpcd_probe,
11840 .remove = mpc5200_hpcd_remove,
11841 - .dev = {
11842 + .driver = {
11843 .owner = THIS_MODULE,
11844 .name = "mpc5200-pcm-audio",
11845 .of_match_table = mpc5200_hpcd_match,
11846 diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
11847 index 07b7723..4b82290 100644
11848 --- a/sound/soc/omap/omap-mcbsp.c
11849 +++ b/sound/soc/omap/omap-mcbsp.c
11850 @@ -516,6 +516,12 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
11851 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
11852 int err = 0;
11853
11854 + if (mcbsp_data->active)
11855 + if (freq == mcbsp_data->in_freq)
11856 + return 0;
11857 + else
11858 + return -EBUSY;
11859 +
11860 /* The McBSP signal muxing functions are only available on McBSP1 */
11861 if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR ||
11862 clk_id == OMAP_MCBSP_CLKR_SRC_CLKX ||
11863 diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
11864 index 7c17b98..fa31d9c 100644
11865 --- a/sound/soc/soc-jack.c
11866 +++ b/sound/soc/soc-jack.c
11867 @@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
11868
11869 snd_soc_dapm_sync(dapm);
11870
11871 - snd_jack_report(jack->jack, status);
11872 + snd_jack_report(jack->jack, jack->status);
11873
11874 out:
11875 mutex_unlock(&codec->mutex);
11876 @@ -327,7 +327,7 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
11877 IRQF_TRIGGER_FALLING,
11878 gpios[i].name,
11879 &gpios[i]);
11880 - if (ret)
11881 + if (ret < 0)
11882 goto err;
11883
11884 if (gpios[i].wake) {
11885 diff --git a/sound/usb/card.c b/sound/usb/card.c
11886 index 220c616..57a8e2d 100644
11887 --- a/sound/usb/card.c
11888 +++ b/sound/usb/card.c
11889 @@ -529,8 +529,11 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
11890 return chip;
11891
11892 __error:
11893 - if (chip && !chip->num_interfaces)
11894 - snd_card_free(chip->card);
11895 + if (chip) {
11896 + if (!chip->num_interfaces)
11897 + snd_card_free(chip->card);
11898 + chip->probing = 0;
11899 + }
11900 mutex_unlock(&register_mutex);
11901 __err_val:
11902 return NULL;
11903 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
11904 index eec1963..40fd1c7 100644
11905 --- a/tools/perf/util/symbol.c
11906 +++ b/tools/perf/util/symbol.c
11907 @@ -1111,6 +1111,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
11908 }
11909
11910 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
11911 + if (opdshdr.sh_type != SHT_PROGBITS)
11912 + opdsec = NULL;
11913 if (opdsec)
11914 opddata = elf_rawdata(opdsec, NULL);
11915