Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.26-r1/0102-2.6.26.3-all-fixes.patch



Revision 681
Wed Sep 17 19:42:13 2008 UTC by niro
File size: 105676 bytes
-2.6.26-alx-r1

1 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
2 index 5152ba0..778de8d 100644
3 --- a/arch/ia64/kvm/kvm-ia64.c
4 +++ b/arch/ia64/kvm/kvm-ia64.c
5 @@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
6 PAGE_KERNEL));
7 local_irq_save(saved_psr);
8 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
9 + local_irq_restore(saved_psr);
10 if (slot < 0)
11 return;
12 - local_irq_restore(saved_psr);
13
14 spin_lock(&vp_lock);
15 status = ia64_pal_vp_init_env(kvm_vsa_base ?
16 @@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)
17
18 local_irq_save(saved_psr);
19 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
20 + local_irq_restore(saved_psr);
21 if (slot < 0)
22 return;
23 - local_irq_restore(saved_psr);
24
25 status = ia64_pal_vp_exit_env(host_iva);
26 if (status)
27 @@ -1258,6 +1258,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
28 uninit:
29 kvm_vcpu_uninit(vcpu);
30 fail:
31 + local_irq_restore(psr);
32 return r;
33 }
34
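
The three hunks above fix the same class of bug: an error path must not return with interrupts still disabled. local_irq_restore() is moved in front of the early `slot < 0` return in both functions, and vti_vcpu_setup()'s fail label now restores the saved psr. A minimal sketch of the corrected shape, with do_work() as a hypothetical stand-in for ia64_itr_entry():

    static void example(void)
    {
            unsigned long flags;
            long slot;

            local_irq_save(flags);      /* mask IRQs, remember prior state */
            slot = do_work();           /* the part that needs IRQs masked */
            local_irq_restore(flags);   /* restore BEFORE any return...    */
            if (slot < 0)
                    return;             /* ...so no path leaks masked IRQs */
            /* continue with IRQs in their original state */
    }
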
35 diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
36 index c481673..acb0b97 100644
37 --- a/arch/sparc64/kernel/irq.c
38 +++ b/arch/sparc64/kernel/irq.c
39 @@ -682,10 +682,32 @@ void ack_bad_irq(unsigned int virt_irq)
40 ino, virt_irq);
41 }
42
43 +void *hardirq_stack[NR_CPUS];
44 +void *softirq_stack[NR_CPUS];
45 +
46 +static __attribute__((always_inline)) void *set_hardirq_stack(void)
47 +{
48 + void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
49 +
50 + __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
51 + if (orig_sp < sp ||
52 + orig_sp > (sp + THREAD_SIZE)) {
53 + sp += THREAD_SIZE - 192 - STACK_BIAS;
54 + __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
55 + }
56 +
57 + return orig_sp;
58 +}
59 +static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
60 +{
61 + __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
62 +}
63 +
64 void handler_irq(int irq, struct pt_regs *regs)
65 {
66 unsigned long pstate, bucket_pa;
67 struct pt_regs *old_regs;
68 + void *orig_sp;
69
70 clear_softint(1 << irq);
71
72 @@ -703,6 +725,8 @@ void handler_irq(int irq, struct pt_regs *regs)
73 "i" (PSTATE_IE)
74 : "memory");
75
76 + orig_sp = set_hardirq_stack();
77 +
78 while (bucket_pa) {
79 struct irq_desc *desc;
80 unsigned long next_pa;
81 @@ -719,10 +743,38 @@ void handler_irq(int irq, struct pt_regs *regs)
82 bucket_pa = next_pa;
83 }
84
85 + restore_hardirq_stack(orig_sp);
86 +
87 irq_exit();
88 set_irq_regs(old_regs);
89 }
90
91 +void do_softirq(void)
92 +{
93 + unsigned long flags;
94 +
95 + if (in_interrupt())
96 + return;
97 +
98 + local_irq_save(flags);
99 +
100 + if (local_softirq_pending()) {
101 + void *orig_sp, *sp = softirq_stack[smp_processor_id()];
102 +
103 + sp += THREAD_SIZE - 192 - STACK_BIAS;
104 +
105 + __asm__ __volatile__("mov %%sp, %0\n\t"
106 + "mov %1, %%sp"
107 + : "=&r" (orig_sp)
108 + : "r" (sp));
109 + __do_softirq();
110 + __asm__ __volatile__("mov %0, %%sp"
111 + : : "r" (orig_sp));
112 + }
113 +
114 + local_irq_restore(flags);
115 +}
116 +
117 #ifdef CONFIG_HOTPLUG_CPU
118 void fixup_irqs(void)
119 {
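
The sparc64 hunks above (together with the new kstack.h and the allocations added in mm/init.c below) move hard- and soft-IRQ processing onto dedicated per-cpu stacks. Two details are easy to miss: set_hardirq_stack() switches %sp only when it is not already inside the IRQ stack, so nested entries keep the stack they are on, and the initial stack pointer is base + THREAD_SIZE - 192 - STACK_BIAS. A sketch of that arithmetic (comments mark the assumptions):

    /* Top-of-stack math used by set_hardirq_stack() and do_softirq(). */
    static void *irq_stack_top(int cpu)
    {
            void *base = hardirq_stack[cpu]; /* THREAD_SIZE-sized block     */
            return base + THREAD_SIZE        /* one past the end...         */
                        - 192                /* ...minus a minimal sparc64  */
                                             /* frame (register-window save */
                                             /* area plus argument slots)   */
                        - STACK_BIAS;        /* %sp is kept biased (2047)   */
    }
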
120 diff --git a/arch/sparc64/kernel/kstack.h b/arch/sparc64/kernel/kstack.h
121 new file mode 100644
122 index 0000000..4248d96
123 --- /dev/null
124 +++ b/arch/sparc64/kernel/kstack.h
125 @@ -0,0 +1,60 @@
126 +#ifndef _KSTACK_H
127 +#define _KSTACK_H
128 +
129 +#include <linux/thread_info.h>
130 +#include <linux/sched.h>
131 +#include <asm/ptrace.h>
132 +#include <asm/irq.h>
133 +
134 +/* SP must be STACK_BIAS adjusted already. */
135 +static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
136 +{
137 + unsigned long base = (unsigned long) tp;
138 +
139 + if (sp >= (base + sizeof(struct thread_info)) &&
140 + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
141 + return true;
142 +
143 + if (hardirq_stack[tp->cpu]) {
144 + base = (unsigned long) hardirq_stack[tp->cpu];
145 + if (sp >= base &&
146 + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
147 + return true;
148 + base = (unsigned long) softirq_stack[tp->cpu];
149 + if (sp >= base &&
150 + sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
151 + return true;
152 + }
153 + return false;
154 +}
155 +
156 +/* Does "regs" point to a valid pt_regs trap frame? */
157 +static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
158 +{
159 + unsigned long base = (unsigned long) tp;
160 + unsigned long addr = (unsigned long) regs;
161 +
162 + if (addr >= base &&
163 + addr <= (base + THREAD_SIZE - sizeof(*regs)))
164 + goto check_magic;
165 +
166 + if (hardirq_stack[tp->cpu]) {
167 + base = (unsigned long) hardirq_stack[tp->cpu];
168 + if (addr >= base &&
169 + addr <= (base + THREAD_SIZE - sizeof(*regs)))
170 + goto check_magic;
171 + base = (unsigned long) softirq_stack[tp->cpu];
172 + if (addr >= base &&
173 + addr <= (base + THREAD_SIZE - sizeof(*regs)))
174 + goto check_magic;
175 + }
176 + return false;
177 +
178 +check_magic:
179 + if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
180 + return true;
181 + return false;
182 +
183 +}
184 +
185 +#endif /* _KSTACK_H */
186 diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
187 index 2084f81..d9f4cd0 100644
188 --- a/arch/sparc64/kernel/process.c
189 +++ b/arch/sparc64/kernel/process.c
190 @@ -55,6 +55,8 @@
191
192 /* #define VERBOSE_SHOWREGS */
193
194 +#include "kstack.h"
195 +
196 static void sparc64_yield(int cpu)
197 {
198 if (tlb_type != hypervisor)
199 @@ -316,14 +318,22 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
200 global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
201
202 if (regs->tstate & TSTATE_PRIV) {
203 + struct thread_info *tp = current_thread_info();
204 struct reg_window *rw;
205
206 rw = (struct reg_window *)
207 (regs->u_regs[UREG_FP] + STACK_BIAS);
208 - global_reg_snapshot[this_cpu].i7 = rw->ins[7];
209 - } else
210 + if (kstack_valid(tp, (unsigned long) rw)) {
211 + global_reg_snapshot[this_cpu].i7 = rw->ins[7];
212 + rw = (struct reg_window *)
213 + (rw->ins[6] + STACK_BIAS);
214 + if (kstack_valid(tp, (unsigned long) rw))
215 + global_reg_snapshot[this_cpu].rpc = rw->ins[7];
216 + }
217 + } else {
218 global_reg_snapshot[this_cpu].i7 = 0;
219 -
220 + global_reg_snapshot[this_cpu].rpc = 0;
221 + }
222 global_reg_snapshot[this_cpu].thread = tp;
223 }
224
225 @@ -384,12 +394,14 @@ static void sysrq_handle_globreg(int key, struct tty_struct *tty)
226 sprint_symbol(buffer, gp->o7);
227 printk("O7[%s] ", buffer);
228 sprint_symbol(buffer, gp->i7);
229 - printk("I7[%s]\n", buffer);
230 + printk("I7[%s] ", buffer);
231 + sprint_symbol(buffer, gp->rpc);
232 + printk("RPC[%s]\n", buffer);
233 } else
234 #endif
235 {
236 - printk(" TPC[%lx] O7[%lx] I7[%lx]\n",
237 - gp->tpc, gp->o7, gp->i7);
238 + printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
239 + gp->tpc, gp->o7, gp->i7, gp->rpc);
240 }
241 }
242
243 @@ -876,7 +888,7 @@ out:
244 unsigned long get_wchan(struct task_struct *task)
245 {
246 unsigned long pc, fp, bias = 0;
247 - unsigned long thread_info_base;
248 + struct thread_info *tp;
249 struct reg_window *rw;
250 unsigned long ret = 0;
251 int count = 0;
252 @@ -885,14 +897,12 @@ unsigned long get_wchan(struct task_struct *task)
253 task->state == TASK_RUNNING)
254 goto out;
255
256 - thread_info_base = (unsigned long) task_stack_page(task);
257 + tp = task_thread_info(task);
258 bias = STACK_BIAS;
259 fp = task_thread_info(task)->ksp + bias;
260
261 do {
262 - /* Bogus frame pointer? */
263 - if (fp < (thread_info_base + sizeof(struct thread_info)) ||
264 - fp >= (thread_info_base + THREAD_SIZE))
265 + if (!kstack_valid(tp, fp))
266 break;
267 rw = (struct reg_window *) fp;
268 pc = rw->ins[7];
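
A note for the frame walks above: on sparc64 a flushed register window sits at the (biased) frame pointer, with ins[6] holding %i6, the caller's frame pointer, and ins[7] holding %i7, the return address. Each step now goes through kstack_valid(), because with this patch a valid frame may sit on one of the new IRQ stacks rather than on the task stack. One iteration, mirroring get_wchan():

    /* fp is already STACK_BIAS-adjusted, as kstack_valid() expects */
    if (!kstack_valid(tp, fp))
            break;                     /* bogus or foreign frame pointer */
    rw = (struct reg_window *) fp;
    pc = rw->ins[7];                   /* %i7: return address            */
    fp = rw->ins[6] + STACK_BIAS;      /* %i6: the caller's frame        */
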
269 diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
270 index 9667e96..10a12cb 100644
271 --- a/arch/sparc64/kernel/signal.c
272 +++ b/arch/sparc64/kernel/signal.c
273 @@ -2,7 +2,7 @@
274 * arch/sparc64/kernel/signal.c
275 *
276 * Copyright (C) 1991, 1992 Linus Torvalds
277 - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
278 + * Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
279 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
280 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
281 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
282 @@ -89,7 +89,9 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
283 err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
284 err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
285 err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
286 - err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
287 +
288 + /* Skip %g7 as that's the thread register in userspace. */
289 +
290 err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
291 err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
292 err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
293 diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
294 index c73ce3f..8d749ef 100644
295 --- a/arch/sparc64/kernel/stacktrace.c
296 +++ b/arch/sparc64/kernel/stacktrace.c
297 @@ -4,10 +4,12 @@
298 #include <asm/ptrace.h>
299 #include <asm/stacktrace.h>
300
301 +#include "kstack.h"
302 +
303 void save_stack_trace(struct stack_trace *trace)
304 {
305 - unsigned long ksp, fp, thread_base;
306 struct thread_info *tp = task_thread_info(current);
307 + unsigned long ksp, fp;
308
309 stack_trace_flush();
310
311 @@ -17,21 +19,18 @@ void save_stack_trace(struct stack_trace *trace)
312 );
313
314 fp = ksp + STACK_BIAS;
315 - thread_base = (unsigned long) tp;
316 do {
317 struct sparc_stackf *sf;
318 struct pt_regs *regs;
319 unsigned long pc;
320
321 - /* Bogus frame pointer? */
322 - if (fp < (thread_base + sizeof(struct thread_info)) ||
323 - fp >= (thread_base + THREAD_SIZE))
324 + if (!kstack_valid(tp, fp))
325 break;
326
327 sf = (struct sparc_stackf *) fp;
328 regs = (struct pt_regs *) (sf + 1);
329
330 - if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
331 + if (kstack_is_trap_frame(tp, regs)) {
332 if (!(regs->tstate & TSTATE_PRIV))
333 break;
334 pc = regs->tpc;
335 diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
336 index 3697492..1389e38 100644
337 --- a/arch/sparc64/kernel/traps.c
338 +++ b/arch/sparc64/kernel/traps.c
339 @@ -43,6 +43,7 @@
340 #include <asm/prom.h>
341
342 #include "entry.h"
343 +#include "kstack.h"
344
345 /* When an irrecoverable trap occurs at tl > 0, the trap entry
346 * code logs the trap state registers at every level in the trap
347 @@ -2120,14 +2121,12 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
348 struct pt_regs *regs;
349 unsigned long pc;
350
351 - /* Bogus frame pointer? */
352 - if (fp < (thread_base + sizeof(struct thread_info)) ||
353 - fp >= (thread_base + THREAD_SIZE))
354 + if (!kstack_valid(tp, fp))
355 break;
356 sf = (struct sparc_stackf *) fp;
357 regs = (struct pt_regs *) (sf + 1);
358
359 - if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
360 + if (kstack_is_trap_frame(tp, regs)) {
361 if (!(regs->tstate & TSTATE_PRIV))
362 break;
363 pc = regs->tpc;
364 diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
365 index 9e4534b..0935f84 100644
366 --- a/arch/sparc64/lib/mcount.S
367 +++ b/arch/sparc64/lib/mcount.S
368 @@ -45,12 +45,45 @@ _mcount:
369 sub %g3, STACK_BIAS, %g3
370 cmp %sp, %g3
371 bg,pt %xcc, 1f
372 - sethi %hi(panicstring), %g3
373 + nop
374 + lduh [%g6 + TI_CPU], %g1
375 + sethi %hi(hardirq_stack), %g3
376 + or %g3, %lo(hardirq_stack), %g3
377 + sllx %g1, 3, %g1
378 + ldx [%g3 + %g1], %g7
379 + sub %g7, STACK_BIAS, %g7
380 + cmp %sp, %g7
381 + bleu,pt %xcc, 2f
382 + sethi %hi(THREAD_SIZE), %g3
383 + add %g7, %g3, %g7
384 + cmp %sp, %g7
385 + blu,pn %xcc, 1f
386 +2: sethi %hi(softirq_stack), %g3
387 + or %g3, %lo(softirq_stack), %g3
388 + ldx [%g3 + %g1], %g7
389 + cmp %sp, %g7
390 + bleu,pt %xcc, 2f
391 + sethi %hi(THREAD_SIZE), %g3
392 + add %g7, %g3, %g7
393 + cmp %sp, %g7
394 + blu,pn %xcc, 1f
395 + nop
396 + /* If we are already on ovstack, don't hop onto it
397 + * again, we are already trying to output the stack overflow
398 + * message.
399 + */
400 sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
401 or %g7, %lo(ovstack), %g7
402 - add %g7, OVSTACKSIZE, %g7
403 + add %g7, OVSTACKSIZE, %g3
404 + sub %g3, STACK_BIAS + 192, %g3
405 sub %g7, STACK_BIAS, %g7
406 - mov %g7, %sp
407 + cmp %sp, %g7
408 + blu,pn %xcc, 2f
409 + cmp %sp, %g3
410 + bleu,pn %xcc, 1f
411 + nop
412 +2: mov %g3, %sp
413 + sethi %hi(panicstring), %g3
414 call prom_printf
415 or %g3, %lo(panicstring), %o0
416 call prom_halt
417 diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
418 index 84898c4..e289a98 100644
419 --- a/arch/sparc64/mm/init.c
420 +++ b/arch/sparc64/mm/init.c
421 @@ -49,6 +49,7 @@
422 #include <asm/sstate.h>
423 #include <asm/mdesc.h>
424 #include <asm/cpudata.h>
425 +#include <asm/irq.h>
426
427 #define MAX_PHYS_ADDRESS (1UL << 42UL)
428 #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
429 @@ -1817,6 +1818,16 @@ void __init paging_init(void)
430 if (tlb_type == hypervisor)
431 sun4v_mdesc_init();
432
433 + /* Once the OF device tree and MDESC have been setup, we know
434 + * the list of possible cpus. Therefore we can allocate the
435 + * IRQ stacks.
436 + */
437 + for_each_possible_cpu(i) {
438 + /* XXX Use node local allocations... XXX */
439 + softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
440 + hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
441 + }
442 +
443 /* Setup bootmem... */
444 last_valid_pfn = end_pfn = bootmem_init(phys_base);
445
446 diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
447 index 9bb2d90..db57686 100644
448 --- a/arch/sparc64/mm/ultra.S
449 +++ b/arch/sparc64/mm/ultra.S
450 @@ -531,6 +531,13 @@ xcall_fetch_glob_regs:
451 stx %g7, [%g1 + GR_SNAP_TNPC]
452 stx %o7, [%g1 + GR_SNAP_O7]
453 stx %i7, [%g1 + GR_SNAP_I7]
454 + /* Don't try this at home kids... */
455 + rdpr %cwp, %g2
456 + sub %g2, 1, %g7
457 + wrpr %g7, %cwp
458 + mov %i7, %g7
459 + wrpr %g2, %cwp
460 + stx %g7, [%g1 + GR_SNAP_RPC]
461 sethi %hi(trap_block), %g7
462 or %g7, %lo(trap_block), %g7
463 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
464 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
465 index a34b998..9d4b4b4 100644
466 --- a/arch/x86/boot/boot.h
467 +++ b/arch/x86/boot/boot.h
468 @@ -25,6 +25,8 @@
469 #include <asm/boot.h>
470 #include <asm/setup.h>
471
472 +#define NCAPINTS 8
473 +
474 /* Useful macros */
475 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
476
477 @@ -242,6 +244,12 @@ int cmdline_find_option(const char *option, char *buffer, int bufsize);
478 int cmdline_find_option_bool(const char *option);
479
480 /* cpu.c, cpucheck.c */
481 +struct cpu_features {
482 + int level; /* Family, or 64 for x86-64 */
483 + int model;
484 + u32 flags[NCAPINTS];
485 +};
486 +extern struct cpu_features cpu;
487 int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
488 int validate_cpu(void);
489
490 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
491 index 7804389..c1ce030 100644
492 --- a/arch/x86/boot/cpucheck.c
493 +++ b/arch/x86/boot/cpucheck.c
494 @@ -30,13 +30,7 @@
495 #include <asm/required-features.h>
496 #include <asm/msr-index.h>
497
498 -struct cpu_features {
499 - int level; /* Family, or 64 for x86-64 */
500 - int model;
501 - u32 flags[NCAPINTS];
502 -};
503 -
504 -static struct cpu_features cpu;
505 +struct cpu_features cpu;
506 static u32 cpu_vendor[3];
507 static u32 err_flags[NCAPINTS];
508
509 diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
510 index 77569a4..1b92cb6 100644
511 --- a/arch/x86/boot/main.c
512 +++ b/arch/x86/boot/main.c
513 @@ -73,6 +73,10 @@ static void keyboard_set_repeat(void)
514 */
515 static void query_ist(void)
516 {
517 + /* Some 486 BIOSes apparently crash on this call */
518 + if (cpu.level < 6)
519 + return;
520 +
521 asm("int $0x15"
522 : "=a" (boot_params.ist_info.signature),
523 "=b" (boot_params.ist_info.command),
524 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
525 index 5d241ce..75b14b1 100644
526 --- a/arch/x86/kernel/cpu/mtrr/generic.c
527 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
528 @@ -219,7 +219,7 @@ void __init get_mtrr_state(void)
529 tom2 = hi;
530 tom2 <<= 32;
531 tom2 |= lo;
532 - tom2 &= 0xffffff8000000ULL;
533 + tom2 &= 0xffffff800000ULL;
534 }
535 if (mtrr_show) {
536 int high_width;
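
The one-character fix above deserves unpacking: TOM2, AMD's "top of memory 2" MSR, carries an 8 MiB-aligned physical address in bits 47:23. The old constant had one zero too many, shifting the whole mask up four bits:

    /* 0xffffff8000000ULL -> bits 51:27 set: drops address bits 26:23 */
    /*                       and keeps bits above the defined field   */
    /* 0xffffff800000ULL  -> bits 47:23 set: the 8 MiB-aligned mask   */

The same constant is corrected in two more places in arch/x86/pci/k8-bus_64.c further down.
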
537 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
538 index c26d811..67d00bc 100644
539 --- a/arch/x86/kvm/mmu.c
540 +++ b/arch/x86/kvm/mmu.c
541 @@ -1792,6 +1792,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
542 spin_unlock(&vcpu->kvm->mmu_lock);
543 return r;
544 }
545 +EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
546
547 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
548 {
549 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
550 index 06992d6..7d6071d 100644
551 --- a/arch/x86/kvm/svm.c
552 +++ b/arch/x86/kvm/svm.c
553 @@ -1007,13 +1007,18 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
554 struct kvm *kvm = svm->vcpu.kvm;
555 u64 fault_address;
556 u32 error_code;
557 + bool event_injection = false;
558
559 if (!irqchip_in_kernel(kvm) &&
560 - is_external_interrupt(exit_int_info))
561 + is_external_interrupt(exit_int_info)) {
562 + event_injection = true;
563 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
564 + }
565
566 fault_address = svm->vmcb->control.exit_info_2;
567 error_code = svm->vmcb->control.exit_info_1;
568 + if (event_injection)
569 + kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
570 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
571 }
572
573 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
574 index 2ce9063..3ff39c1 100644
575 --- a/arch/x86/kvm/vmx.c
576 +++ b/arch/x86/kvm/vmx.c
577 @@ -2258,6 +2258,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
578 cr2 = vmcs_readl(EXIT_QUALIFICATION);
579 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
580 (u32)((u64)cr2 >> 32), handler);
581 + if (vect_info & VECTORING_INFO_VALID_MASK)
582 + kvm_mmu_unprotect_page_virt(vcpu, cr2);
583 return kvm_mmu_page_fault(vcpu, cr2, error_code);
584 }
585
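
The SVM and VMX hunks above apply one idea: if a page fault arrives while an event was being injected (the "vectoring" case), the faulting page may merely be write-protected by the shadow MMU (typically because the guest's stack or page tables are shadowed), so unprotect it and retry rather than looping on the injection. The shape of the check, as in the VMX handler:

    if (vect_info & VECTORING_INFO_VALID_MASK)       /* fault mid-injection   */
            kvm_mmu_unprotect_page_virt(vcpu, cr2);  /* drop write protection */
    return kvm_mmu_page_fault(vcpu, cr2, error_code);

The EXPORT_SYMBOL_GPL added in mmu.c is what lets the modular svm.c/vmx.c call kvm_mmu_unprotect_page_virt().
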
586 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
587 index 5a7406e..8ab14ab 100644
588 --- a/arch/x86/kvm/x86.c
589 +++ b/arch/x86/kvm/x86.c
590 @@ -3168,6 +3168,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
591 kvm_desct->base |= seg_desc->base2 << 24;
592 kvm_desct->limit = seg_desc->limit0;
593 kvm_desct->limit |= seg_desc->limit << 16;
594 + if (seg_desc->g) {
595 + kvm_desct->limit <<= 12;
596 + kvm_desct->limit |= 0xfff;
597 + }
598 kvm_desct->selector = selector;
599 kvm_desct->type = seg_desc->type;
600 kvm_desct->present = seg_desc->p;
601 @@ -3207,6 +3211,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
602 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
603 struct desc_struct *seg_desc)
604 {
605 + gpa_t gpa;
606 struct descriptor_table dtable;
607 u16 index = selector >> 3;
608
609 @@ -3216,13 +3221,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
610 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
611 return 1;
612 }
613 - return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
614 + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
615 + gpa += index * 8;
616 + return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
617 }
618
619 /* allowed just for 8 bytes segments */
620 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
621 struct desc_struct *seg_desc)
622 {
623 + gpa_t gpa;
624 struct descriptor_table dtable;
625 u16 index = selector >> 3;
626
627 @@ -3230,7 +3238,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
628
629 if (dtable.limit < index * 8 + 7)
630 return 1;
631 - return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
632 + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
633 + gpa += index * 8;
634 + return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
635 }
636
637 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
638 @@ -3242,55 +3252,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
639 base_addr |= (seg_desc->base1 << 16);
640 base_addr |= (seg_desc->base2 << 24);
641
642 - return base_addr;
643 -}
644 -
645 -static int load_tss_segment32(struct kvm_vcpu *vcpu,
646 - struct desc_struct *seg_desc,
647 - struct tss_segment_32 *tss)
648 -{
649 - u32 base_addr;
650 -
651 - base_addr = get_tss_base_addr(vcpu, seg_desc);
652 -
653 - return kvm_read_guest(vcpu->kvm, base_addr, tss,
654 - sizeof(struct tss_segment_32));
655 -}
656 -
657 -static int save_tss_segment32(struct kvm_vcpu *vcpu,
658 - struct desc_struct *seg_desc,
659 - struct tss_segment_32 *tss)
660 -{
661 - u32 base_addr;
662 -
663 - base_addr = get_tss_base_addr(vcpu, seg_desc);
664 -
665 - return kvm_write_guest(vcpu->kvm, base_addr, tss,
666 - sizeof(struct tss_segment_32));
667 -}
668 -
669 -static int load_tss_segment16(struct kvm_vcpu *vcpu,
670 - struct desc_struct *seg_desc,
671 - struct tss_segment_16 *tss)
672 -{
673 - u32 base_addr;
674 -
675 - base_addr = get_tss_base_addr(vcpu, seg_desc);
676 -
677 - return kvm_read_guest(vcpu->kvm, base_addr, tss,
678 - sizeof(struct tss_segment_16));
679 -}
680 -
681 -static int save_tss_segment16(struct kvm_vcpu *vcpu,
682 - struct desc_struct *seg_desc,
683 - struct tss_segment_16 *tss)
684 -{
685 - u32 base_addr;
686 -
687 - base_addr = get_tss_base_addr(vcpu, seg_desc);
688 -
689 - return kvm_write_guest(vcpu->kvm, base_addr, tss,
690 - sizeof(struct tss_segment_16));
691 + return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
692 }
693
694 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
695 @@ -3450,20 +3412,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
696 }
697
698 int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
699 - struct desc_struct *cseg_desc,
700 + u32 old_tss_base,
701 struct desc_struct *nseg_desc)
702 {
703 struct tss_segment_16 tss_segment_16;
704 int ret = 0;
705
706 - if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
707 + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
708 + sizeof tss_segment_16))
709 goto out;
710
711 save_state_to_tss16(vcpu, &tss_segment_16);
712 - save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
713
714 - if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
715 + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
716 + sizeof tss_segment_16))
717 goto out;
718 +
719 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
720 + &tss_segment_16, sizeof tss_segment_16))
721 + goto out;
722 +
723 if (load_state_from_tss16(vcpu, &tss_segment_16))
724 goto out;
725
726 @@ -3473,20 +3441,26 @@ out:
727 }
728
729 int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
730 - struct desc_struct *cseg_desc,
731 + u32 old_tss_base,
732 struct desc_struct *nseg_desc)
733 {
734 struct tss_segment_32 tss_segment_32;
735 int ret = 0;
736
737 - if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
738 + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
739 + sizeof tss_segment_32))
740 goto out;
741
742 save_state_to_tss32(vcpu, &tss_segment_32);
743 - save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
744
745 - if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
746 + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
747 + sizeof tss_segment_32))
748 + goto out;
749 +
750 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
751 + &tss_segment_32, sizeof tss_segment_32))
752 goto out;
753 +
754 if (load_state_from_tss32(vcpu, &tss_segment_32))
755 goto out;
756
757 @@ -3501,16 +3475,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
758 struct desc_struct cseg_desc;
759 struct desc_struct nseg_desc;
760 int ret = 0;
761 + u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
762 + u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
763
764 - get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
765 + old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
766
767 + /* FIXME: Handle errors. Failure to read either TSS or their
768 + * descriptors should generate a pagefault.
769 + */
770 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
771 goto out;
772
773 - if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
774 + if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
775 goto out;
776
777 -
778 if (reason != TASK_SWITCH_IRET) {
779 int cpl;
780
781 @@ -3528,8 +3506,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
782
783 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
784 cseg_desc.type &= ~(1 << 1); //clear the B flag
785 - save_guest_segment_descriptor(vcpu, tr_seg.selector,
786 - &cseg_desc);
787 + save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
788 }
789
790 if (reason == TASK_SWITCH_IRET) {
791 @@ -3541,10 +3518,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
792 kvm_x86_ops->cache_regs(vcpu);
793
794 if (nseg_desc.type & 8)
795 - ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
796 + ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
797 &nseg_desc);
798 else
799 - ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
800 + ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
801 &nseg_desc);
802
803 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
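
Two independent fixes sit in the x86.c hunks above. First, seg_desct_to_kvm_desct() now honors the descriptor's granularity (G) bit: with G set, the 20-bit limit counts 4 KiB pages, so for example limit = 0xfffff becomes (0xfffff << 12) | 0xfff = 0xffffffff, a 4 GiB segment, where the old code reported 1 MiB - 1. Second, descriptor tables and TSSes are addressed by guest-virtual addresses, while kvm_read_guest()/kvm_write_guest() take guest-physical ones, so every access is translated first, in the pattern:

    gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base) + index * 8;
    return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);

The load_tss_segment*/save_tss_segment* helpers disappear because get_tss_base_addr() now returns a guest-physical address that the task-switch code can pass to kvm_read_guest()/kvm_write_guest() directly.
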
804 diff --git a/arch/x86/pci/k8-bus_64.c b/arch/x86/pci/k8-bus_64.c
805 index 5c2799c..bfefdf0 100644
806 --- a/arch/x86/pci/k8-bus_64.c
807 +++ b/arch/x86/pci/k8-bus_64.c
808 @@ -384,7 +384,7 @@ static int __init early_fill_mp_bus_info(void)
809 /* need to take out [0, TOM) for RAM*/
810 address = MSR_K8_TOP_MEM1;
811 rdmsrl(address, val);
812 - end = (val & 0xffffff8000000ULL);
813 + end = (val & 0xffffff800000ULL);
814 printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
815 if (end < (1ULL<<32))
816 update_range(range, 0, end - 1);
817 @@ -478,7 +478,7 @@ static int __init early_fill_mp_bus_info(void)
818 /* TOP_MEM2 */
819 address = MSR_K8_TOP_MEM2;
820 rdmsrl(address, val);
821 - end = (val & 0xffffff8000000ULL);
822 + end = (val & 0xffffff800000ULL);
823 printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
824 update_range(range, 1ULL<<32, end - 1);
825 }
826 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
827 index 78199c0..f1d2e8a 100644
828 --- a/block/scsi_ioctl.c
829 +++ b/block/scsi_ioctl.c
830 @@ -629,7 +629,7 @@ int scsi_cmd_ioctl(struct file *file, struct request_queue *q,
831 hdr.sbp = cgc.sense;
832 if (hdr.sbp)
833 hdr.mx_sb_len = sizeof(struct request_sense);
834 - hdr.timeout = cgc.timeout;
835 + hdr.timeout = jiffies_to_msecs(cgc.timeout);
836 hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
837 hdr.cmd_len = sizeof(cgc.cmd);
838
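
The timeout change above is a units fix: cgc.timeout is kept in jiffies (HZ-dependent ticks), while the SG_IO header's timeout field is defined in milliseconds, so copying the raw value was wrong whenever HZ != 1000. For example, with HZ == 250 (an illustrative value), 750 jiffies must become 3000 ms:

    hdr.timeout = jiffies_to_msecs(cgc.timeout);   /* jiffies -> milliseconds */
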
839 diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
840 index f7feae4..128202e 100644
841 --- a/drivers/char/hw_random/via-rng.c
842 +++ b/drivers/char/hw_random/via-rng.c
843 @@ -31,6 +31,7 @@
844 #include <asm/io.h>
845 #include <asm/msr.h>
846 #include <asm/cpufeature.h>
847 +#include <asm/i387.h>
848
849
850 #define PFX KBUILD_MODNAME ": "
851 @@ -67,16 +68,23 @@ enum {
852 * Another possible performance boost may come from simply buffering
853 * until we have 4 bytes, thus returning a u32 at a time,
854 * instead of the current u8-at-a-time.
855 + *
856 + * Padlock instructions can generate a spurious DNA fault, so
857 + * we have to call them in the context of irq_ts_save/restore()
858 */
859
860 static inline u32 xstore(u32 *addr, u32 edx_in)
861 {
862 u32 eax_out;
863 + int ts_state;
864 +
865 + ts_state = irq_ts_save();
866
867 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
868 :"=m"(*addr), "=a"(eax_out)
869 :"D"(addr), "d"(edx_in));
870
871 + irq_ts_restore(ts_state);
872 return eax_out;
873 }
874
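
This and the two padlock files below all wrap VIA's vendor-specific instructions (xstore, xcrypt, xsha) in irq_ts_save()/irq_ts_restore(). Per the added comments, those instructions touch no FPU state yet still raise a Device-Not-Available fault when CR0.TS is set, which cannot be tolerated in interrupt context. A sketch of what the asm/i387.h helpers are expected to do (an approximation, not a verbatim copy):

    static inline int irq_ts_save(void)
    {
            if (!in_interrupt())       /* process context can simply take   */
                    return 0;          /* and handle the spurious DNA fault */
            if (read_cr0() & X86_CR0_TS) {
                    clts();            /* clear TS: no fault will be raised */
                    return 1;          /* caller must set it back           */
            }
            return 0;
    }

    static inline void irq_ts_restore(int TS_state)
    {
            if (TS_state)
                    stts();            /* re-arm lazy FPU state switching */
    }
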
875 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
876 index bb30eb9..2a5c2db 100644
877 --- a/drivers/crypto/padlock-aes.c
878 +++ b/drivers/crypto/padlock-aes.c
879 @@ -16,6 +16,7 @@
880 #include <linux/interrupt.h>
881 #include <linux/kernel.h>
882 #include <asm/byteorder.h>
883 +#include <asm/i387.h>
884 #include "padlock.h"
885
886 /* Control word. */
887 @@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
888 asm volatile ("pushfl; popfl");
889 }
890
891 +/*
892 + * While the padlock instructions don't use FP/SSE registers, they
893 + * generate a spurious DNA fault when cr0.ts is '1'. These instructions
894 + * should be used only inside the irq_ts_save/restore() context
895 + */
896 +
897 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
898 void *control_word)
899 {
900 @@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
901 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
902 {
903 struct aes_ctx *ctx = aes_ctx(tfm);
904 + int ts_state;
905 padlock_reset_key();
906 +
907 + ts_state = irq_ts_save();
908 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
909 + irq_ts_restore(ts_state);
910 }
911
912 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
913 {
914 struct aes_ctx *ctx = aes_ctx(tfm);
915 + int ts_state;
916 padlock_reset_key();
917 +
918 + ts_state = irq_ts_save();
919 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
920 + irq_ts_restore(ts_state);
921 }
922
923 static struct crypto_alg aes_alg = {
924 @@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
925 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
926 struct blkcipher_walk walk;
927 int err;
928 + int ts_state;
929
930 padlock_reset_key();
931
932 blkcipher_walk_init(&walk, dst, src, nbytes);
933 err = blkcipher_walk_virt(desc, &walk);
934
935 + ts_state = irq_ts_save();
936 while ((nbytes = walk.nbytes)) {
937 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
938 ctx->E, &ctx->cword.encrypt,
939 @@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
940 nbytes &= AES_BLOCK_SIZE - 1;
941 err = blkcipher_walk_done(desc, &walk, nbytes);
942 }
943 + irq_ts_restore(ts_state);
944
945 return err;
946 }
947 @@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
948 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
949 struct blkcipher_walk walk;
950 int err;
951 + int ts_state;
952
953 padlock_reset_key();
954
955 blkcipher_walk_init(&walk, dst, src, nbytes);
956 err = blkcipher_walk_virt(desc, &walk);
957
958 + ts_state = irq_ts_save();
959 while ((nbytes = walk.nbytes)) {
960 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
961 ctx->D, &ctx->cword.decrypt,
962 @@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
963 nbytes &= AES_BLOCK_SIZE - 1;
964 err = blkcipher_walk_done(desc, &walk, nbytes);
965 }
966 -
967 + irq_ts_restore(ts_state);
968 return err;
969 }
970
971 @@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
972 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
973 struct blkcipher_walk walk;
974 int err;
975 + int ts_state;
976
977 padlock_reset_key();
978
979 blkcipher_walk_init(&walk, dst, src, nbytes);
980 err = blkcipher_walk_virt(desc, &walk);
981
982 + ts_state = irq_ts_save();
983 while ((nbytes = walk.nbytes)) {
984 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
985 walk.dst.virt.addr, ctx->E,
986 @@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
987 nbytes &= AES_BLOCK_SIZE - 1;
988 err = blkcipher_walk_done(desc, &walk, nbytes);
989 }
990 + irq_ts_restore(ts_state);
991
992 return err;
993 }
994 @@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
995 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
996 struct blkcipher_walk walk;
997 int err;
998 + int ts_state;
999
1000 padlock_reset_key();
1001
1002 blkcipher_walk_init(&walk, dst, src, nbytes);
1003 err = blkcipher_walk_virt(desc, &walk);
1004
1005 + ts_state = irq_ts_save();
1006 while ((nbytes = walk.nbytes)) {
1007 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
1008 ctx->D, walk.iv, &ctx->cword.decrypt,
1009 @@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
1010 err = blkcipher_walk_done(desc, &walk, nbytes);
1011 }
1012
1013 + irq_ts_restore(ts_state);
1014 return err;
1015 }
1016
1017 diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
1018 index c666b4e..355f8c6 100644
1019 --- a/drivers/crypto/padlock-sha.c
1020 +++ b/drivers/crypto/padlock-sha.c
1021 @@ -22,6 +22,7 @@
1022 #include <linux/interrupt.h>
1023 #include <linux/kernel.h>
1024 #include <linux/scatterlist.h>
1025 +#include <asm/i387.h>
1026 #include "padlock.h"
1027
1028 #define SHA1_DEFAULT_FALLBACK "sha1-generic"
1029 @@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
1030 * PadLock microcode needs it that big. */
1031 char buf[128+16];
1032 char *result = NEAREST_ALIGNED(buf);
1033 + int ts_state;
1034
1035 ((uint32_t *)result)[0] = SHA1_H0;
1036 ((uint32_t *)result)[1] = SHA1_H1;
1037 @@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
1038 ((uint32_t *)result)[3] = SHA1_H3;
1039 ((uint32_t *)result)[4] = SHA1_H4;
1040
1041 + /* prevent taking the spurious DNA fault with padlock. */
1042 + ts_state = irq_ts_save();
1043 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
1044 : "+S"(in), "+D"(result)
1045 : "c"(count), "a"(0));
1046 + irq_ts_restore(ts_state);
1047
1048 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
1049 }
1050 @@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
1051 * PadLock microcode needs it that big. */
1052 char buf[128+16];
1053 char *result = NEAREST_ALIGNED(buf);
1054 + int ts_state;
1055
1056 ((uint32_t *)result)[0] = SHA256_H0;
1057 ((uint32_t *)result)[1] = SHA256_H1;
1058 @@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
1059 ((uint32_t *)result)[6] = SHA256_H6;
1060 ((uint32_t *)result)[7] = SHA256_H7;
1061
1062 + /* prevent taking the spurious DNA fault with padlock. */
1063 + ts_state = irq_ts_save();
1064 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
1065 : "+S"(in), "+D"(result)
1066 : "c"(count), "a"(0));
1067 + irq_ts_restore(ts_state);
1068
1069 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
1070 }
1071 diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
1072 index 9686734..711ca08 100644
1073 --- a/drivers/i2c/Kconfig
1074 +++ b/drivers/i2c/Kconfig
1075 @@ -38,6 +38,20 @@ config I2C_CHARDEV
1076 This support is also available as a module. If so, the module
1077 will be called i2c-dev.
1078
1079 +config I2C_HELPER_AUTO
1080 + bool "Autoselect pertinent helper modules"
1081 + default y
1082 + help
1083 + Some I2C bus drivers require so-called "I2C algorithm" modules
1084 + to work. These are basically software-only abstractions of generic
1085 + I2C interfaces. This option will autoselect them so that you don't
1086 + have to care.
1087 +
1088 + Unselect this only if you need to enable additional helper
1089 + modules, for example for use with external I2C bus drivers.
1090 +
1091 + In doubt, say Y.
1092 +
1093 source drivers/i2c/algos/Kconfig
1094 source drivers/i2c/busses/Kconfig
1095 source drivers/i2c/chips/Kconfig
1096 diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
1097 index 7137a17..b788579 100644
1098 --- a/drivers/i2c/algos/Kconfig
1099 +++ b/drivers/i2c/algos/Kconfig
1100 @@ -2,15 +2,20 @@
1101 # I2C algorithm drivers configuration
1102 #
1103
1104 +menu "I2C Algorithms"
1105 + depends on !I2C_HELPER_AUTO
1106 +
1107 config I2C_ALGOBIT
1108 - tristate
1109 + tristate "I2C bit-banging interfaces"
1110
1111 config I2C_ALGOPCF
1112 - tristate
1113 + tristate "I2C PCF 8584 interfaces"
1114
1115 config I2C_ALGOPCA
1116 - tristate
1117 + tristate "I2C PCA 9564 interfaces"
1118
1119 config I2C_ALGO_SGI
1120 tristate
1121 depends on SGI_IP22 || SGI_IP32 || X86_VISWS
1122 +
1123 +endmenu
1124 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
1125 index d0175f4..08a7384 100644
1126 --- a/drivers/i2c/i2c-core.c
1127 +++ b/drivers/i2c/i2c-core.c
1128 @@ -1196,9 +1196,11 @@ i2c_new_probed_device(struct i2c_adapter *adap,
1129 if ((addr_list[i] & ~0x07) == 0x30
1130 || (addr_list[i] & ~0x0f) == 0x50
1131 || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) {
1132 + union i2c_smbus_data data;
1133 +
1134 if (i2c_smbus_xfer(adap, addr_list[i], 0,
1135 I2C_SMBUS_READ, 0,
1136 - I2C_SMBUS_BYTE, NULL) >= 0)
1137 + I2C_SMBUS_BYTE, &data) >= 0)
1138 break;
1139 } else {
1140 if (i2c_smbus_xfer(adap, addr_list[i], 0,
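
The i2c-core fix above matters because the receive buffer is not optional on reads: with I2C_SMBUS_READ and I2C_SMBUS_BYTE the bus driver stores the byte it read through the data pointer, and some drivers dereference it unconditionally, so probing with NULL could oops. The corrected calling pattern:

    union i2c_smbus_data data;         /* somewhere for the byte to land */

    if (i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
                       I2C_SMBUS_BYTE, &data) >= 0)
            ;                          /* device answered; see data.byte */
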
1141 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
1142 index 0cc854e..614f9ce 100644
1143 --- a/drivers/ide/ide-cd.c
1144 +++ b/drivers/ide/ide-cd.c
1145 @@ -1298,6 +1298,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1146
1147 int stat;
1148 struct request req;
1149 + u32 blocklen;
1150
1151 ide_cd_init_rq(drive, &req);
1152
1153 @@ -1314,23 +1315,24 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1154 /*
1155 * Sanity check the given block size
1156 */
1157 - switch (capbuf.blocklen) {
1158 - case __constant_cpu_to_be32(512):
1159 - case __constant_cpu_to_be32(1024):
1160 - case __constant_cpu_to_be32(2048):
1161 - case __constant_cpu_to_be32(4096):
1162 + blocklen = be32_to_cpu(capbuf.blocklen);
1163 + switch (blocklen) {
1164 + case 512:
1165 + case 1024:
1166 + case 2048:
1167 + case 4096:
1168 break;
1169 default:
1170 printk(KERN_ERR "%s: weird block size %u\n",
1171 - drive->name, capbuf.blocklen);
1172 + drive->name, blocklen);
1173 printk(KERN_ERR "%s: default to 2kb block size\n",
1174 drive->name);
1175 - capbuf.blocklen = __constant_cpu_to_be32(2048);
1176 + blocklen = 2048;
1177 break;
1178 }
1179
1180 *capacity = 1 + be32_to_cpu(capbuf.lba);
1181 - *sectors_per_frame = be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
1182 + *sectors_per_frame = blocklen >> SECTOR_BITS;
1183 return 0;
1184 }
1185
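
The ide-cd change above is standard endianness discipline: READ CAPACITY returns big-endian fields, so convert once at the boundary with be32_to_cpu() and work in host byte order afterwards. The old code compared capbuf.blocklen against __constant_cpu_to_be32() constants, which worked, but then printed the raw value, so the "weird block size" message showed a byte-swapped number on little-endian machines:

    u32 blocklen = be32_to_cpu(capbuf.blocklen); /* wire (BE) -> host, once */
    switch (blocklen) {                          /* compare in host order   */
    case 512: case 1024: case 2048: case 4096:
            break;
    default:
            printk(KERN_ERR "%s: weird block size %u\n",
                   drive->name, blocklen);       /* now prints sanely */
    }
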
1186 diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
1187 index 992b1cf..0cfddf4 100644
1188 --- a/drivers/ide/pci/cs5520.c
1189 +++ b/drivers/ide/pci/cs5520.c
1190 @@ -123,6 +123,7 @@ static const struct ide_dma_ops cs5520_dma_ops = {
1191 #define DECLARE_CS_DEV(name_str) \
1192 { \
1193 .name = name_str, \
1194 + .enablebits = { {0x60, 0x01, 0x01}, {0x60, 0x02, 0x02} }, \
1195 .port_ops = &cs5520_port_ops, \
1196 .dma_ops = &cs5520_dma_ops, \
1197 .host_flags = IDE_HFLAG_ISA_PORTS | \
1198 diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
1199 index 6ab0411..cbf6472 100644
1200 --- a/drivers/ide/pci/it821x.c
1201 +++ b/drivers/ide/pci/it821x.c
1202 @@ -512,8 +512,14 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
1203 }
1204
1205 static struct ide_dma_ops it821x_pass_through_dma_ops = {
1206 + .dma_host_set = ide_dma_host_set,
1207 + .dma_setup = ide_dma_setup,
1208 + .dma_exec_cmd = ide_dma_exec_cmd,
1209 .dma_start = it821x_dma_start,
1210 .dma_end = it821x_dma_end,
1211 + .dma_test_irq = ide_dma_test_irq,
1212 + .dma_timeout = ide_dma_timeout,
1213 + .dma_lost_irq = ide_dma_lost_irq,
1214 };
1215
1216 /**
1217 diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
1218 index dd13a37..3a3e4c1 100644
1219 --- a/drivers/misc/acer-wmi.c
1220 +++ b/drivers/misc/acer-wmi.c
1221 @@ -742,11 +742,30 @@ static acpi_status get_u32(u32 *value, u32 cap)
1222
1223 static acpi_status set_u32(u32 value, u32 cap)
1224 {
1225 + acpi_status status;
1226 +
1227 if (interface->capability & cap) {
1228 switch (interface->type) {
1229 case ACER_AMW0:
1230 return AMW0_set_u32(value, cap, interface);
1231 case ACER_AMW0_V2:
1232 + if (cap == ACER_CAP_MAILLED)
1233 + return AMW0_set_u32(value, cap, interface);
1234 +
1235 + /*
1236 + * On some models, some WMID methods don't toggle
1237 + * properly. For those cases, we want to run the AMW0
1238 + * method afterwards to be certain we've really toggled
1239 + * the device state.
1240 + */
1241 + if (cap == ACER_CAP_WIRELESS ||
1242 + cap == ACER_CAP_BLUETOOTH) {
1243 + status = WMID_set_u32(value, cap, interface);
1244 + if (ACPI_FAILURE(status))
1245 + return status;
1246 +
1247 + return AMW0_set_u32(value, cap, interface);
1248 + }
1249 case ACER_WMID:
1250 return WMID_set_u32(value, cap, interface);
1251 default:
1252 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1253 index 6572425..42d7c0a 100644
1254 --- a/drivers/net/r8169.c
1255 +++ b/drivers/net/r8169.c
1256 @@ -1438,8 +1438,10 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1257
1258 rtl_hw_phy_config(dev);
1259
1260 - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1261 - RTL_W8(0x82, 0x01);
1262 + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
1263 + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1264 + RTL_W8(0x82, 0x01);
1265 + }
1266
1267 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
1268
1269 diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
1270 index 076d88b..aefd4f6 100644
1271 --- a/drivers/net/wireless/rtl8187.h
1272 +++ b/drivers/net/wireless/rtl8187.h
1273 @@ -67,6 +67,10 @@ struct rtl8187_priv {
1274 const struct rtl818x_rf_ops *rf;
1275 struct ieee80211_vif *vif;
1276 int mode;
1277 + /* The mutex protects the TX loopback state.
1278 + * Any attempt to set channels concurrently locks the device.
1279 + */
1280 + struct mutex conf_mutex;
1281
1282 /* rtl8187 specific */
1283 struct ieee80211_channel channels[14];
1284 diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
1285 index 9223ada..d49d1c6 100644
1286 --- a/drivers/net/wireless/rtl8187_dev.c
1287 +++ b/drivers/net/wireless/rtl8187_dev.c
1288 @@ -580,6 +580,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
1289 struct rtl8187_priv *priv = dev->priv;
1290 u32 reg;
1291
1292 + mutex_lock(&priv->conf_mutex);
1293 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
1294 /* Enable TX loopback on MAC level to avoid TX during channel
1295 * changes, as this has be seen to causes problems and the
1296 @@ -610,6 +611,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
1297 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
1298 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
1299 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
1300 + mutex_unlock(&priv->conf_mutex);
1301 return 0;
1302 }
1303
1304 @@ -814,6 +816,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1305 printk(KERN_ERR "rtl8187: Cannot register device\n");
1306 goto err_free_dev;
1307 }
1308 + mutex_init(&priv->conf_mutex);
1309
1310 printk(KERN_INFO "%s: hwaddr %s, rtl8187 V%d + %s\n",
1311 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
1312 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1313 index 338a3f9..c14de8e 100644
1314 --- a/drivers/pci/quirks.c
1315 +++ b/drivers/pci/quirks.c
1316 @@ -1683,9 +1683,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
1317 */
1318 static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
1319 {
1320 - /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */
1321 + /*
1322 + * Only disable the VPD capability for 5706, 5706S, 5708,
1323 + * 5708S and 5709 rev. A
1324 + */
1325 if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
1326 + (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
1327 (dev->device == PCI_DEVICE_ID_NX2_5708) ||
1328 + (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
1329 ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
1330 (dev->revision & 0xf0) == 0x0)) {
1331 if (dev->vpd)
1332 diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
1333 index da876d3..74d12b5 100644
1334 --- a/drivers/scsi/hptiop.c
1335 +++ b/drivers/scsi/hptiop.c
1336 @@ -1249,6 +1249,13 @@ static struct pci_device_id hptiop_id_table[] = {
1337 { PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
1338 { PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
1339 { PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
1340 + { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
1341 + { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
1342 + { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
1343 + { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
1344 + { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
1345 + { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
1346 + { PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
1347 { PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
1348 { PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
1349 { PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
1350 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
1351 index 8dd88fc..8728e87 100644
1352 --- a/drivers/scsi/qla2xxx/qla_attr.c
1353 +++ b/drivers/scsi/qla2xxx/qla_attr.c
1354 @@ -972,26 +972,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
1355 }
1356
1357 static void
1358 -qla2x00_get_rport_loss_tmo(struct fc_rport *rport)
1359 +qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1360 {
1361 - struct Scsi_Host *host = rport_to_shost(rport);
1362 - scsi_qla_host_t *ha = shost_priv(host);
1363 -
1364 - rport->dev_loss_tmo = ha->port_down_retry_count + 5;
1365 + if (timeout)
1366 + rport->dev_loss_tmo = timeout;
1367 + else
1368 + rport->dev_loss_tmo = 1;
1369 }
1370
1371 static void
1372 -qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1373 +qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1374 {
1375 struct Scsi_Host *host = rport_to_shost(rport);
1376 - scsi_qla_host_t *ha = shost_priv(host);
1377 + fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1378 +
1379 + qla2x00_abort_fcport_cmds(fcport);
1380 +
1381 + /*
1382 + * Transport has effectively 'deleted' the rport, clear
1383 + * all local references.
1384 + */
1385 + spin_lock_irq(host->host_lock);
1386 + fcport->rport = NULL;
1387 + *((fc_port_t **)rport->dd_data) = NULL;
1388 + spin_unlock_irq(host->host_lock);
1389 +}
1390
1391 - if (timeout)
1392 - ha->port_down_retry_count = timeout;
1393 - else
1394 - ha->port_down_retry_count = 1;
1395 +static void
1396 +qla2x00_terminate_rport_io(struct fc_rport *rport)
1397 +{
1398 + fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1399
1400 - rport->dev_loss_tmo = ha->port_down_retry_count + 5;
1401 + qla2x00_abort_fcport_cmds(fcport);
1402 + scsi_target_unblock(&rport->dev);
1403 }
1404
1405 static int
1406 @@ -1248,11 +1261,12 @@ struct fc_function_template qla2xxx_transport_functions = {
1407 .get_starget_port_id = qla2x00_get_starget_port_id,
1408 .show_starget_port_id = 1,
1409
1410 - .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1411 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1412 .show_rport_dev_loss_tmo = 1,
1413
1414 .issue_fc_host_lip = qla2x00_issue_lip,
1415 + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1416 + .terminate_rport_io = qla2x00_terminate_rport_io,
1417 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1418
1419 .vport_create = qla24xx_vport_create,
1420 @@ -1291,11 +1305,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1421 .get_starget_port_id = qla2x00_get_starget_port_id,
1422 .show_starget_port_id = 1,
1423
1424 - .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1425 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1426 .show_rport_dev_loss_tmo = 1,
1427
1428 .issue_fc_host_lip = qla2x00_issue_lip,
1429 + .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1430 + .terminate_rport_io = qla2x00_terminate_rport_io,
1431 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1432 };
1433
1434 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
1435 index 8dd6000..7b0ddc8 100644
1436 --- a/drivers/scsi/qla2xxx/qla_def.h
1437 +++ b/drivers/scsi/qla2xxx/qla_def.h
1438 @@ -1544,7 +1544,6 @@ typedef struct fc_port {
1439 int login_retry;
1440 atomic_t port_down_timer;
1441
1442 - spinlock_t rport_lock;
1443 struct fc_rport *rport, *drport;
1444 u32 supported_classes;
1445
1446 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
1447 index 9b4bebe..5a50fb7 100644
1448 --- a/drivers/scsi/qla2xxx/qla_gbl.h
1449 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
1450 @@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
1451 extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
1452 uint16_t, uint16_t);
1453
1454 +extern void qla2x00_abort_fcport_cmds(fc_port_t *);
1455 +
1456 /*
1457 * Global Functions in qla_mid.c source file.
1458 */
1459 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1460 index bbbc5a6..c7388fa 100644
1461 --- a/drivers/scsi/qla2xxx/qla_init.c
1462 +++ b/drivers/scsi/qla2xxx/qla_init.c
1463 @@ -1864,12 +1864,11 @@ qla2x00_rport_del(void *data)
1464 {
1465 fc_port_t *fcport = data;
1466 struct fc_rport *rport;
1467 - unsigned long flags;
1468
1469 - spin_lock_irqsave(&fcport->rport_lock, flags);
1470 + spin_lock_irq(fcport->ha->host->host_lock);
1471 rport = fcport->drport;
1472 fcport->drport = NULL;
1473 - spin_unlock_irqrestore(&fcport->rport_lock, flags);
1474 + spin_unlock_irq(fcport->ha->host->host_lock);
1475 if (rport)
1476 fc_remote_port_delete(rport);
1477 }
1478 @@ -1898,7 +1897,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1479 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1480 fcport->flags = FCF_RLC_SUPPORT;
1481 fcport->supported_classes = FC_COS_UNSPECIFIED;
1482 - spin_lock_init(&fcport->rport_lock);
1483
1484 return fcport;
1485 }
1486 @@ -2243,28 +2241,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
1487 {
1488 struct fc_rport_identifiers rport_ids;
1489 struct fc_rport *rport;
1490 - unsigned long flags;
1491
1492 if (fcport->drport)
1493 qla2x00_rport_del(fcport);
1494 - if (fcport->rport)
1495 - return;
1496
1497 rport_ids.node_name = wwn_to_u64(fcport->node_name);
1498 rport_ids.port_name = wwn_to_u64(fcport->port_name);
1499 rport_ids.port_id = fcport->d_id.b.domain << 16 |
1500 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1501 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1502 - rport = fc_remote_port_add(ha->host, 0, &rport_ids);
1503 + fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
1504 if (!rport) {
1505 qla_printk(KERN_WARNING, ha,
1506 "Unable to allocate fc remote port!\n");
1507 return;
1508 }
1509 - spin_lock_irqsave(&fcport->rport_lock, flags);
1510 - fcport->rport = rport;
1511 + spin_lock_irq(fcport->ha->host->host_lock);
1512 *((fc_port_t **)rport->dd_data) = fcport;
1513 - spin_unlock_irqrestore(&fcport->rport_lock, flags);
1514 + spin_unlock_irq(fcport->ha->host->host_lock);
1515
1516 rport->supported_classes = fcport->supported_classes;
1517
1518 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1519 index 48eaa3b..047ee64 100644
1520 --- a/drivers/scsi/qla2xxx/qla_os.c
1521 +++ b/drivers/scsi/qla2xxx/qla_os.c
1522 @@ -388,7 +388,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1523 }
1524
1525 /* Close window on fcport/rport state-transitioning. */
1526 - if (!*(fc_port_t **)rport->dd_data) {
1527 + if (fcport->drport) {
1528 cmd->result = DID_IMM_RETRY << 16;
1529 goto qc_fail_command;
1530 }
1531 @@ -455,7 +455,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1532 }
1533
1534 /* Close window on fcport/rport state-transitioning. */
1535 - if (!*(fc_port_t **)rport->dd_data) {
1536 + if (fcport->drport) {
1537 cmd->result = DID_IMM_RETRY << 16;
1538 goto qc24_fail_command;
1539 }
1540 @@ -617,6 +617,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
1541 return (return_status);
1542 }
1543
1544 +void
1545 +qla2x00_abort_fcport_cmds(fc_port_t *fcport)
1546 +{
1547 + int cnt;
1548 + unsigned long flags;
1549 + srb_t *sp;
1550 + scsi_qla_host_t *ha = fcport->ha;
1551 + scsi_qla_host_t *pha = to_qla_parent(ha);
1552 +
1553 + spin_lock_irqsave(&pha->hardware_lock, flags);
1554 + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1555 + sp = pha->outstanding_cmds[cnt];
1556 + if (!sp)
1557 + continue;
1558 + if (sp->fcport != fcport)
1559 + continue;
1560 +
1561 + spin_unlock_irqrestore(&pha->hardware_lock, flags);
1562 + if (ha->isp_ops->abort_command(ha, sp)) {
1563 + DEBUG2(qla_printk(KERN_WARNING, ha,
1564 + "Abort failed -- %lx\n", sp->cmd->serial_number));
1565 + } else {
1566 + if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
1567 + QLA_SUCCESS)
1568 + DEBUG2(qla_printk(KERN_WARNING, ha,
1569 + "Abort failed while waiting -- %lx\n",
1570 + sp->cmd->serial_number));
1571 +
1572 + }
1573 + spin_lock_irqsave(&pha->hardware_lock, flags);
1574 + }
1575 + spin_unlock_irqrestore(&pha->hardware_lock, flags);
1576 +}
1577 +
1578 static void
1579 qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
1580 {
1581 @@ -1073,7 +1107,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1582 else
1583 scsi_deactivate_tcq(sdev, ha->max_q_depth);
1584
1585 - rport->dev_loss_tmo = ha->port_down_retry_count + 5;
1586 + rport->dev_loss_tmo = ha->port_down_retry_count;
1587
1588 return 0;
1589 }
1590 @@ -1813,7 +1847,6 @@ static inline void
1591 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1592 int defer)
1593 {
1594 - unsigned long flags;
1595 struct fc_rport *rport;
1596
1597 if (!fcport->rport)
1598 @@ -1821,19 +1854,13 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1599
1600 rport = fcport->rport;
1601 if (defer) {
1602 - spin_lock_irqsave(&fcport->rport_lock, flags);
1603 + spin_lock_irq(ha->host->host_lock);
1604 fcport->drport = rport;
1605 - fcport->rport = NULL;
1606 - *(fc_port_t **)rport->dd_data = NULL;
1607 - spin_unlock_irqrestore(&fcport->rport_lock, flags);
1608 + spin_unlock_irq(ha->host->host_lock);
1609 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
1610 - } else {
1611 - spin_lock_irqsave(&fcport->rport_lock, flags);
1612 - fcport->rport = NULL;
1613 - *(fc_port_t **)rport->dd_data = NULL;
1614 - spin_unlock_irqrestore(&fcport->rport_lock, flags);
1615 + qla2xxx_wake_dpc(ha);
1616 + } else
1617 fc_remote_port_delete(rport);
1618 - }
1619 }
1620
1621 /*
1622 diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
1623 index 75a64a6..b29360e 100644
1624 --- a/drivers/scsi/scsi_transport_spi.c
1625 +++ b/drivers/scsi/scsi_transport_spi.c
1626 @@ -366,12 +366,14 @@ spi_transport_rd_attr(rti, "%d\n");
1627 spi_transport_rd_attr(pcomp_en, "%d\n");
1628 spi_transport_rd_attr(hold_mcs, "%d\n");
1629
1630 -/* we only care about the first child device so we return 1 */
1631 +/* we only care about the first child device that's a real SCSI device
1632 + * so we return 1 to terminate the iteration when we find it */
1633 static int child_iter(struct device *dev, void *data)
1634 {
1635 - struct scsi_device *sdev = to_scsi_device(dev);
1636 + if (!scsi_is_sdev_device(dev))
1637 + return 0;
1638
1639 - spi_dv_device(sdev);
1640 + spi_dv_device(to_scsi_device(dev));
1641 return 1;
1642 }
1643
1644 diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
1645 index 0fe031f..1bcf3c3 100644
1646 --- a/drivers/scsi/ses.c
1647 +++ b/drivers/scsi/ses.c
1648 @@ -345,14 +345,14 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
1649 return 0;
1650 }
1651
1652 -#define VPD_INQUIRY_SIZE 512
1653 +#define VPD_INQUIRY_SIZE 36
1654
1655 static void ses_match_to_enclosure(struct enclosure_device *edev,
1656 struct scsi_device *sdev)
1657 {
1658 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
1659 unsigned char *desc;
1660 - int len;
1661 + u16 vpd_len;
1662 struct efd efd = {
1663 .addr = 0,
1664 };
1665 @@ -372,9 +372,19 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
1666 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
1667 goto free;
1668
1669 - len = (buf[2] << 8) + buf[3];
1670 + vpd_len = (buf[2] << 8) + buf[3];
1671 + kfree(buf);
1672 + buf = kmalloc(vpd_len, GFP_KERNEL);
1673 + if (!buf)
1674 + return;
1675 + cmd[3] = vpd_len >> 8;
1676 + cmd[4] = vpd_len & 0xff;
1677 + if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
1678 + vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
1679 + goto free;
1680 +
1681 desc = buf + 4;
1682 - while (desc < buf + len) {
1683 + while (desc < buf + vpd_len) {
1684 enum scsi_protocol proto = desc[0] >> 4;
1685 u8 code_set = desc[0] & 0x0f;
1686 u8 piv = desc[1] & 0x80;
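
The ses.c fix reads the Device Identification VPD page in two steps: a fixed 36-byte INQUIRY first, then a second request sized from the big-endian page-length field in bytes 2-3 of the header. A hedged sketch of just the length extraction and the rebuilt CDB allocation-length bytes (the buffer contents below are made-up example data):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* First 4 header bytes of a VPD page 0x83 response (example data) */
        uint8_t buf[4] = { 0x00, 0x83, 0x01, 0x2c };
        uint8_t cmd[6] = { 0x12 /* INQUIRY */, 0x01, 0x83, 0, 0, 0 };

        uint16_t vpd_len = (buf[2] << 8) + buf[3];   /* big-endian length */

        /* Re-issue INQUIRY with allocation length = vpd_len */
        cmd[3] = vpd_len >> 8;
        cmd[4] = vpd_len & 0xff;

        printf("page length %u; CDB alloc len bytes = %02x %02x\n",
               vpd_len, cmd[3], cmd[4]);
        return 0;
}
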
1687 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1688 index fe47d14..2fdbc10 100644
1689 --- a/drivers/usb/core/message.c
1690 +++ b/drivers/usb/core/message.c
1691 @@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1692 continue;
1693 dev_dbg(&dev->dev, "unregistering interface %s\n",
1694 interface->dev.bus_id);
1695 - device_del(&interface->dev);
1696 usb_remove_sysfs_intf_files(interface);
1697 + device_del(&interface->dev);
1698 }
1699
1700 /* Now that the interfaces are unbound, nobody should
1701 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1702 index 0ff4a39..7ee2abc 100644
1703 --- a/drivers/usb/serial/ftdi_sio.c
1704 +++ b/drivers/usb/serial/ftdi_sio.c
1705 @@ -553,6 +553,7 @@ static struct usb_device_id id_table_combined [] = {
1706 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
1707 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
1708 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
1709 + { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
1710 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
1711 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
1712 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
1713 @@ -636,6 +637,10 @@ static struct usb_device_id id_table_combined [] = {
1714 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1715 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
1716 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1717 + { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
1718 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1719 + { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
1720 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1721 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
1722 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
1723 { }, /* Optional parameter entry */
1724 diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
1725 index 8302eca..ac23a3a 100644
1726 --- a/drivers/usb/serial/ftdi_sio.h
1727 +++ b/drivers/usb/serial/ftdi_sio.h
1728 @@ -524,6 +524,7 @@
1729 #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
1730 #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
1731 #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
1732 +#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
1733 #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
1734
1735 /*
1736 @@ -815,6 +816,11 @@
1737 #define OLIMEX_VID 0x15BA
1738 #define OLIMEX_ARM_USB_OCD_PID 0x0003
1739
1740 +/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
1741 +/* FTDI 2232C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
1742 +#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
1743 +#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
1744 +
1745 /* www.elsterelectricity.com Elster Unicom III Optical Probe */
1746 #define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
1747
1748 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1749 index 2a0dd1b..63287ad 100644
1750 --- a/drivers/usb/serial/pl2303.c
1751 +++ b/drivers/usb/serial/pl2303.c
1752 @@ -89,7 +89,6 @@ static struct usb_device_id id_table [] = {
1753 { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
1754 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
1755 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
1756 - { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
1757 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
1758 { } /* Terminating entry */
1759 };
1760 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1761 index 6ac3bbc..a3bd039 100644
1762 --- a/drivers/usb/serial/pl2303.h
1763 +++ b/drivers/usb/serial/pl2303.h
1764 @@ -107,10 +107,6 @@
1765 #define COREGA_VENDOR_ID 0x07aa
1766 #define COREGA_PRODUCT_ID 0x002a
1767
1768 -/* HL HL-340 (ID: 4348:5523) */
1769 -#define HL340_VENDOR_ID 0x4348
1770 -#define HL340_PRODUCT_ID 0x5523
1771 -
1772 /* Y.C. Cable U.S.A., Inc - USB to RS-232 */
1773 #define YCCABLE_VENDOR_ID 0x05ad
1774 #define YCCABLE_PRODUCT_ID 0x0fba
1775 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1776 index db1db4c..38034e2 100644
1777 --- a/drivers/usb/serial/usb-serial.c
1778 +++ b/drivers/usb/serial/usb-serial.c
1779 @@ -119,9 +119,6 @@ static void return_serial(struct usb_serial *serial)
1780
1781 dbg("%s", __func__);
1782
1783 - if (serial == NULL)
1784 - return;
1785 -
1786 for (i = 0; i < serial->num_ports; ++i) {
1787 serial_table[serial->minor + i] = NULL;
1788 }
1789 @@ -140,7 +137,8 @@ static void destroy_serial(struct kref *kref)
1790 serial->type->shutdown(serial);
1791
1792 /* return the minor range that this device had */
1793 - return_serial(serial);
1794 + if (serial->minor != SERIAL_TTY_NO_MINOR)
1795 + return_serial(serial);
1796
1797 for (i = 0; i < serial->num_ports; ++i)
1798 serial->port[i]->open_count = 0;
1799 @@ -562,6 +560,7 @@ static struct usb_serial * create_serial (struct usb_device *dev,
1800 serial->interface = interface;
1801 kref_init(&serial->kref);
1802 mutex_init(&serial->disc_mutex);
1803 + serial->minor = SERIAL_TTY_NO_MINOR;
1804
1805 return serial;
1806 }
1807 diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
1808 index 3fcde9f..d8d6633 100644
1809 --- a/drivers/usb/storage/scsiglue.c
1810 +++ b/drivers/usb/storage/scsiglue.c
1811 @@ -73,7 +73,6 @@ static const char* host_info(struct Scsi_Host *host)
1812 static int slave_alloc (struct scsi_device *sdev)
1813 {
1814 struct us_data *us = host_to_us(sdev->host);
1815 - struct usb_host_endpoint *bulk_in_ep;
1816
1817 /*
1818 * Set the INQUIRY transfer length to 36. We don't use any of
1819 @@ -82,16 +81,22 @@ static int slave_alloc (struct scsi_device *sdev)
1820 */
1821 sdev->inquiry_len = 36;
1822
1823 - /* Scatter-gather buffers (all but the last) must have a length
1824 - * divisible by the bulk maxpacket size. Otherwise a data packet
1825 - * would end up being short, causing a premature end to the data
1826 - * transfer. We'll use the maxpacket value of the bulk-IN pipe
1827 - * to set the SCSI device queue's DMA alignment mask.
1828 + /* USB has unusual DMA-alignment requirements: Although the
1829 + * starting address of each scatter-gather element doesn't matter,
1830 + * the length of each element except the last must be divisible
1831 + * by the Bulk maxpacket value. There's currently no way to
1832 + * express this by block-layer constraints, so we'll cop out
1833 + * and simply require addresses to be aligned at 512-byte
1834 + * boundaries. This is okay since most block I/O involves
1835 + * hardware sectors that are multiples of 512 bytes in length,
1836 + * and since host controllers up through USB 2.0 have maxpacket
1837 + * values no larger than 512.
1838 + *
1839 + * But it doesn't suffice for Wireless USB, where Bulk maxpacket
1840 + * values can be as large as 2048. To make that work properly
1841 + * will require changes to the block layer.
1842 */
1843 - bulk_in_ep = us->pusb_dev->ep_in[usb_pipeendpoint(us->recv_bulk_pipe)];
1844 - blk_queue_update_dma_alignment(sdev->request_queue,
1845 - le16_to_cpu(bulk_in_ep->desc.wMaxPacketSize) - 1);
1846 - /* wMaxPacketSize must be a power of 2 */
1847 + blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
1848
1849 /*
1850 * The UFI spec treats the Peripheral Qualifier bits in an
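
The replacement call passes (512 - 1) because blk_queue_update_dma_alignment() takes a mask of (alignment - 1): an address is acceptable iff its low bits under the mask are zero, so mask 511 requires 512-byte-aligned buffers. A quick sketch of what that mask test means (plain C, no block layer):

#include <stdint.h>
#include <stdio.h>

/* An address satisfies "alignment = mask + 1" iff (addr & mask) == 0. */
static int dma_aligned(uintptr_t addr, uintptr_t mask)
{
        return (addr & mask) == 0;
}

int main(void)
{
        uintptr_t mask = 512 - 1;

        printf("%d %d\n", dma_aligned(0x1000, mask),   /* 1: 512-aligned */
                          dma_aligned(0x1004, mask));  /* 0: off by 4 bytes */
        return 0;
}
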
1851 diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
1852 index 6610d2d..f2062e1 100644
1853 --- a/drivers/usb/storage/transport.c
1854 +++ b/drivers/usb/storage/transport.c
1855 @@ -1034,8 +1034,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1856
1857 /* try to compute the actual residue, based on how much data
1858 * was really transferred and what the device tells us */
1859 - if (residue) {
1860 - if (!(us->flags & US_FL_IGNORE_RESIDUE)) {
1861 + if (residue && !(us->flags & US_FL_IGNORE_RESIDUE)) {
1862 +
1863 + /* Heuristically detect devices that generate bogus residues
1864 + * by seeing what happens with INQUIRY and READ CAPACITY
1865 + * commands.
1866 + */
1867 + if (bcs->Status == US_BULK_STAT_OK &&
1868 + scsi_get_resid(srb) == 0 &&
1869 + ((srb->cmnd[0] == INQUIRY &&
1870 + transfer_length == 36) ||
1871 + (srb->cmnd[0] == READ_CAPACITY &&
1872 + transfer_length == 8))) {
1873 + us->flags |= US_FL_IGNORE_RESIDUE;
1874 +
1875 + } else {
1876 residue = min(residue, transfer_length);
1877 scsi_set_resid(srb, max(scsi_get_resid(srb),
1878 (int) residue));
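
The heuristic above can be read as a standalone predicate: if the device reported success and no residue had otherwise been recorded, yet it claimed a residue on a fixed-length INQUIRY (36 bytes) or READ CAPACITY (8 bytes) transfer, its residue reporting is bogus and US_FL_IGNORE_RESIDUE gets set for the rest of the session. A sketch of that decision with the srb fields abstracted away:

#include <stdbool.h>
#include <stdio.h>

#define INQUIRY       0x12
#define READ_CAPACITY 0x25

/* True when a reported residue looks bogus: the command succeeded, no
 * residue was otherwise recorded, and the transfer was one of the two
 * fixed-length commands the heuristic checks. */
static bool residue_is_bogus(unsigned char op, unsigned len,
                             bool status_ok, int resid)
{
        return status_ok && resid == 0 &&
               ((op == INQUIRY && len == 36) ||
                (op == READ_CAPACITY && len == 8));
}

int main(void)
{
        printf("%d\n", residue_is_bogus(INQUIRY, 36, true, 0));       /* 1 */
        printf("%d\n", residue_is_bogus(READ_CAPACITY, 8, false, 0)); /* 0 */
        return 0;
}
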
1879 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1880 index 39a7c11..6a04476 100644
1881 --- a/drivers/usb/storage/unusual_devs.h
1882 +++ b/drivers/usb/storage/unusual_devs.h
1883 @@ -358,14 +358,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
1884 US_FL_FIX_CAPACITY),
1885
1886 /* Reported by Emil Larsson <emil@swip.net> */
1887 -UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
1888 +UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
1889 "NIKON",
1890 "NIKON DSC D80",
1891 US_SC_DEVICE, US_PR_DEVICE, NULL,
1892 US_FL_FIX_CAPACITY),
1893
1894 /* Reported by Ortwin Glueck <odi@odi.ch> */
1895 -UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
1896 +UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
1897 "NIKON",
1898 "NIKON DSC D40",
1899 US_SC_DEVICE, US_PR_DEVICE, NULL,
1900 @@ -1187,6 +1187,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
1901 US_SC_DEVICE, US_PR_DEVICE, NULL,
1902 US_FL_FIX_INQUIRY ),
1903
1904 +/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
1905 +UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
1906 + "Simple Tech/Datafab",
1907 + "CF+SM Reader",
1908 + US_SC_DEVICE, US_PR_DEVICE, NULL,
1909 + US_FL_IGNORE_RESIDUE ),
1910 +
1911 /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
1912 * to the USB storage specification in two ways:
1913 * - They tell us they are using transport protocol CBI. In reality they
1914 @@ -1758,6 +1765,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1915 US_SC_DEVICE, US_PR_DEVICE, NULL,
1916 US_FL_FIX_CAPACITY ),
1917
1918 +/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1919 +UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1920 + "iRiver",
1921 + "MP3 T10",
1922 + US_SC_DEVICE, US_PR_DEVICE, NULL,
1923 + US_FL_IGNORE_RESIDUE ),
1924 +
1925 /*
1926 * David Härdeman <david@2gen.com>
1927 * The key makes the SCSI stack print confusing (but harmless) messages
1928 diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
1929 index 5001bd4..21d61b3 100644
1930 --- a/drivers/video/arkfb.c
1931 +++ b/drivers/video/arkfb.c
1932 @@ -958,20 +958,20 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
1933 /* Prepare PCI device */
1934 rc = pci_enable_device(dev);
1935 if (rc < 0) {
1936 - dev_err(info->dev, "cannot enable PCI device\n");
1937 + dev_err(info->device, "cannot enable PCI device\n");
1938 goto err_enable_device;
1939 }
1940
1941 rc = pci_request_regions(dev, "arkfb");
1942 if (rc < 0) {
1943 - dev_err(info->dev, "cannot reserve framebuffer region\n");
1944 + dev_err(info->device, "cannot reserve framebuffer region\n");
1945 goto err_request_regions;
1946 }
1947
1948 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
1949 if (! par->dac) {
1950 rc = -ENOMEM;
1951 - dev_err(info->dev, "RAMDAC initialization failed\n");
1952 + dev_err(info->device, "RAMDAC initialization failed\n");
1953 goto err_dac;
1954 }
1955
1956 @@ -982,7 +982,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
1957 info->screen_base = pci_iomap(dev, 0, 0);
1958 if (! info->screen_base) {
1959 rc = -ENOMEM;
1960 - dev_err(info->dev, "iomap for framebuffer failed\n");
1961 + dev_err(info->device, "iomap for framebuffer failed\n");
1962 goto err_iomap;
1963 }
1964
1965 @@ -1004,19 +1004,19 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
1966 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
1967 if (! ((rc == 1) || (rc == 2))) {
1968 rc = -EINVAL;
1969 - dev_err(info->dev, "mode %s not found\n", mode_option);
1970 + dev_err(info->device, "mode %s not found\n", mode_option);
1971 goto err_find_mode;
1972 }
1973
1974 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1975 if (rc < 0) {
1976 - dev_err(info->dev, "cannot allocate colormap\n");
1977 + dev_err(info->device, "cannot allocate colormap\n");
1978 goto err_alloc_cmap;
1979 }
1980
1981 rc = register_framebuffer(info);
1982 if (rc < 0) {
1983 - dev_err(info->dev, "cannot register framebugger\n");
1984 + dev_err(info->device, "cannot register framebuffer\n");
1985 goto err_reg_fb;
1986 }
1987
1988 @@ -1090,7 +1090,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1989 struct fb_info *info = pci_get_drvdata(dev);
1990 struct arkfb_info *par = info->par;
1991
1992 - dev_info(info->dev, "suspend\n");
1993 + dev_info(info->device, "suspend\n");
1994
1995 acquire_console_sem();
1996 mutex_lock(&(par->open_lock));
1997 @@ -1121,7 +1121,7 @@ static int ark_pci_resume (struct pci_dev* dev)
1998 struct fb_info *info = pci_get_drvdata(dev);
1999 struct arkfb_info *par = info->par;
2000
2001 - dev_info(info->dev, "resume\n");
2002 + dev_info(info->device, "resume\n");
2003
2004 acquire_console_sem();
2005 mutex_lock(&(par->open_lock));
2006 diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
2007 index 3ca27cb..aa95f83 100644
2008 --- a/drivers/video/aty/radeon_accel.c
2009 +++ b/drivers/video/aty/radeon_accel.c
2010 @@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
2011 OUTREG(DP_WRITE_MSK, 0xffffffff);
2012 OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
2013
2014 + radeon_fifo_wait(2);
2015 + OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
2016 + OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
2017 +
2018 radeon_fifo_wait(2);
2019 OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
2020 OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
2021 @@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
2022 OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
2023 | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
2024
2025 + radeon_fifo_wait(2);
2026 + OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
2027 + OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
2028 +
2029 radeon_fifo_wait(3);
2030 OUTREG(SRC_Y_X, (sy << 16) | sx);
2031 OUTREG(DST_Y_X, (dy << 16) | dx);
2032 @@ -241,8 +249,8 @@ void radeonfb_engine_reset(struct radeonfb_info *rinfo)
2033 INREG(HOST_PATH_CNTL);
2034 OUTREG(HOST_PATH_CNTL, host_path_cntl);
2035
2036 - if (rinfo->family != CHIP_FAMILY_R300 ||
2037 - rinfo->family != CHIP_FAMILY_R350 ||
2038 + if (rinfo->family != CHIP_FAMILY_R300 &&
2039 + rinfo->family != CHIP_FAMILY_R350 &&
2040 rinfo->family != CHIP_FAMILY_RV350)
2041 OUTREG(RBBM_SOFT_RESET, rbbm_soft_reset);
2042
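
The ||-to-&& change in radeonfb_engine_reset() fixes a classic De Morgan slip: "family != R300 || family != R350 || family != RV350" is true for every chip, since no single value can equal all three, so the soft-reset write ran even on the three families the test was meant to exclude. A tiny demonstration:

#include <stdio.h>

enum chip { R100, R300, R350, RV350 };

int main(void)
{
        enum chip family = R300;

        /* Buggy: true for every family; no value equals all three. */
        int buggy = (family != R300 || family != R350 || family != RV350);

        /* Fixed: true only when the family is none of the three. */
        int fixed = (family != R300 && family != R350 && family != RV350);

        printf("buggy=%d fixed=%d\n", buggy, fixed);  /* buggy=1 fixed=0 */
        return 0;
}
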
2043 diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
2044 index 89da27b..2ad06b0 100644
2045 --- a/drivers/video/matrox/matroxfb_maven.c
2046 +++ b/drivers/video/matrox/matroxfb_maven.c
2047 @@ -1266,7 +1266,7 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
2048 ERROR4:;
2049 i2c_detach_client(new_client);
2050 ERROR3:;
2051 - kfree(new_client);
2052 + kfree(data);
2053 ERROR0:;
2054 return err;
2055 }
2056 diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
2057 index 2972f11..8361bd0 100644
2058 --- a/drivers/video/s3fb.c
2059 +++ b/drivers/video/s3fb.c
2060 @@ -903,13 +903,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
2061 /* Prepare PCI device */
2062 rc = pci_enable_device(dev);
2063 if (rc < 0) {
2064 - dev_err(info->dev, "cannot enable PCI device\n");
2065 + dev_err(info->device, "cannot enable PCI device\n");
2066 goto err_enable_device;
2067 }
2068
2069 rc = pci_request_regions(dev, "s3fb");
2070 if (rc < 0) {
2071 - dev_err(info->dev, "cannot reserve framebuffer region\n");
2072 + dev_err(info->device, "cannot reserve framebuffer region\n");
2073 goto err_request_regions;
2074 }
2075
2076 @@ -921,7 +921,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
2077 info->screen_base = pci_iomap(dev, 0, 0);
2078 if (! info->screen_base) {
2079 rc = -ENOMEM;
2080 - dev_err(info->dev, "iomap for framebuffer failed\n");
2081 + dev_err(info->device, "iomap for framebuffer failed\n");
2082 goto err_iomap;
2083 }
2084
2085 @@ -965,19 +965,19 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
2086 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
2087 if (! ((rc == 1) || (rc == 2))) {
2088 rc = -EINVAL;
2089 - dev_err(info->dev, "mode %s not found\n", mode_option);
2090 + dev_err(info->device, "mode %s not found\n", mode_option);
2091 goto err_find_mode;
2092 }
2093
2094 rc = fb_alloc_cmap(&info->cmap, 256, 0);
2095 if (rc < 0) {
2096 - dev_err(info->dev, "cannot allocate colormap\n");
2097 + dev_err(info->device, "cannot allocate colormap\n");
2098 goto err_alloc_cmap;
2099 }
2100
2101 rc = register_framebuffer(info);
2102 if (rc < 0) {
2103 - dev_err(info->dev, "cannot register framebuffer\n");
2104 + dev_err(info->device, "cannot register framebuffer\n");
2105 goto err_reg_fb;
2106 }
2107
2108 @@ -1053,7 +1053,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
2109 struct fb_info *info = pci_get_drvdata(dev);
2110 struct s3fb_info *par = info->par;
2111
2112 - dev_info(info->dev, "suspend\n");
2113 + dev_info(info->device, "suspend\n");
2114
2115 acquire_console_sem();
2116 mutex_lock(&(par->open_lock));
2117 @@ -1085,7 +1085,7 @@ static int s3_pci_resume(struct pci_dev* dev)
2118 struct s3fb_info *par = info->par;
2119 int err;
2120
2121 - dev_info(info->dev, "resume\n");
2122 + dev_info(info->device, "resume\n");
2123
2124 acquire_console_sem();
2125 mutex_lock(&(par->open_lock));
2126 @@ -1102,7 +1102,7 @@ static int s3_pci_resume(struct pci_dev* dev)
2127 if (err) {
2128 mutex_unlock(&(par->open_lock));
2129 release_console_sem();
2130 - dev_err(info->dev, "error %d enabling device for resume\n", err);
2131 + dev_err(info->device, "error %d enabling device for resume\n", err);
2132 return err;
2133 }
2134 pci_set_master(dev);
2135 diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
2136 index 536ab11..f5f282d 100644
2137 --- a/drivers/video/vt8623fb.c
2138 +++ b/drivers/video/vt8623fb.c
2139 @@ -677,13 +677,13 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
2140
2141 rc = pci_enable_device(dev);
2142 if (rc < 0) {
2143 - dev_err(info->dev, "cannot enable PCI device\n");
2144 + dev_err(info->device, "cannot enable PCI device\n");
2145 goto err_enable_device;
2146 }
2147
2148 rc = pci_request_regions(dev, "vt8623fb");
2149 if (rc < 0) {
2150 - dev_err(info->dev, "cannot reserve framebuffer region\n");
2151 + dev_err(info->device, "cannot reserve framebuffer region\n");
2152 goto err_request_regions;
2153 }
2154
2155 @@ -696,14 +696,14 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
2156 info->screen_base = pci_iomap(dev, 0, 0);
2157 if (! info->screen_base) {
2158 rc = -ENOMEM;
2159 - dev_err(info->dev, "iomap for framebuffer failed\n");
2160 + dev_err(info->device, "iomap for framebuffer failed\n");
2161 goto err_iomap_1;
2162 }
2163
2164 par->mmio_base = pci_iomap(dev, 1, 0);
2165 if (! par->mmio_base) {
2166 rc = -ENOMEM;
2167 - dev_err(info->dev, "iomap for MMIO failed\n");
2168 + dev_err(info->device, "iomap for MMIO failed\n");
2169 goto err_iomap_2;
2170 }
2171
2172 @@ -714,7 +714,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
2173 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
2174 info->screen_size = memsize1 << 20;
2175 else {
2176 - dev_err(info->dev, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
2177 + dev_err(info->device, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
2178 info->screen_size = 16 << 20;
2179 }
2180
2181 @@ -731,19 +731,19 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
2182 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
2183 if (! ((rc == 1) || (rc == 2))) {
2184 rc = -EINVAL;
2185 - dev_err(info->dev, "mode %s not found\n", mode_option);
2186 + dev_err(info->device, "mode %s not found\n", mode_option);
2187 goto err_find_mode;
2188 }
2189
2190 rc = fb_alloc_cmap(&info->cmap, 256, 0);
2191 if (rc < 0) {
2192 - dev_err(info->dev, "cannot allocate colormap\n");
2193 + dev_err(info->device, "cannot allocate colormap\n");
2194 goto err_alloc_cmap;
2195 }
2196
2197 rc = register_framebuffer(info);
2198 if (rc < 0) {
2199 - dev_err(info->dev, "cannot register framebugger\n");
2200 + dev_err(info->device, "cannot register framebuffer\n");
2201 goto err_reg_fb;
2202 }
2203
2204 @@ -817,7 +817,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
2205 struct fb_info *info = pci_get_drvdata(dev);
2206 struct vt8623fb_info *par = info->par;
2207
2208 - dev_info(info->dev, "suspend\n");
2209 + dev_info(info->device, "suspend\n");
2210
2211 acquire_console_sem();
2212 mutex_lock(&(par->open_lock));
2213 @@ -848,7 +848,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
2214 struct fb_info *info = pci_get_drvdata(dev);
2215 struct vt8623fb_info *par = info->par;
2216
2217 - dev_info(info->dev, "resume\n");
2218 + dev_info(info->device, "resume\n");
2219
2220 acquire_console_sem();
2221 mutex_lock(&(par->open_lock));
2222 diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
2223 index f58e41d..4276546 100644
2224 --- a/fs/cifs/asn1.c
2225 +++ b/fs/cifs/asn1.c
2226 @@ -400,7 +400,7 @@ asn1_oid_decode(struct asn1_ctx *ctx,
2227 size = eoc - ctx->pointer + 1;
2228
2229 /* first subid actually encodes first two subids */
2230 - if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
2231 + if (size < 2 || size > UINT_MAX/sizeof(unsigned long))
2232 return 0;
2233
2234 *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
2235 diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
2236 index 7013aaf..2434ab0 100644
2237 --- a/fs/cifs/cifs_spnego.c
2238 +++ b/fs/cifs/cifs_spnego.c
2239 @@ -66,8 +66,8 @@ struct key_type cifs_spnego_key_type = {
2240 .describe = user_describe,
2241 };
2242
2243 -#define MAX_VER_STR_LEN 9 /* length of longest version string e.g.
2244 - strlen(";ver=0xFF") */
2245 +#define MAX_VER_STR_LEN 8 /* length of longest version string e.g.
2246 + strlen("ver=0xFF") */
2247 #define MAX_MECH_STR_LEN 13 /* length of longest security mechanism name, eg
2248 in future could have strlen(";sec=ntlmsspi") */
2249 #define MAX_IPV6_ADDR_LEN 42 /* eg FEDC:BA98:7654:3210:FEDC:BA98:7654:3210/60 */
2250 @@ -81,11 +81,15 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
2251 struct key *spnego_key;
2252 const char *hostname = server->hostname;
2253
2254 - /* BB: come up with better scheme for determining length */
2255 - /* length of fields (with semicolons): ver=0xyz ipv4= ipaddress host=
2256 - hostname sec=mechanism uid=0x uid */
2257 - desc_len = MAX_VER_STR_LEN + 5 + MAX_IPV6_ADDR_LEN + 1 + 6 +
2258 - strlen(hostname) + MAX_MECH_STR_LEN + 8 + (sizeof(uid_t) * 2);
2259 + /* length of fields (with semicolons): ver=0xyz ip4=ipaddress
2260 + host=hostname sec=mechanism uid=0xFF user=username */
2261 + desc_len = MAX_VER_STR_LEN +
2262 + 6 /* len of "host=" */ + strlen(hostname) +
2263 + 5 /* len of ";ipv4=" */ + MAX_IPV6_ADDR_LEN +
2264 + MAX_MECH_STR_LEN +
2265 + 7 /* len of ";uid=0x" */ + (sizeof(uid_t) * 2) +
2266 + 6 /* len of ";user=" */ + strlen(sesInfo->userName) + 1;
2267 +
2268 spnego_key = ERR_PTR(-ENOMEM);
2269 description = kzalloc(desc_len, GFP_KERNEL);
2270 if (description == NULL)
2271 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2272 index 86b4d5f..6203609 100644
2273 --- a/fs/cifs/cifsfs.c
2274 +++ b/fs/cifs/cifsfs.c
2275 @@ -175,6 +175,8 @@ out_no_root:
2276 if (inode)
2277 iput(inode);
2278
2279 + cifs_umount(sb, cifs_sb);
2280 +
2281 out_mount_failed:
2282 if (cifs_sb) {
2283 #ifdef CONFIG_CIFS_DFS_UPCALL
2284 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2285 index 2e904bd..227c553 100644
2286 --- a/fs/cifs/inode.c
2287 +++ b/fs/cifs/inode.c
2288 @@ -649,6 +649,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino)
2289 inode->i_fop = &simple_dir_operations;
2290 inode->i_uid = cifs_sb->mnt_uid;
2291 inode->i_gid = cifs_sb->mnt_gid;
2292 + } else if (rc) {
2293 _FreeXid(xid);
2294 iget_failed(inode);
2295 return ERR_PTR(rc);
2296 diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
2297 index d837893..47f9583 100644
2298 --- a/include/asm-sparc64/futex.h
2299 +++ b/include/asm-sparc64/futex.h
2300 @@ -59,7 +59,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
2301 __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
2302 break;
2303 case FUTEX_OP_ANDN:
2304 - __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
2305 + __futex_cas_op("andn\t%2, %4, %1", ret, oldval, uaddr, oparg);
2306 break;
2307 case FUTEX_OP_XOR:
2308 __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
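
The one-word futex fix matters because FUTEX_OP_ANDN means "store oldval AND NOT oparg": the sparc64 `and` instruction computed oldval & oparg instead, and `andn` (and-not) is the instruction that matches the defined semantics. In C terms:

#include <stdio.h>

int main(void)
{
        unsigned int oldval = 0xff, oparg = 0x0f;

        unsigned int wrong = oldval & oparg;   /* what "and" computed: 0x0f */
        unsigned int right = oldval & ~oparg;  /* FUTEX_OP_ANDN's meaning: 0xf0 */

        printf("%#x %#x\n", wrong, right);
        return 0;
}
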
2309 diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
2310 index 0bb9bf5..630eb4e 100644
2311 --- a/include/asm-sparc64/irq.h
2312 +++ b/include/asm-sparc64/irq.h
2313 @@ -90,4 +90,8 @@ static inline unsigned long get_softint(void)
2314 return retval;
2315 }
2316
2317 +extern void *hardirq_stack[NR_CPUS];
2318 +extern void *softirq_stack[NR_CPUS];
2319 +#define __ARCH_HAS_DO_SOFTIRQ
2320 +
2321 #endif
2322 diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
2323 index b163da7..4f18096 100644
2324 --- a/include/asm-sparc64/ptrace.h
2325 +++ b/include/asm-sparc64/ptrace.h
2326 @@ -134,9 +134,9 @@ struct global_reg_snapshot {
2327 unsigned long tnpc;
2328 unsigned long o7;
2329 unsigned long i7;
2330 + unsigned long rpc;
2331 struct thread_info *thread;
2332 unsigned long pad1;
2333 - unsigned long pad2;
2334 };
2335
2336 #define __ARCH_WANT_COMPAT_SYS_PTRACE
2337 @@ -314,9 +314,9 @@ extern void __show_regs(struct pt_regs *);
2338 #define GR_SNAP_TNPC 0x10
2339 #define GR_SNAP_O7 0x18
2340 #define GR_SNAP_I7 0x20
2341 -#define GR_SNAP_THREAD 0x28
2342 -#define GR_SNAP_PAD1 0x30
2343 -#define GR_SNAP_PAD2 0x38
2344 +#define GR_SNAP_RPC 0x28
2345 +#define GR_SNAP_THREAD 0x30
2346 +#define GR_SNAP_PAD1 0x38
2347
2348 #endif /* __KERNEL__ */
2349
2350 diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
2351 index 37672f7..4b683af 100644
2352 --- a/include/asm-x86/i387.h
2353 +++ b/include/asm-x86/i387.h
2354 @@ -13,6 +13,7 @@
2355 #include <linux/sched.h>
2356 #include <linux/kernel_stat.h>
2357 #include <linux/regset.h>
2358 +#include <linux/hardirq.h>
2359 #include <asm/asm.h>
2360 #include <asm/processor.h>
2361 #include <asm/sigcontext.h>
2362 @@ -290,6 +291,37 @@ static inline void kernel_fpu_end(void)
2363 preempt_enable();
2364 }
2365
2366 +/*
2367 + * Some instructions like VIA's padlock instructions generate a spurious
2368 + * DNA fault but don't modify SSE registers. And these instructions
2369 + * get used from interrupt context as well. To prevent these kernel
2370 + * instructions in interrupt context from interacting wrongly with other
2371 + * user/kernel FPU usage, we should use them only within irq_ts_save()/irq_ts_restore().
2372 + */
2373 +static inline int irq_ts_save(void)
2374 +{
2375 + /*
2376 + * If we are in process context, we are ok to take a spurious DNA fault.
2377 + * Otherwise, doing clts() in process context requires pre-emption to
2378 + * be disabled or some heavy lifting like kernel_fpu_begin()
2379 + */
2380 + if (!in_interrupt())
2381 + return 0;
2382 +
2383 + if (read_cr0() & X86_CR0_TS) {
2384 + clts();
2385 + return 1;
2386 + }
2387 +
2388 + return 0;
2389 +}
2390 +
2391 +static inline void irq_ts_restore(int TS_state)
2392 +{
2393 + if (TS_state)
2394 + stts();
2395 +}
2396 +
2397 #ifdef CONFIG_X86_64
2398
2399 static inline void save_init_fpu(struct task_struct *tsk)
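
Callers (for example the VIA PadLock crypto drivers) are expected to bracket the TS-sensitive instructions with the new pair. A hedged usage sketch follows; irq_ts_save()/irq_ts_restore() are the helpers added above, while padlock_op() is a placeholder for the DNA-fault-generating instruction sequence, not a real function:

extern int  irq_ts_save(void);
extern void irq_ts_restore(int ts_state);
extern void padlock_op(void);      /* stand-in for the XCRYPT-style sequence */

void do_padlock_op(void)
{
        int ts_state;

        ts_state = irq_ts_save();  /* clears CR0.TS only in interrupt context */
        padlock_op();              /* would fault spuriously with TS set */
        irq_ts_restore(ts_state);  /* restores TS if we cleared it */
}
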
2400 diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
2401 index 21e89bf..bf2a3d2 100644
2402 --- a/include/asm-x86/spinlock.h
2403 +++ b/include/asm-x86/spinlock.h
2404 @@ -65,7 +65,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
2405 {
2406 int tmp = ACCESS_ONCE(lock->slock);
2407
2408 - return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
2409 + return (((tmp >> 8) - tmp) & 0xff) > 1;
2410 }
2411
2412 static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
2413 @@ -129,7 +129,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
2414 {
2415 int tmp = ACCESS_ONCE(lock->slock);
2416
2417 - return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
2418 + return (((tmp >> 16) - tmp) & 0xffff) > 1;
2419 }
2420
2421 static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
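
In both spinlock hunks, slock packs two ticket counters ("next" in the high half, "owner" in the low half). Masking each counter and then subtracting goes wrong once "next" wraps around while "owner" has not; subtracting first and masking afterwards keeps the difference correct modulo the counter width. A sketch of the failure case for the 8-bit variant:

#include <stdio.h>

int main(void)
{
        unsigned int next = 0x01, owner = 0xff;  /* "next" wrapped past 0xff */

        /* Old test: mask first, then subtract -- wrap makes this negative. */
        int old_way = ((int)(next & 0xff) - (int)(owner & 0xff)) > 1;

        /* New test: subtract first, then mask -- difference is 2 mod 256. */
        int new_way = (((next - owner) & 0xff) > 1);

        printf("old=%d new=%d\n", old_way, new_way);  /* old=0 new=1 */
        return 0;
}
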
2422 diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
2423 index 8f891cb..4a6583d 100644
2424 --- a/include/linux/usb/serial.h
2425 +++ b/include/linux/usb/serial.h
2426 @@ -17,7 +17,8 @@
2427 #include <linux/mutex.h>
2428
2429 #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
2430 -#define SERIAL_TTY_MINORS 255 /* loads of devices :) */
2431 +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
2432 +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */
2433
2434 /* The maximum number of ports one device can grab at once */
2435 #define MAX_NUM_PORTS 8
2436 diff --git a/include/video/radeon.h b/include/video/radeon.h
2437 index 83467e1..099ffa5 100644
2438 --- a/include/video/radeon.h
2439 +++ b/include/video/radeon.h
2440 @@ -527,8 +527,9 @@
2441
2442
2443 /* DSTCACHE_CTLSTAT bit constants */
2444 -#define RB2D_DC_FLUSH (3 << 0)
2445 -#define RB2D_DC_FLUSH_ALL 0xf
2446 +#define RB2D_DC_FLUSH_2D (1 << 0)
2447 +#define RB2D_DC_FREE_2D (1 << 2)
2448 +#define RB2D_DC_FLUSH_ALL (RB2D_DC_FLUSH_2D | RB2D_DC_FREE_2D)
2449 #define RB2D_DC_BUSY (1 << 31)
2450
2451
2452 @@ -741,6 +742,10 @@
2453 #define SOFT_RESET_RB (1 << 6)
2454 #define SOFT_RESET_HDP (1 << 7)
2455
2456 +/* WAIT_UNTIL bit constants */
2457 +#define WAIT_DMA_GUI_IDLE (1 << 9)
2458 +#define WAIT_2D_IDLECLEAN (1 << 16)
2459 +
2460 /* SURFACE_CNTL bit consants */
2461 #define SURF_TRANSLATION_DIS (1 << 8)
2462 #define NONSURF_AP0_SWP_16BPP (1 << 20)
2463 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
2464 index dbd8398..0ffaeb0 100644
2465 --- a/kernel/posix-timers.c
2466 +++ b/kernel/posix-timers.c
2467 @@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
2468 else
2469 schedule_next_timer(timr);
2470
2471 - info->si_overrun = timr->it_overrun_last;
2472 + info->si_overrun += timr->it_overrun_last;
2473 }
2474
2475 if (timr)
2476 unlock_timer(timr, flags);
2477 }
2478
2479 -int posix_timer_event(struct k_itimer *timr,int si_private)
2480 +int posix_timer_event(struct k_itimer *timr, int si_private)
2481 {
2482 - memset(&timr->sigq->info, 0, sizeof(siginfo_t));
2483 + /*
2484 + * FIXME: if ->sigq is queued we can race with
2485 + * dequeue_signal()->do_schedule_next_timer().
2486 + *
2487 + * If dequeue_signal() sees the "right" value of
2488 + * si_sys_private it calls do_schedule_next_timer().
2489 + * We re-queue ->sigq and drop ->it_lock().
2490 + * do_schedule_next_timer() locks the timer
2491 + * and re-schedules it while ->sigq is pending.
2492 + * Not really bad, but not what we want.
2493 + */
2494 timr->sigq->info.si_sys_private = si_private;
2495 - /* Send signal to the process that owns this timer.*/
2496
2497 timr->sigq->info.si_signo = timr->it_sigev_signo;
2498 - timr->sigq->info.si_errno = 0;
2499 timr->sigq->info.si_code = SI_TIMER;
2500 timr->sigq->info.si_tid = timr->it_id;
2501 timr->sigq->info.si_value = timr->it_sigev_value;
2502 @@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
2503 kmem_cache_free(posix_timers_cache, tmr);
2504 tmr = NULL;
2505 }
2506 + memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
2507 return tmr;
2508 }
2509
2510 diff --git a/kernel/relay.c b/kernel/relay.c
2511 index 7de644c..f5a5a96 100644
2512 --- a/kernel/relay.c
2513 +++ b/kernel/relay.c
2514 @@ -832,6 +832,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
2515 size_t n_subbufs = buf->chan->n_subbufs;
2516 size_t read_subbuf;
2517
2518 + if (buf->subbufs_produced == buf->subbufs_consumed &&
2519 + buf->offset == buf->bytes_consumed)
2520 + return;
2521 +
2522 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
2523 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
2524 buf->bytes_consumed = 0;
2525 @@ -863,6 +867,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
2526
2527 relay_file_read_consume(buf, read_pos, 0);
2528
2529 + consumed = buf->subbufs_consumed;
2530 +
2531 if (unlikely(buf->offset > subbuf_size)) {
2532 if (produced == consumed)
2533 return 0;
2534 @@ -881,8 +887,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
2535 if (consumed > produced)
2536 produced += n_subbufs * subbuf_size;
2537
2538 - if (consumed == produced)
2539 + if (consumed == produced) {
2540 + if (buf->offset == subbuf_size &&
2541 + buf->subbufs_produced > buf->subbufs_consumed)
2542 + return 1;
2543 return 0;
2544 + }
2545
2546 return 1;
2547 }
2548 diff --git a/kernel/signal.c b/kernel/signal.c
2549 index 6c0958e..c5bf0c0 100644
2550 --- a/kernel/signal.c
2551 +++ b/kernel/signal.c
2552 @@ -1319,6 +1319,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
2553 q->info.si_overrun++;
2554 goto out;
2555 }
2556 + q->info.si_overrun = 0;
2557
2558 signalfd_notify(t, sig);
2559 pending = group ? &t->signal->shared_pending : &t->pending;
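
Taken together, the posix-timers.c and signal.c hunks change si_overrun from a per-fire snapshot into an accumulator that is zeroed each time the signal is freshly queued, so overruns that occur while the signal sits on the queue are no longer lost. A hedged sketch of the resulting accounting (structure and names are illustrative only):

struct siginfo_sketch { int si_overrun; };

/* On queueing the timer signal: start a fresh count (the signal.c hunk). */
void queue_timer_signal(struct siginfo_sketch *info)
{
        info->si_overrun = 0;
}

/* On each rearm while the signal stays queued: accumulate instead of
 * overwrite (the posix-timers.c hunk), so earlier overruns are kept. */
void schedule_next(struct siginfo_sketch *info, int it_overrun_last)
{
        info->si_overrun += it_overrun_last;
}
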
2560 diff --git a/lib/random32.c b/lib/random32.c
2561 index ca87d86..217d5c4 100644
2562 --- a/lib/random32.c
2563 +++ b/lib/random32.c
2564 @@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state)
2565 return (state->s1 ^ state->s2 ^ state->s3);
2566 }
2567
2568 -static void __set_random32(struct rnd_state *state, unsigned long s)
2569 +/*
2570 + * Handle minimum values for seeds
2571 + */
2572 +static inline u32 __seed(u32 x, u32 m)
2573 {
2574 - if (s == 0)
2575 - s = 1; /* default seed is 1 */
2576 -
2577 -#define LCG(n) (69069 * n)
2578 - state->s1 = LCG(s);
2579 - state->s2 = LCG(state->s1);
2580 - state->s3 = LCG(state->s2);
2581 -
2582 - /* "warm it up" */
2583 - __random32(state);
2584 - __random32(state);
2585 - __random32(state);
2586 - __random32(state);
2587 - __random32(state);
2588 - __random32(state);
2589 + return (x < m) ? x + m : x;
2590 }
2591
2592 /**
2593 @@ -107,7 +96,7 @@ void srandom32(u32 entropy)
2594 */
2595 for_each_possible_cpu (i) {
2596 struct rnd_state *state = &per_cpu(net_rand_state, i);
2597 - __set_random32(state, state->s1 ^ entropy);
2598 + state->s1 = __seed(state->s1 ^ entropy, 1);
2599 }
2600 }
2601 EXPORT_SYMBOL(srandom32);
2602 @@ -122,7 +111,19 @@ static int __init random32_init(void)
2603
2604 for_each_possible_cpu(i) {
2605 struct rnd_state *state = &per_cpu(net_rand_state,i);
2606 - __set_random32(state, i + jiffies);
2607 +
2608 +#define LCG(x) ((x) * 69069) /* super-duper LCG */
2609 + state->s1 = __seed(LCG(i + jiffies), 1);
2610 + state->s2 = __seed(LCG(state->s1), 7);
2611 + state->s3 = __seed(LCG(state->s2), 15);
2612 +
2613 + /* "warm it up" */
2614 + __random32(state);
2615 + __random32(state);
2616 + __random32(state);
2617 + __random32(state);
2618 + __random32(state);
2619 + __random32(state);
2620 }
2621 return 0;
2622 }
2623 @@ -135,13 +136,18 @@ core_initcall(random32_init);
2624 static int __init random32_reseed(void)
2625 {
2626 int i;
2627 - unsigned long seed;
2628
2629 for_each_possible_cpu(i) {
2630 struct rnd_state *state = &per_cpu(net_rand_state,i);
2631 + u32 seeds[3];
2632 +
2633 + get_random_bytes(&seeds, sizeof(seeds));
2634 + state->s1 = __seed(seeds[0], 1);
2635 + state->s2 = __seed(seeds[1], 7);
2636 + state->s3 = __seed(seeds[2], 15);
2637
2638 - get_random_bytes(&seed, sizeof(seed));
2639 - __set_random32(state, seed);
2640 + /* mix it in */
2641 + __random32(state);
2642 }
2643 return 0;
2644 }
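
The Tausworthe state words degenerate below small minimum values, and the new __seed() enforces the minimums used here (1, 7, 15) by bumping an undersized value up by the minimum. A standalone sketch of the per-CPU init path, under the assumption that an arbitrary 32-bit value stands in for `i + jiffies`:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the patch's __seed(): bump undersized values up by the
 * minimum so no state word starts below what the generator tolerates. */
static inline uint32_t seed_min(uint32_t x, uint32_t m)
{
        return (x < m) ? x + m : x;
}

#define LCG(x) ((x) * 69069u)   /* the patch's "super-duper LCG" */

int main(void)
{
        uint32_t entropy = 0;   /* worst case: an all-zero starting value */
        uint32_t s1, s2, s3;

        s1 = seed_min(LCG(entropy), 1);
        s2 = seed_min(LCG(s1), 7);
        s3 = seed_min(LCG(s2), 15);

        printf("s1=%u s2=%u s3=%u\n", s1, s2, s3);
        return 0;
}
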
2645 diff --git a/mm/memory.c b/mm/memory.c
2646 index 2302d22..0755c52 100644
2647 --- a/mm/memory.c
2648 +++ b/mm/memory.c
2649 @@ -2748,16 +2748,26 @@ int make_pages_present(unsigned long addr, unsigned long end)
2650
2651 vma = find_vma(current->mm, addr);
2652 if (!vma)
2653 - return -1;
2654 + return -ENOMEM;
2655 write = (vma->vm_flags & VM_WRITE) != 0;
2656 BUG_ON(addr >= end);
2657 BUG_ON(end > vma->vm_end);
2658 len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
2659 ret = get_user_pages(current, current->mm, addr,
2660 len, write, 0, NULL, NULL);
2661 - if (ret < 0)
2662 + if (ret < 0) {
2663 + /*
2664 + SUS requires strange return values from mlock:
2665 + - an invalid addr generates ENOMEM.
2666 + - out of memory should generate EAGAIN.
2667 + */
2668 + if (ret == -EFAULT)
2669 + ret = -ENOMEM;
2670 + else if (ret == -ENOMEM)
2671 + ret = -EAGAIN;
2672 return ret;
2673 - return ret == len ? 0 : -1;
2674 + }
2675 + return ret == len ? 0 : -ENOMEM;
2676 }
2677
2678 #if !defined(__HAVE_ARCH_GATE_AREA)
2679 diff --git a/mm/mlock.c b/mm/mlock.c
2680 index 7b26560..01fbe93 100644
2681 --- a/mm/mlock.c
2682 +++ b/mm/mlock.c
2683 @@ -78,8 +78,6 @@ success:
2684
2685 mm->locked_vm -= pages;
2686 out:
2687 - if (ret == -ENOMEM)
2688 - ret = -EAGAIN;
2689 return ret;
2690 }
2691
2692 diff --git a/net/dccp/proto.c b/net/dccp/proto.c
2693 index 9dfe247..ebfd56b 100644
2694 --- a/net/dccp/proto.c
2695 +++ b/net/dccp/proto.c
2696 @@ -476,6 +476,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
2697
2698 if (copy_from_user(&opt, optval, sizeof(opt)))
2699 return -EFAULT;
2700 + /*
2701 + * rfc4340: 6.1. Change Options
2702 + */
2703 + if (opt.dccpsf_len < 1)
2704 + return -EINVAL;
2705
2706 val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
2707 if (!val)
2708 diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
2709 index dfa0d71..f97ffc5 100644
2710 --- a/net/ipv4/ipvs/ip_vs_est.c
2711 +++ b/net/ipv4/ipvs/ip_vs_est.c
2712 @@ -172,8 +172,11 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
2713 kfree(est);
2714 killed++;
2715 }
2716 - if (killed && est_list == NULL)
2717 - del_timer_sync(&est_timer);
2718 + while (killed && !est_list && try_to_del_timer_sync(&est_timer) < 0) {
2719 + write_unlock_bh(&est_lock);
2720 + cpu_relax();
2721 + write_lock_bh(&est_lock);
2722 + }
2723 write_unlock_bh(&est_lock);
2724 }
2725
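
The ip_vs_est change avoids calling del_timer_sync() while holding the lock the timer handler itself takes: it spins on try_to_del_timer_sync(), dropping and retaking the lock between attempts so a concurrently running handler can finish. The generic shape of the pattern, sketched with stand-in names (try_del(), lock_est(), unlock_est() are placeholders, not kernel APIs):

/* Never block on timer teardown while holding the lock the handler
 * takes.  try_del() returns <0 while the handler is still running;
 * dropping the lock lets it complete, then we re-check. */
extern int  try_del(void);       /* stand-in for try_to_del_timer_sync() */
extern void lock_est(void), unlock_est(void);
extern void cpu_relax_sketch(void);

void kill_timer_locked(void)     /* called with the lock held */
{
        while (try_del() < 0) {
                unlock_est();            /* let the handler take the lock */
                cpu_relax_sketch();
                lock_est();              /* retake before re-checking */
        }
}
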
2726 diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
2727 index d182a2a..3872d4d 100644
2728 --- a/net/ipv4/syncookies.c
2729 +++ b/net/ipv4/syncookies.c
2730 @@ -301,6 +301,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
2731 ireq->rmt_port = th->source;
2732 ireq->loc_addr = ip_hdr(skb)->daddr;
2733 ireq->rmt_addr = ip_hdr(skb)->saddr;
2734 + ireq->ecn_ok = 0;
2735 ireq->snd_wscale = tcp_opt.snd_wscale;
2736 ireq->rcv_wscale = tcp_opt.rcv_wscale;
2737 ireq->sack_ok = tcp_opt.sack_ok;
2738 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2739 index 48cdce9..4019770 100644
2740 --- a/net/ipv6/ip6_output.c
2741 +++ b/net/ipv6/ip6_output.c
2742 @@ -231,6 +231,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
2743 skb_reset_network_header(skb);
2744 hdr = ipv6_hdr(skb);
2745
2746 + /* Allow local fragmentation. */
2747 + if (ipfragok)
2748 + skb->local_df = 1;
2749 +
2750 /*
2751 * Fill in the IPv6 header
2752 */
2753 diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
2754 index 3ecc115..c8d84e3 100644
2755 --- a/net/ipv6/syncookies.c
2756 +++ b/net/ipv6/syncookies.c
2757 @@ -223,6 +223,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
2758
2759 req->expires = 0UL;
2760 req->retrans = 0;
2761 + ireq->ecn_ok = 0;
2762 ireq->snd_wscale = tcp_opt.snd_wscale;
2763 ireq->rcv_wscale = tcp_opt.rcv_wscale;
2764 ireq->sack_ok = tcp_opt.sack_ok;
2765 diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
2766 index 78de716..9596331 100644
2767 --- a/sound/soc/fsl/fsl_dma.c
2768 +++ b/sound/soc/fsl/fsl_dma.c
2769 @@ -132,12 +132,17 @@ struct fsl_dma_private {
2770 * Since each link descriptor has a 32-bit byte count field, we set
2771 * period_bytes_max to the largest 32-bit number. We also have no maximum
2772 * number of periods.
2773 + *
2774 + * Note that we specify SNDRV_PCM_INFO_JOINT_DUPLEX here, but only because a
2775 + * limitation in the SSI driver requires the sample rates for playback and
2776 + * capture to be the same.
2777 */
2778 static const struct snd_pcm_hardware fsl_dma_hardware = {
2779
2780 .info = SNDRV_PCM_INFO_INTERLEAVED |
2781 SNDRV_PCM_INFO_MMAP |
2782 - SNDRV_PCM_INFO_MMAP_VALID,
2783 + SNDRV_PCM_INFO_MMAP_VALID |
2784 + SNDRV_PCM_INFO_JOINT_DUPLEX,
2785 .formats = FSLDMA_PCM_FORMATS,
2786 .rates = FSLDMA_PCM_RATES,
2787 .rate_min = 5512,
2788 @@ -322,14 +327,75 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_codec_dai *dai,
2789 * fsl_dma_open: open a new substream.
2790 *
2791 * Each substream has its own DMA buffer.
2792 + *
2793 + * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link
2794 + * descriptors that ping-pong from one period to the next. For example, if
2795 + * there are six periods and two link descriptors, this is how they look
2796 + * before playback starts:
2797 + *
2798 + * The last link descriptor
2799 + * ____________ points back to the first
2800 + * | |
2801 + * V |
2802 + * ___ ___ |
2803 + * | |->| |->|
2804 + * |___| |___|
2805 + * | |
2806 + * | |
2807 + * V V
2808 + * _________________________________________
2809 + * | | | | | | | The DMA buffer is
2810 + * | | | | | | | divided into 6 parts
2811 + * |______|______|______|______|______|______|
2812 + *
2813 + * and here's how they look after the first period is finished playing:
2814 + *
2815 + * ____________
2816 + * | |
2817 + * V |
2818 + * ___ ___ |
2819 + * | |->| |->|
2820 + * |___| |___|
2821 + * | |
2822 + * |______________
2823 + * | |
2824 + * V V
2825 + * _________________________________________
2826 + * | | | | | | |
2827 + * | | | | | | |
2828 + * |______|______|______|______|______|______|
2829 + *
2830 + * The first link descriptor now points to the third period. The DMA
2831 + * controller is currently playing the second period. When it finishes, it
2832 + * will jump back to the first descriptor and play the third period.
2833 + *
2834 + * There are four reasons we do this:
2835 + *
2836 + * 1. The only way to get the DMA controller to automatically restart the
2837 + * transfer when it gets to the end of the buffer is to use chaining
2838 + * mode. Basic direct mode doesn't offer that feature.
2839 + * 2. We need to receive an interrupt at the end of every period. The DMA
2840 + * controller can generate an interrupt at the end of every link transfer
2841 + * (aka segment). Making each period into a DMA segment will give us the
2842 + * interrupts we need.
2843 + * 3. By creating only two link descriptors, regardless of the number of
2844 + * periods, we do not need to reallocate the link descriptors if the
2845 + * number of periods changes.
2846 + * 4. All of the audio data is still stored in a single, contiguous DMA
2847 + * buffer, which is what ALSA expects. We're just dividing it into
2848 + * contiguous parts, and creating a link descriptor for each one.
2849 */
2850 static int fsl_dma_open(struct snd_pcm_substream *substream)
2851 {
2852 struct snd_pcm_runtime *runtime = substream->runtime;
2853 struct fsl_dma_private *dma_private;
2854 + struct ccsr_dma_channel __iomem *dma_channel;
2855 dma_addr_t ld_buf_phys;
2856 + u64 temp_link; /* Pointer to next link descriptor */
2857 + u32 mr;
2858 unsigned int channel;
2859 int ret = 0;
2860 + unsigned int i;
2861
2862 /*
2863 * Reject any DMA buffer whose size is not a multiple of the period
2864 @@ -390,68 +456,74 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
2865 snd_soc_set_runtime_hwparams(substream, &fsl_dma_hardware);
2866 runtime->private_data = dma_private;
2867
2868 + /* Program the fixed DMA controller parameters */
2869 +
2870 + dma_channel = dma_private->dma_channel;
2871 +
2872 + temp_link = dma_private->ld_buf_phys +
2873 + sizeof(struct fsl_dma_link_descriptor);
2874 +
2875 + for (i = 0; i < NUM_DMA_LINKS; i++) {
2876 + struct fsl_dma_link_descriptor *link = &dma_private->link[i];
2877 +
2878 + link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
2879 + link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
2880 + link->next = cpu_to_be64(temp_link);
2881 +
2882 + temp_link += sizeof(struct fsl_dma_link_descriptor);
2883 + }
2884 + /* The last link descriptor points to the first */
2885 + dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys);
2886 +
2887 + /* Tell the DMA controller where the first link descriptor is */
2888 + out_be32(&dma_channel->clndar,
2889 + CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys));
2890 + out_be32(&dma_channel->eclndar,
2891 + CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys));
2892 +
2893 + /* The manual says the BCR must be clear before enabling EMP */
2894 + out_be32(&dma_channel->bcr, 0);
2895 +
2896 + /*
2897 + * Program the mode register for interrupts, external master control,
2898 + * and source/destination hold. Also clear the Channel Abort bit.
2899 + */
2900 + mr = in_be32(&dma_channel->mr) &
2901 + ~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE);
2902 +
2903 + /*
2904 + * We want External Master Start and External Master Pause enabled,
2905 + * because the SSI is controlling the DMA controller. We want the DMA
2906 + * controller to be set up in advance, and then we signal only the SSI
2907 + * to start transferring.
2908 + *
2909 + * We want End-Of-Segment Interrupts enabled, because this will generate
2910 + * an interrupt at the end of each segment (each link descriptor
2911 + * represents one segment). Each DMA segment is the same thing as an
2912 + * ALSA period, so this is how we get an interrupt at the end of every
2913 + * period.
2914 + *
2915 + * We want Error Interrupt enabled, so that we can get an error if
2916 + * the DMA controller is mis-programmed somehow.
2917 + */
2918 + mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN |
2919 + CCSR_DMA_MR_EMS_EN;
2920 +
2921 + /* For playback, we want the destination address to be held. For
2922 + capture, set the source address to be held. */
2923 + mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
2924 + CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE;
2925 +
2926 + out_be32(&dma_channel->mr, mr);
2927 +
2928 return 0;
2929 }
2930
2931 /**
2932 - * fsl_dma_hw_params: allocate the DMA buffer and the DMA link descriptors.
2933 + * fsl_dma_hw_params: continue initializing the DMA links
2934 *
2935 - * ALSA divides the DMA buffer into N periods. We create NUM_DMA_LINKS link
2936 - * descriptors that ping-pong from one period to the next. For example, if
2937 - * there are six periods and two link descriptors, this is how they look
2938 - * before playback starts:
2939 - *
2940 - * The last link descriptor
2941 - * ____________ points back to the first
2942 - * | |
2943 - * V |
2944 - * ___ ___ |
2945 - * | |->| |->|
2946 - * |___| |___|
2947 - * | |
2948 - * | |
2949 - * V V
2950 - * _________________________________________
2951 - * | | | | | | | The DMA buffer is
2952 - * | | | | | | | divided into 6 parts
2953 - * |______|______|______|______|______|______|
2954 - *
2955 - * and here's how they look after the first period is finished playing:
2956 - *
2957 - * ____________
2958 - * | |
2959 - * V |
2960 - * ___ ___ |
2961 - * | |->| |->|
2962 - * |___| |___|
2963 - * | |
2964 - * |______________
2965 - * | |
2966 - * V V
2967 - * _________________________________________
2968 - * | | | | | | |
2969 - * | | | | | | |
2970 - * |______|______|______|______|______|______|
2971 - *
2972 - * The first link descriptor now points to the third period. The DMA
2973 - * controller is currently playing the second period. When it finishes, it
2974 - * will jump back to the first descriptor and play the third period.
2975 - *
2976 - * There are four reasons we do this:
2977 - *
2978 - * 1. The only way to get the DMA controller to automatically restart the
2979 - * transfer when it gets to the end of the buffer is to use chaining
2980 - * mode. Basic direct mode doesn't offer that feature.
2981 - * 2. We need to receive an interrupt at the end of every period. The DMA
2982 - * controller can generate an interrupt at the end of every link transfer
2983 - * (aka segment). Making each period into a DMA segment will give us the
2984 - * interrupts we need.
2985 - * 3. By creating only two link descriptors, regardless of the number of
2986 - * periods, we do not need to reallocate the link descriptors if the
2987 - * number of periods changes.
2988 - * 4. All of the audio data is still stored in a single, contiguous DMA
2989 - * buffer, which is what ALSA expects. We're just dividing it into
2990 - * contiguous parts, and creating a link descriptor for each one.
2991 + * This function obtains hardware parameters about the opened stream and
2992 + * programs the DMA controller accordingly.
2993 *
2994 * Note that due to a quirk of the SSI's STX register, the target address
2995 * for the DMA operations depends on the sample size. So we don't program
2996 @@ -463,11 +535,8 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
2997 {
2998 struct snd_pcm_runtime *runtime = substream->runtime;
2999 struct fsl_dma_private *dma_private = runtime->private_data;
3000 - struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
3001
3002 dma_addr_t temp_addr; /* Pointer to next period */
3003 - u64 temp_link; /* Pointer to next link descriptor */
3004 - u32 mr; /* Temporary variable for MR register */
3005
3006 unsigned int i;
3007
3008 @@ -485,8 +554,6 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
3009 dma_private->dma_buf_next = dma_private->dma_buf_phys;
3010
3011 /*
3012 - * Initialize each link descriptor.
3013 - *
3014 * The actual address in STX0 (destination for playback, source for
3015 * capture) is based on the sample size, but we don't know the sample
3016 * size in this function, so we'll have to adjust that later. See
3017 @@ -502,16 +569,11 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
3018 * buffer itself.
3019 */
3020 temp_addr = substream->dma_buffer.addr;
3021 - temp_link = dma_private->ld_buf_phys +
3022 - sizeof(struct fsl_dma_link_descriptor);
3023
3024 for (i = 0; i < NUM_DMA_LINKS; i++) {
3025 struct fsl_dma_link_descriptor *link = &dma_private->link[i];
3026
3027 link->count = cpu_to_be32(period_size);
3028 - link->source_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
3029 - link->dest_attr = cpu_to_be32(CCSR_DMA_ATR_SNOOP);
3030 - link->next = cpu_to_be64(temp_link);
3031
3032 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3033 link->source_addr = cpu_to_be32(temp_addr);
3034 @@ -519,51 +581,7 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
3035 link->dest_addr = cpu_to_be32(temp_addr);
3036
3037 temp_addr += period_size;
3038 - temp_link += sizeof(struct fsl_dma_link_descriptor);
3039 }
3040 - /* The last link descriptor points to the first */
3041 - dma_private->link[i - 1].next = cpu_to_be64(dma_private->ld_buf_phys);
3042 -
3043 - /* Tell the DMA controller where the first link descriptor is */
3044 - out_be32(&dma_channel->clndar,
3045 - CCSR_DMA_CLNDAR_ADDR(dma_private->ld_buf_phys));
3046 - out_be32(&dma_channel->eclndar,
3047 - CCSR_DMA_ECLNDAR_ADDR(dma_private->ld_buf_phys));
3048 -
3049 - /* The manual says the BCR must be clear before enabling EMP */
3050 - out_be32(&dma_channel->bcr, 0);
3051 -
3052 - /*
3053 - * Program the mode register for interrupts, external master control,
3054 - * and source/destination hold. Also clear the Channel Abort bit.
3055 - */
3056 - mr = in_be32(&dma_channel->mr) &
3057 - ~(CCSR_DMA_MR_CA | CCSR_DMA_MR_DAHE | CCSR_DMA_MR_SAHE);
3058 -
3059 - /*
3060 - * We want External Master Start and External Master Pause enabled,
3061 - * because the SSI is controlling the DMA controller. We want the DMA
3062 - * controller to be set up in advance, and then we signal only the SSI
3063 - * to start transfering.
3064 - *
3065 - * We want End-Of-Segment Interrupts enabled, because this will generate
3066 - * an interrupt at the end of each segment (each link descriptor
3067 - * represents one segment). Each DMA segment is the same thing as an
3068 - * ALSA period, so this is how we get an interrupt at the end of every
3069 - * period.
3070 - *
3071 - * We want Error Interrupt enabled, so that we can get an error if
3072 - * the DMA controller is mis-programmed somehow.
3073 - */
3074 - mr |= CCSR_DMA_MR_EOSIE | CCSR_DMA_MR_EIE | CCSR_DMA_MR_EMP_EN |
3075 - CCSR_DMA_MR_EMS_EN;
3076 -
3077 - /* For playback, we want the destination address to be held. For
3078 - capture, set the source address to be held. */
3079 - mr |= (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
3080 - CCSR_DMA_MR_DAHE : CCSR_DMA_MR_SAHE;
3081 -
3082 - out_be32(&dma_channel->mr, mr);
3083
3084 return 0;
3085 }
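
Building the circular chain now done in fsl_dma_open() amounts to: point each descriptor's next field at its successor's physical address, then point the last descriptor back at the first, which is what lets the DMA controller loop over the ping-pong links forever. A standalone sketch of that construction (the base address is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define NUM_DMA_LINKS 2

struct link_desc {
        uint64_t next;   /* physical address of the next descriptor */
};

int main(void)
{
        struct link_desc link[NUM_DMA_LINKS];
        uint64_t ld_buf_phys = 0x1000;   /* made-up descriptor base address */
        uint64_t next = ld_buf_phys + sizeof(struct link_desc);
        unsigned int i;

        for (i = 0; i < NUM_DMA_LINKS; i++) {
                link[i].next = next;
                next += sizeof(struct link_desc);
        }
        /* The last link descriptor points back to the first */
        link[NUM_DMA_LINKS - 1].next = ld_buf_phys;

        for (i = 0; i < NUM_DMA_LINKS; i++)
                printf("link[%u].next = %#llx\n", i,
                       (unsigned long long)link[i].next);
        return 0;
}
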
3086 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
3087 index f588545..94f8567 100644
3088 --- a/sound/soc/fsl/fsl_ssi.c
3089 +++ b/sound/soc/fsl/fsl_ssi.c
3090 @@ -67,6 +67,8 @@
3091 * @ssi: pointer to the SSI's registers
3092 * @ssi_phys: physical address of the SSI registers
3093 * @irq: IRQ of this SSI
3094 + * @first_stream: pointer to the stream that was opened first
3095 + * @second_stream: pointer to second stream
3096 * @dev: struct device pointer
3097 * @playback: the number of playback streams opened
3098 * @capture: the number of capture streams opened
3099 @@ -79,6 +81,8 @@ struct fsl_ssi_private {
3100 struct ccsr_ssi __iomem *ssi;
3101 dma_addr_t ssi_phys;
3102 unsigned int irq;
3103 + struct snd_pcm_substream *first_stream;
3104 + struct snd_pcm_substream *second_stream;
3105 struct device *dev;
3106 unsigned int playback;
3107 unsigned int capture;
3108 @@ -342,6 +346,49 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream)
3109 */
3110 }
3111
3112 + if (!ssi_private->first_stream)
3113 + ssi_private->first_stream = substream;
3114 + else {
3115 + /* This is the second stream open, so we need to impose sample
3116 + * rate and maybe sample size constraints. Note that this can
3117 + * cause a race condition if the second stream is opened before
3118 + * the first stream is fully initialized.
3119 + *
3120 + * We provide some protection by checking to make sure the first
3121 + * stream is initialized, but it's not perfect. ALSA sometimes
3122 + * re-initializes the driver with a different sample rate or
3123 + * size. If the second stream is opened before the first stream
3124 + * has received its final parameters, then the second stream may
3125 + * be constrained to the wrong sample rate or size.
3126 + *
3127 + * FIXME: This code does not handle opening and closing streams
3128 + * repeatedly. If you open two streams and then close the first
3129 + * one, you may not be able to open another stream until you
3130 + * close the second one as well.
3131 + */
3132 + struct snd_pcm_runtime *first_runtime =
3133 + ssi_private->first_stream->runtime;
3134 +
3135 + if (!first_runtime->rate || !first_runtime->sample_bits) {
3136 + dev_err(substream->pcm->card->dev,
3137 + "set sample rate and size in %s stream first\n",
3138 + substream->stream == SNDRV_PCM_STREAM_PLAYBACK
3139 + ? "capture" : "playback");
3140 + return -EAGAIN;
3141 + }
3142 +
3143 + snd_pcm_hw_constraint_minmax(substream->runtime,
3144 + SNDRV_PCM_HW_PARAM_RATE,
3145 + first_runtime->rate, first_runtime->rate);
3146 +
3147 + snd_pcm_hw_constraint_minmax(substream->runtime,
3148 + SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
3149 + first_runtime->sample_bits,
3150 + first_runtime->sample_bits);
3151 +
3152 + ssi_private->second_stream = substream;
3153 + }
3154 +
3155 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3156 ssi_private->playback++;
3157
3158 @@ -371,18 +418,16 @@ static int fsl_ssi_prepare(struct snd_pcm_substream *substream)
3159 struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data;
3160
3161 struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
3162 - u32 wl;
3163
3164 - wl = CCSR_SSI_SxCCR_WL(snd_pcm_format_width(runtime->format));
3165 + if (substream == ssi_private->first_stream) {
3166 + u32 wl;
3167
3168 - clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
3169 + /* The SSI should always be disabled at this point (SSIEN=0) */
3170 + wl = CCSR_SSI_SxCCR_WL(snd_pcm_format_width(runtime->format));
3171
3172 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
3173 + /* In synchronous mode, the SSI uses STCCR for capture */
3174 clrsetbits_be32(&ssi->stccr, CCSR_SSI_SxCCR_WL_MASK, wl);
3175 - else
3176 - clrsetbits_be32(&ssi->srccr, CCSR_SSI_SxCCR_WL_MASK, wl);
3177 -
3178 - setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
3179 + }
3180
3181 return 0;
3182 }
3183 @@ -407,9 +452,13 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd)
3184 case SNDRV_PCM_TRIGGER_RESUME:
3185 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
3186 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
3187 - setbits32(&ssi->scr, CCSR_SSI_SCR_TE);
3188 + clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
3189 + setbits32(&ssi->scr,
3190 + CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE);
3191 } else {
3192 - setbits32(&ssi->scr, CCSR_SSI_SCR_RE);
3193 + clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
3194 + setbits32(&ssi->scr,
3195 + CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE);
3196
3197 /*
3198 * I think we need this delay to allow time for the SSI
3199 @@ -452,6 +501,11 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream)
3200 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
3201 ssi_private->capture--;
3202
3203 + if (ssi_private->first_stream == substream)
3204 + ssi_private->first_stream = ssi_private->second_stream;
3205 +
3206 + ssi_private->second_stream = NULL;
3207 +
3208 /*
3209 * If this is the last active substream, disable the SSI and release
3210 * the IRQ.