Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.33-r3/0105-2.6.33.6-all-fixes.patch

Revision 1055
Wed Jul 7 10:02:52 2010 UTC by niro
File size: 187681 bytes
2.6.33-magellan-r3; updated to linux-2.6.33.6

1 niro 1055 diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
2     index 02838a4..86b5880 100644
3     --- a/Documentation/hwmon/ltc4245
4     +++ b/Documentation/hwmon/ltc4245
5     @@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
6     in7_min_alarm 3v output undervoltage alarm
7     in8_min_alarm Vee (-12v) output undervoltage alarm
8    
9     -in9_input GPIO #1 voltage data
10     -in10_input GPIO #2 voltage data
11     -in11_input GPIO #3 voltage data
12     +in9_input GPIO voltage data
13    
14     power1_input 12v power usage (mW)
15     power2_input 5v power usage (mW)
16     diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
17     index 8ba7044..b07bfee 100644
18     --- a/arch/arm/common/sa1111.c
19     +++ b/arch/arm/common/sa1111.c
20     @@ -887,8 +887,6 @@ static int sa1111_resume(struct platform_device *dev)
21     if (!save)
22     return 0;
23    
24     - spin_lock_irqsave(&sachip->lock, flags);
25     -
26     /*
27     * Ensure that the SA1111 is still here.
28     * FIXME: shouldn't do this here.
29     @@ -905,6 +903,13 @@ static int sa1111_resume(struct platform_device *dev)
30     * First of all, wake up the chip.
31     */
32     sa1111_wake(sachip);
33     +
34     + /*
35     + * Only lock for write ops. Also, sa1111_wake must be called with
36     + * released spinlock!
37     + */
38     + spin_lock_irqsave(&sachip->lock, flags);
39     +
40     sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
41     sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
42    
43     diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
44     index 70997d5..dd9598b 100644
45     --- a/arch/arm/mm/copypage-feroceon.c
46     +++ b/arch/arm/mm/copypage-feroceon.c
47     @@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
48     {
49     asm("\
50     stmfd sp!, {r4-r9, lr} \n\
51     - mov ip, %0 \n\
52     + mov ip, %2 \n\
53     1: mov lr, r1 \n\
54     ldmia r1!, {r2 - r9} \n\
55     pld [lr, #32] \n\
56     @@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
57     mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
58     ldmfd sp!, {r4-r9, pc}"
59     :
60     - : "I" (PAGE_SIZE));
61     + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
62     }
63    
64     void feroceon_copy_user_highpage(struct page *to, struct page *from,
65     diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
66     index 9ab0984..7bc0ac7 100644
67     --- a/arch/arm/mm/copypage-v4wb.c
68     +++ b/arch/arm/mm/copypage-v4wb.c
69     @@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
70     {
71     asm("\
72     stmfd sp!, {r4, lr} @ 2\n\
73     - mov r2, %0 @ 1\n\
74     + mov r2, %2 @ 1\n\
75     ldmia r1!, {r3, r4, ip, lr} @ 4\n\
76     1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
77     stmia r0!, {r3, r4, ip, lr} @ 4\n\
78     @@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
79     mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
80     ldmfd sp!, {r4, pc} @ 3"
81     :
82     - : "I" (PAGE_SIZE / 64));
83     + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
84     }
85    
86     void v4wb_copy_user_highpage(struct page *to, struct page *from,
87     diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
88     index 300efaf..35bf609 100644
89     --- a/arch/arm/mm/copypage-v4wt.c
90     +++ b/arch/arm/mm/copypage-v4wt.c
91     @@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
92     {
93     asm("\
94     stmfd sp!, {r4, lr} @ 2\n\
95     - mov r2, %0 @ 1\n\
96     + mov r2, %2 @ 1\n\
97     ldmia r1!, {r3, r4, ip, lr} @ 4\n\
98     1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
99     ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
100     @@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
101     mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
102     ldmfd sp!, {r4, pc} @ 3"
103     :
104     - : "I" (PAGE_SIZE / 64));
105     + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
106     }
107    
108     void v4wt_copy_user_highpage(struct page *to, struct page *from,
109     diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
110     index bc4525f..27dc363 100644
111     --- a/arch/arm/mm/copypage-xsc3.c
112     +++ b/arch/arm/mm/copypage-xsc3.c
113     @@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
114     {
115     asm("\
116     stmfd sp!, {r4, r5, lr} \n\
117     - mov lr, %0 \n\
118     + mov lr, %2 \n\
119     \n\
120     pld [r1, #0] \n\
121     pld [r1, #32] \n\
122     @@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
123     \n\
124     ldmfd sp!, {r4, r5, pc}"
125     :
126     - : "I" (PAGE_SIZE / 64 - 1));
127     + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
128     }
129    
130     void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
131     diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
132     index 10e0680..3191cd6 100644
133     --- a/arch/arm/mm/fault.c
134     +++ b/arch/arm/mm/fault.c
135     @@ -386,6 +386,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
136     if (addr < TASK_SIZE)
137     return do_page_fault(addr, fsr, regs);
138    
139     + if (user_mode(regs))
140     + goto bad_area;
141     +
142     index = pgd_index(addr);
143    
144     /*
145     diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
146     index a04ffbb..3cbdd5c 100644
147     --- a/arch/arm/mm/init.c
148     +++ b/arch/arm/mm/init.c
149     @@ -632,10 +632,10 @@ void __init mem_init(void)
150     void free_initmem(void)
151     {
152     #ifdef CONFIG_HAVE_TCM
153     - extern char *__tcm_start, *__tcm_end;
154     + extern char __tcm_start, __tcm_end;
155    
156     - totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
157     - __phys_to_pfn(__pa(__tcm_end)),
158     + totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
159     + __phys_to_pfn(__pa(&__tcm_end)),
160     "TCM link");
161     #endif
162    
163     diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
164     index 66dc2d0..d66cead 100644
165     --- a/arch/arm/vfp/vfphw.S
166     +++ b/arch/arm/vfp/vfphw.S
167     @@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
168     #ifdef CONFIG_VFPv3
169     @ d16 - d31 registers
170     .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
171     -1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
172     +1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
173     mov pc, lr
174     .org 1b + 8
175     .endr
176     diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
177     index 8542bc3..93f6c63 100644
178     --- a/arch/blackfin/include/asm/cache.h
179     +++ b/arch/blackfin/include/asm/cache.h
180     @@ -15,6 +15,8 @@
181     #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
182     #define SMP_CACHE_BYTES L1_CACHE_BYTES
183    
184     +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
185     +
186     #ifdef CONFIG_SMP
187     #define __cacheline_aligned
188     #else
189     diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
190     index 2797163..7dc0f0f 100644
191     --- a/arch/frv/include/asm/cache.h
192     +++ b/arch/frv/include/asm/cache.h
193     @@ -17,6 +17,8 @@
194     #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
195     #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
196    
197     +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
198     +
199     #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
200     #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
201    
202     diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
203     index fed3fd3..ecafbe1 100644
204     --- a/arch/m68k/include/asm/cache.h
205     +++ b/arch/m68k/include/asm/cache.h
206     @@ -8,4 +8,6 @@
207     #define L1_CACHE_SHIFT 4
208     #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
209    
210     +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
211     +
212     #endif
213     diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
214     index e03cfa2..6e2fe28 100644
215     --- a/arch/mn10300/include/asm/cache.h
216     +++ b/arch/mn10300/include/asm/cache.h
217     @@ -21,6 +21,8 @@
218     #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
219     #endif
220    
221     +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
222     +
223     /* data cache purge registers
224     * - read from the register to unconditionally purge that cache line
225     * - write address & 0xffffff00 to conditionally purge that cache line
226     diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
227     index 3ca1c61..27a7492 100644
228     --- a/arch/parisc/math-emu/decode_exc.c
229     +++ b/arch/parisc/math-emu/decode_exc.c
230     @@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
231     return SIGNALCODE(SIGFPE, FPE_FLTINV);
232     case DIVISIONBYZEROEXCEPTION:
233     update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
234     + Clear_excp_register(exception_index);
235     return SIGNALCODE(SIGFPE, FPE_FLTDIV);
236     case INEXACTEXCEPTION:
237     update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
238     diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
239     index e6dc595..0ca0b99 100644
240     --- a/arch/powerpc/kvm/book3s.c
241     +++ b/arch/powerpc/kvm/book3s.c
242     @@ -766,6 +766,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
243     struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
244     int i;
245    
246     + vcpu_load(vcpu);
247     +
248     sregs->pvr = vcpu->arch.pvr;
249    
250     sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
251     @@ -784,6 +786,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
252     sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
253     }
254     }
255     +
256     + vcpu_put(vcpu);
257     +
258     return 0;
259     }
260    
261     @@ -793,6 +798,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
262     struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
263     int i;
264    
265     + vcpu_load(vcpu);
266     +
267     kvmppc_set_pvr(vcpu, sregs->pvr);
268    
269     vcpu3s->sdr1 = sregs->u.s.sdr1;
270     @@ -819,6 +826,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
271    
272     /* Flush the MMU after messing with the segments */
273     kvmppc_mmu_pte_flush(vcpu, 0, 0);
274     +
275     + vcpu_put(vcpu);
276     +
277     return 0;
278     }
279    
280     diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
281     index 06f5a9e..0a5263e 100644
282     --- a/arch/powerpc/kvm/booke.c
283     +++ b/arch/powerpc/kvm/booke.c
284     @@ -443,6 +443,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
285     {
286     int i;
287    
288     + vcpu_load(vcpu);
289     +
290     regs->pc = vcpu->arch.pc;
291     regs->cr = vcpu->arch.cr;
292     regs->ctr = vcpu->arch.ctr;
293     @@ -463,6 +465,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
294     for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
295     regs->gpr[i] = vcpu->arch.gpr[i];
296    
297     + vcpu_put(vcpu);
298     +
299     return 0;
300     }
301    
302     @@ -470,6 +474,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
303     {
304     int i;
305    
306     + vcpu_load(vcpu);
307     +
308     vcpu->arch.pc = regs->pc;
309     vcpu->arch.cr = regs->cr;
310     vcpu->arch.ctr = regs->ctr;
311     @@ -489,6 +495,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
312     for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
313     vcpu->arch.gpr[i] = regs->gpr[i];
314    
315     + vcpu_put(vcpu);
316     +
317     return 0;
318     }
319    
320     @@ -517,7 +525,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
321     int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
322     struct kvm_translation *tr)
323     {
324     - return kvmppc_core_vcpu_translate(vcpu, tr);
325     + int r;
326     +
327     + vcpu_load(vcpu);
328     + r = kvmppc_core_vcpu_translate(vcpu, tr);
329     + vcpu_put(vcpu);
330     + return r;
331     }
332    
333     int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
334     diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
335     index f06cf93..82098ae 100644
336     --- a/arch/powerpc/kvm/powerpc.c
337     +++ b/arch/powerpc/kvm/powerpc.c
338     @@ -181,7 +181,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
339     {
340     struct kvm_vcpu *vcpu;
341     vcpu = kvmppc_core_vcpu_create(kvm, id);
342     - kvmppc_create_vcpu_debugfs(vcpu, id);
343     + if (!IS_ERR(vcpu))
344     + kvmppc_create_vcpu_debugfs(vcpu, id);
345     return vcpu;
346     }
347    
348     diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
349     index 64e2e49..3ac0cd3 100644
350     --- a/arch/powerpc/lib/string.S
351     +++ b/arch/powerpc/lib/string.S
352     @@ -71,7 +71,7 @@ _GLOBAL(strcmp)
353    
354     _GLOBAL(strncmp)
355     PPC_LCMPI r5,0
356     - beqlr
357     + ble- 2f
358     mtctr r5
359     addi r5,r3,-1
360     addi r4,r4,-1
361     @@ -82,6 +82,8 @@ _GLOBAL(strncmp)
362     beqlr 1
363     bdnzt eq,1b
364     blr
365     +2: li r3,0
366     + blr
367    
368     _GLOBAL(strlen)
369     addi r4,r3,-1
370     diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
371     index 2c9e522..7fd90d0 100644
372     --- a/arch/powerpc/oprofile/op_model_cell.c
373     +++ b/arch/powerpc/oprofile/op_model_cell.c
374     @@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
375     index = ENTRIES-1;
376    
377     /* make sure index is valid */
378     - if ((index > ENTRIES) || (index < 0))
379     + if ((index >= ENTRIES) || (index < 0))
380     index = ENTRIES-1;
381    
382     return initial_lfsr[index];
383     diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
384     index b842378..da684a7 100644
385     --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
386     +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
387     @@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
388     for(;;);
389     }
390    
391     -static int qcss_tok; /* query-cpu-stopped-state token */
392     -
393     -/* Get state of physical CPU.
394     - * Return codes:
395     - * 0 - The processor is in the RTAS stopped state
396     - * 1 - stop-self is in progress
397     - * 2 - The processor is not in the RTAS stopped state
398     - * -1 - Hardware Error
399     - * -2 - Hardware Busy, Try again later.
400     - */
401     -static int query_cpu_stopped(unsigned int pcpu)
402     -{
403     - int cpu_status, status;
404     -
405     - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
406     - if (status != 0) {
407     - printk(KERN_ERR
408     - "RTAS query-cpu-stopped-state failed: %i\n", status);
409     - return status;
410     - }
411     -
412     - return cpu_status;
413     -}
414     -
415     static int pseries_cpu_disable(void)
416     {
417     int cpu = smp_processor_id();
418     @@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
419     } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
420    
421     for (tries = 0; tries < 25; tries++) {
422     - cpu_status = query_cpu_stopped(pcpu);
423     - if (cpu_status == 0 || cpu_status == -1)
424     + cpu_status = smp_query_cpu_stopped(pcpu);
425     + if (cpu_status == QCSS_STOPPED ||
426     + cpu_status == QCSS_HARDWARE_ERROR)
427     break;
428     cpu_relax();
429     }
430     @@ -400,6 +377,7 @@ static int __init pseries_cpu_hotplug_init(void)
431     struct device_node *np;
432     const char *typep;
433     int cpu;
434     + int qcss_tok;
435    
436     for_each_node_by_name(np, "interrupt-controller") {
437     typep = of_get_property(np, "compatible", NULL);
438     diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
439     index 0603c91..e724ef8 100644
440     --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
441     +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
442     @@ -4,6 +4,14 @@
443     #include <asm/hvcall.h>
444     #include <asm/page.h>
445    
446     +/* Get state of physical CPU from query_cpu_stopped */
447     +int smp_query_cpu_stopped(unsigned int pcpu);
448     +#define QCSS_STOPPED 0
449     +#define QCSS_STOPPING 1
450     +#define QCSS_NOT_STOPPED 2
451     +#define QCSS_HARDWARE_ERROR -1
452     +#define QCSS_HARDWARE_BUSY -2
453     +
454     static inline long poll_pending(void)
455     {
456     return plpar_hcall_norets(H_POLL_PENDING);
457     diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
458     index b488663..5530b4b 100644
459     --- a/arch/powerpc/platforms/pseries/smp.c
460     +++ b/arch/powerpc/platforms/pseries/smp.c
461     @@ -57,6 +57,28 @@
462     */
463     static cpumask_t of_spin_map;
464    
465     +/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
466     +int smp_query_cpu_stopped(unsigned int pcpu)
467     +{
468     + int cpu_status, status;
469     + int qcss_tok = rtas_token("query-cpu-stopped-state");
470     +
471     + if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
472     + printk(KERN_INFO "Firmware doesn't support "
473     + "query-cpu-stopped-state\n");
474     + return QCSS_HARDWARE_ERROR;
475     + }
476     +
477     + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
478     + if (status != 0) {
479     + printk(KERN_ERR
480     + "RTAS query-cpu-stopped-state failed: %i\n", status);
481     + return status;
482     + }
483     +
484     + return cpu_status;
485     +}
486     +
487     /**
488     * smp_startup_cpu() - start the given cpu
489     *
490     @@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
491    
492     pcpu = get_hard_smp_processor_id(lcpu);
493    
494     + /* Check to see if the CPU out of FW already for kexec */
495     + if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
496     + cpu_set(lcpu, of_spin_map);
497     + return 1;
498     + }
499     +
500     /* Fixup atomic count: it exited inside IRQ handler. */
501     task_thread_info(paca[lcpu].__current)->preempt_count = 0;
502    
503     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
504     index f8bcaef..e06d18c 100644
505     --- a/arch/s390/kvm/kvm-s390.c
506     +++ b/arch/s390/kvm/kvm-s390.c
507     @@ -339,11 +339,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
508    
509     rc = kvm_vcpu_init(vcpu, kvm, id);
510     if (rc)
511     - goto out_free_cpu;
512     + goto out_free_sie_block;
513     VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
514     vcpu->arch.sie_block);
515    
516     return vcpu;
517     +out_free_sie_block:
518     + free_page((unsigned long)(vcpu->arch.sie_block));
519     out_free_cpu:
520     kfree(vcpu);
521     out_nomem:
522     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
523     index 4f865e8..e62b4b9 100644
524     --- a/arch/x86/include/asm/kvm_host.h
525     +++ b/arch/x86/include/asm/kvm_host.h
526     @@ -193,6 +193,7 @@ union kvm_mmu_page_role {
527     unsigned invalid:1;
528     unsigned cr4_pge:1;
529     unsigned nxe:1;
530     + unsigned cr0_wp:1;
531     };
532     };
533    
534     @@ -533,6 +534,8 @@ struct kvm_x86_ops {
535     u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
536     bool (*gb_page_enable)(void);
537    
538     + void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
539     +
540     const struct trace_print_flags *exit_reasons_str;
541     };
542    
543     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
544     index 4604e6a..d86da72 100644
545     --- a/arch/x86/include/asm/msr-index.h
546     +++ b/arch/x86/include/asm/msr-index.h
547     @@ -199,8 +199,9 @@
548     #define MSR_IA32_EBL_CR_POWERON 0x0000002a
549     #define MSR_IA32_FEATURE_CONTROL 0x0000003a
550    
551     -#define FEATURE_CONTROL_LOCKED (1<<0)
552     -#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
553     +#define FEATURE_CONTROL_LOCKED (1<<0)
554     +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
555     +#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
556    
557     #define MSR_IA32_APICBASE 0x0000001b
558     #define MSR_IA32_APICBASE_BSP (1<<8)
559     diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
560     index 2e77516..ca15b93 100644
561     --- a/arch/x86/kernel/amd_iommu.c
562     +++ b/arch/x86/kernel/amd_iommu.c
563     @@ -1419,6 +1419,7 @@ static int __attach_device(struct device *dev,
564     struct protection_domain *domain)
565     {
566     struct iommu_dev_data *dev_data, *alias_data;
567     + int ret;
568    
569     dev_data = get_dev_data(dev);
570     alias_data = get_dev_data(dev_data->alias);
571     @@ -1430,13 +1431,14 @@ static int __attach_device(struct device *dev,
572     spin_lock(&domain->lock);
573    
574     /* Some sanity checks */
575     + ret = -EBUSY;
576     if (alias_data->domain != NULL &&
577     alias_data->domain != domain)
578     - return -EBUSY;
579     + goto out_unlock;
580    
581     if (dev_data->domain != NULL &&
582     dev_data->domain != domain)
583     - return -EBUSY;
584     + goto out_unlock;
585    
586     /* Do real assignment */
587     if (dev_data->alias != dev) {
588     @@ -1452,10 +1454,14 @@ static int __attach_device(struct device *dev,
589    
590     atomic_inc(&dev_data->bind);
591    
592     + ret = 0;
593     +
594     +out_unlock:
595     +
596     /* ready */
597     spin_unlock(&domain->lock);
598    
599     - return 0;
600     + return ret;
601     }
602    
603     /*
604     @@ -2256,10 +2262,6 @@ int __init amd_iommu_init_dma_ops(void)
605    
606     iommu_detected = 1;
607     swiotlb = 0;
608     -#ifdef CONFIG_GART_IOMMU
609     - gart_iommu_aperture_disabled = 1;
610     - gart_iommu_aperture = 0;
611     -#endif
612    
613     /* Make the driver finally visible to the drivers */
614     dma_ops = &amd_iommu_dma_ops;
615     diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
616     index 883d619..86a5a11 100644
617     --- a/arch/x86/kernel/amd_iommu_init.c
618     +++ b/arch/x86/kernel/amd_iommu_init.c
619     @@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
620     {
621     u8 *ret;
622    
623     - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
624     + if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
625     + pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
626     + address);
627     + pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
628     return NULL;
629     + }
630    
631     ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
632     if (ret != NULL)
633     @@ -1296,7 +1300,7 @@ static int __init amd_iommu_init(void)
634     ret = amd_iommu_init_dma_ops();
635    
636     if (ret)
637     - goto free;
638     + goto free_disable;
639    
640     amd_iommu_init_api();
641    
642     @@ -1314,9 +1318,10 @@ static int __init amd_iommu_init(void)
643     out:
644     return ret;
645    
646     -free:
647     +free_disable:
648     disable_iommus();
649    
650     +free:
651     amd_iommu_uninit_devices();
652    
653     free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
654     @@ -1335,6 +1340,15 @@ free:
655    
656     free_unity_maps();
657    
658     +#ifdef CONFIG_GART_IOMMU
659     + /*
660     + * We failed to initialize the AMD IOMMU - try fallback to GART
661     + * if possible.
662     + */
663     + gart_iommu_init();
664     +
665     +#endif
666     +
667     goto out;
668     }
669    
670     diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
671     index c7ca8e2..16307ff 100644
672     --- a/arch/x86/kernel/cpu/perf_event.c
673     +++ b/arch/x86/kernel/cpu/perf_event.c
674     @@ -1060,8 +1060,11 @@ static int __hw_perf_event_init(struct perf_event *event)
675     if (atomic_read(&active_events) == 0) {
676     if (!reserve_pmc_hardware())
677     err = -EBUSY;
678     - else
679     + else {
680     err = reserve_bts_hardware();
681     + if (err)
682     + release_pmc_hardware();
683     + }
684     }
685     if (!err)
686     atomic_inc(&active_events);
687     diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
688     index 03801f2..dfdfe46 100644
689     --- a/arch/x86/kernel/pvclock.c
690     +++ b/arch/x86/kernel/pvclock.c
691     @@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
692     return pv_tsc_khz;
693     }
694    
695     +static atomic64_t last_value = ATOMIC64_INIT(0);
696     +
697     cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
698     {
699     struct pvclock_shadow_time shadow;
700     unsigned version;
701     cycle_t ret, offset;
702     + u64 last;
703    
704     do {
705     version = pvclock_get_time_values(&shadow, src);
706     @@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
707     barrier();
708     } while (version != src->version);
709    
710     + /*
711     + * Assumption here is that last_value, a global accumulator, always goes
712     + * forward. If we are less than that, we should not be much smaller.
713     + * We assume there is an error marging we're inside, and then the correction
714     + * does not sacrifice accuracy.
715     + *
716     + * For reads: global may have changed between test and return,
717     + * but this means someone else updated poked the clock at a later time.
718     + * We just need to make sure we are not seeing a backwards event.
719     + *
720     + * For updates: last_value = ret is not enough, since two vcpus could be
721     + * updating at the same time, and one of them could be slightly behind,
722     + * making the assumption that last_value always go forward fail to hold.
723     + */
724     + last = atomic64_read(&last_value);
725     + do {
726     + if (ret < last)
727     + return last;
728     + last = atomic64_cmpxchg(&last_value, last, ret);
729     + } while (unlikely(last != ret));
730     +
731     return ret;
732     }
733    
734     diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
735     index 5d9e40c..4772a9f 100644
736     --- a/arch/x86/kernel/setup.c
737     +++ b/arch/x86/kernel/setup.c
738     @@ -663,6 +663,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
739     DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
740     },
741     },
742     + /*
743     + * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
744     + * match on the product name.
745     + */
746     + {
747     + .callback = dmi_low_memory_corruption,
748     + .ident = "Phoenix BIOS",
749     + .matches = {
750     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
751     + },
752     + },
753     #endif
754     {}
755     };
756     diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
757     index 86c9f91..46b8277 100644
758     --- a/arch/x86/kernel/tboot.c
759     +++ b/arch/x86/kernel/tboot.c
760     @@ -46,6 +46,7 @@
761    
762     /* Global pointer to shared data; NULL means no measured launch. */
763     struct tboot *tboot __read_mostly;
764     +EXPORT_SYMBOL(tboot);
765    
766     /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
767     #define AP_WAIT_TIMEOUT 1
768     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
769     index 28c3d81..8822021 100644
770     --- a/arch/x86/kvm/mmu.c
771     +++ b/arch/x86/kvm/mmu.c
772     @@ -227,7 +227,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
773     }
774     EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
775    
776     -static int is_write_protection(struct kvm_vcpu *vcpu)
777     +static bool is_write_protection(struct kvm_vcpu *vcpu)
778     {
779     return vcpu->arch.cr0 & X86_CR0_WP;
780     }
781     @@ -2097,11 +2097,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
782     direct = 1;
783     if (mmu_check_root(vcpu, root_gfn))
784     return 1;
785     +
786     + spin_lock(&vcpu->kvm->mmu_lock);
787     sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
788     PT64_ROOT_LEVEL, direct,
789     ACC_ALL, NULL);
790     root = __pa(sp->spt);
791     ++sp->root_count;
792     + spin_unlock(&vcpu->kvm->mmu_lock);
793     vcpu->arch.mmu.root_hpa = root;
794     return 0;
795     }
796     @@ -2123,11 +2126,15 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
797     root_gfn = 0;
798     if (mmu_check_root(vcpu, root_gfn))
799     return 1;
800     +
801     + spin_lock(&vcpu->kvm->mmu_lock);
802     sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
803     PT32_ROOT_LEVEL, direct,
804     ACC_ALL, NULL);
805     root = __pa(sp->spt);
806     ++sp->root_count;
807     + spin_unlock(&vcpu->kvm->mmu_lock);
808     +
809     vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
810     }
811     vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
812     @@ -2448,6 +2455,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
813     r = paging32_init_context(vcpu);
814    
815     vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
816     + vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
817    
818     return r;
819     }
820     @@ -2487,7 +2495,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
821     goto out;
822     spin_lock(&vcpu->kvm->mmu_lock);
823     kvm_mmu_free_some_pages(vcpu);
824     + spin_unlock(&vcpu->kvm->mmu_lock);
825     r = mmu_alloc_roots(vcpu);
826     + spin_lock(&vcpu->kvm->mmu_lock);
827     mmu_sync_roots(vcpu);
828     spin_unlock(&vcpu->kvm->mmu_lock);
829     if (r)
830     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
831     index d42e191..a2f839c 100644
832     --- a/arch/x86/kvm/svm.c
833     +++ b/arch/x86/kvm/svm.c
834     @@ -128,6 +128,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
835     static void svm_complete_interrupts(struct vcpu_svm *svm);
836    
837     static int nested_svm_exit_handled(struct vcpu_svm *svm);
838     +static int nested_svm_intercept(struct vcpu_svm *svm);
839     static int nested_svm_vmexit(struct vcpu_svm *svm);
840     static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
841     bool has_error_code, u32 error_code);
842     @@ -1359,6 +1360,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
843     static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
844     bool has_error_code, u32 error_code)
845     {
846     + int vmexit;
847     +
848     if (!is_nested(svm))
849     return 0;
850    
851     @@ -1367,19 +1370,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
852     svm->vmcb->control.exit_info_1 = error_code;
853     svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
854    
855     - return nested_svm_exit_handled(svm);
856     + vmexit = nested_svm_intercept(svm);
857     + if (vmexit == NESTED_EXIT_DONE)
858     + svm->nested.exit_required = true;
859     +
860     + return vmexit;
861     }
862    
863     -static inline int nested_svm_intr(struct vcpu_svm *svm)
864     +/* This function returns true if it is save to enable the irq window */
865     +static inline bool nested_svm_intr(struct vcpu_svm *svm)
866     {
867     if (!is_nested(svm))
868     - return 0;
869     + return true;
870    
871     if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
872     - return 0;
873     + return true;
874    
875     if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
876     - return 0;
877     + return false;
878    
879     svm->vmcb->control.exit_code = SVM_EXIT_INTR;
880    
881     @@ -1392,13 +1400,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
882     */
883     svm->nested.exit_required = true;
884     trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
885     - return 1;
886     + return false;
887     }
888    
889     - return 0;
890     + return true;
891     }
892    
893     -static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
894     +static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
895     {
896     struct page *page;
897    
898     @@ -1406,7 +1414,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
899     if (is_error_page(page))
900     goto error;
901    
902     - return kmap_atomic(page, idx);
903     + *_page = page;
904     +
905     + return kmap(page);
906    
907     error:
908     kvm_release_page_clean(page);
909     @@ -1415,16 +1425,9 @@ error:
910     return NULL;
911     }
912    
913     -static void nested_svm_unmap(void *addr, enum km_type idx)
914     +static void nested_svm_unmap(struct page *page)
915     {
916     - struct page *page;
917     -
918     - if (!addr)
919     - return;
920     -
921     - page = kmap_atomic_to_page(addr);
922     -
923     - kunmap_atomic(addr, idx);
924     + kunmap(page);
925     kvm_release_page_dirty(page);
926     }
927    
928     @@ -1434,16 +1437,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
929     u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
930     bool ret = false;
931     u32 t0, t1;
932     - u8 *msrpm;
933     + u8 val;
934    
935     if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
936     return false;
937    
938     - msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
939     -
940     - if (!msrpm)
941     - goto out;
942     -
943     switch (msr) {
944     case 0 ... 0x1fff:
945     t0 = (msr * 2) % 8;
946     @@ -1464,11 +1462,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
947     goto out;
948     }
949    
950     - ret = msrpm[t1] & ((1 << param) << t0);
951     + if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
952     + ret = val & ((1 << param) << t0);
953    
954     out:
955     - nested_svm_unmap(msrpm, KM_USER0);
956     -
957     return ret;
958     }
959    
960     @@ -1500,7 +1497,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
961     /*
962     * If this function returns true, this #vmexit was already handled
963     */
964     -static int nested_svm_exit_handled(struct vcpu_svm *svm)
965     +static int nested_svm_intercept(struct vcpu_svm *svm)
966     {
967     u32 exit_code = svm->vmcb->control.exit_code;
968     int vmexit = NESTED_EXIT_HOST;
969     @@ -1546,9 +1543,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
970     }
971     }
972    
973     - if (vmexit == NESTED_EXIT_DONE) {
974     + return vmexit;
975     +}
976     +
977     +static int nested_svm_exit_handled(struct vcpu_svm *svm)
978     +{
979     + int vmexit;
980     +
981     + vmexit = nested_svm_intercept(svm);
982     +
983     + if (vmexit == NESTED_EXIT_DONE)
984     nested_svm_vmexit(svm);
985     - }
986    
987     return vmexit;
988     }
989     @@ -1590,6 +1595,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
990     struct vmcb *nested_vmcb;
991     struct vmcb *hsave = svm->nested.hsave;
992     struct vmcb *vmcb = svm->vmcb;
993     + struct page *page;
994    
995     trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
996     vmcb->control.exit_info_1,
997     @@ -1597,7 +1603,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
998     vmcb->control.exit_int_info,
999     vmcb->control.exit_int_info_err);
1000    
1001     - nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
1002     + nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
1003     if (!nested_vmcb)
1004     return 1;
1005    
1006     @@ -1610,9 +1616,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1007     nested_vmcb->save.ds = vmcb->save.ds;
1008     nested_vmcb->save.gdtr = vmcb->save.gdtr;
1009     nested_vmcb->save.idtr = vmcb->save.idtr;
1010     + nested_vmcb->save.cr0 = svm->vcpu.arch.cr0;
1011     if (npt_enabled)
1012     nested_vmcb->save.cr3 = vmcb->save.cr3;
1013     + else
1014     + nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
1015     nested_vmcb->save.cr2 = vmcb->save.cr2;
1016     + nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
1017     nested_vmcb->save.rflags = vmcb->save.rflags;
1018     nested_vmcb->save.rip = vmcb->save.rip;
1019     nested_vmcb->save.rsp = vmcb->save.rsp;
1020     @@ -1687,7 +1697,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1021     /* Exit nested SVM mode */
1022     svm->nested.vmcb = 0;
1023    
1024     - nested_svm_unmap(nested_vmcb, KM_USER0);
1025     + nested_svm_unmap(page);
1026    
1027     kvm_mmu_reset_context(&svm->vcpu);
1028     kvm_mmu_load(&svm->vcpu);
1029     @@ -1698,9 +1708,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1030     static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1031     {
1032     u32 *nested_msrpm;
1033     + struct page *page;
1034     int i;
1035    
1036     - nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1037     + nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
1038     if (!nested_msrpm)
1039     return false;
1040    
1041     @@ -1709,7 +1720,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1042    
1043     svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1044    
1045     - nested_svm_unmap(nested_msrpm, KM_USER0);
1046     + nested_svm_unmap(page);
1047    
1048     return true;
1049     }
1050     @@ -1719,8 +1730,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1051     struct vmcb *nested_vmcb;
1052     struct vmcb *hsave = svm->nested.hsave;
1053     struct vmcb *vmcb = svm->vmcb;
1054     + struct page *page;
1055    
1056     - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1057     + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1058     if (!nested_vmcb)
1059     return false;
1060    
1061     @@ -1794,21 +1806,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1062     svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1063     svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1064    
1065     - /* We don't want a nested guest to be more powerful than the guest,
1066     - so all intercepts are ORed */
1067     - svm->vmcb->control.intercept_cr_read |=
1068     - nested_vmcb->control.intercept_cr_read;
1069     - svm->vmcb->control.intercept_cr_write |=
1070     - nested_vmcb->control.intercept_cr_write;
1071     - svm->vmcb->control.intercept_dr_read |=
1072     - nested_vmcb->control.intercept_dr_read;
1073     - svm->vmcb->control.intercept_dr_write |=
1074     - nested_vmcb->control.intercept_dr_write;
1075     - svm->vmcb->control.intercept_exceptions |=
1076     - nested_vmcb->control.intercept_exceptions;
1077     -
1078     - svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1079     -
1080     svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1081    
1082     /* cache intercepts */
1083     @@ -1826,13 +1823,40 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1084     else
1085     svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1086    
1087     + if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
1088     + /* We only want the cr8 intercept bits of the guest */
1089     + svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
1090     + svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1091     + }
1092     +
1093     + /* We don't want to see VMMCALLs from a nested guest */
1094     + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
1095     +
1096     + /*
1097     + * We don't want a nested guest to be more powerful than the guest, so
1098     + * all intercepts are ORed
1099     + */
1100     + svm->vmcb->control.intercept_cr_read |=
1101     + nested_vmcb->control.intercept_cr_read;
1102     + svm->vmcb->control.intercept_cr_write |=
1103     + nested_vmcb->control.intercept_cr_write;
1104     + svm->vmcb->control.intercept_dr_read |=
1105     + nested_vmcb->control.intercept_dr_read;
1106     + svm->vmcb->control.intercept_dr_write |=
1107     + nested_vmcb->control.intercept_dr_write;
1108     + svm->vmcb->control.intercept_exceptions |=
1109     + nested_vmcb->control.intercept_exceptions;
1110     +
1111     + svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1112     +
1113     + svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
1114     svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1115     svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1116     svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1117     svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1118     svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1119    
1120     - nested_svm_unmap(nested_vmcb, KM_USER0);
1121     + nested_svm_unmap(page);
1122    
1123     enable_gif(svm);
1124    
1125     @@ -1858,6 +1882,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1126     static int vmload_interception(struct vcpu_svm *svm)
1127     {
1128     struct vmcb *nested_vmcb;
1129     + struct page *page;
1130    
1131     if (nested_svm_check_permissions(svm))
1132     return 1;
1133     @@ -1865,12 +1890,12 @@ static int vmload_interception(struct vcpu_svm *svm)
1134     svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1135     skip_emulated_instruction(&svm->vcpu);
1136    
1137     - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1138     + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1139     if (!nested_vmcb)
1140     return 1;
1141    
1142     nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1143     - nested_svm_unmap(nested_vmcb, KM_USER0);
1144     + nested_svm_unmap(page);
1145    
1146     return 1;
1147     }
1148     @@ -1878,6 +1903,7 @@ static int vmload_interception(struct vcpu_svm *svm)
1149     static int vmsave_interception(struct vcpu_svm *svm)
1150     {
1151     struct vmcb *nested_vmcb;
1152     + struct page *page;
1153    
1154     if (nested_svm_check_permissions(svm))
1155     return 1;
1156     @@ -1885,12 +1911,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
1157     svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1158     skip_emulated_instruction(&svm->vcpu);
1159    
1160     - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1161     + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1162     if (!nested_vmcb)
1163     return 1;
1164    
1165     nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1166     - nested_svm_unmap(nested_vmcb, KM_USER0);
1167     + nested_svm_unmap(page);
1168    
1169     return 1;
1170     }
1171     @@ -2487,6 +2513,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
1172     {
1173     struct vcpu_svm *svm = to_svm(vcpu);
1174    
1175     + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1176     + return;
1177     +
1178     if (irr == -1)
1179     return;
1180    
1181     @@ -2544,13 +2573,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
1182     {
1183     struct vcpu_svm *svm = to_svm(vcpu);
1184    
1185     - nested_svm_intr(svm);
1186     -
1187     /* In case GIF=0 we can't rely on the CPU to tell us when
1188     * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
1189     * The next time we get that intercept, this function will be
1190     * called again though and we'll get the vintr intercept. */
1191     - if (gif_set(svm)) {
1192     + if (gif_set(svm) && nested_svm_intr(svm)) {
1193     svm_set_vintr(svm);
1194     svm_inject_irq(svm, 0x0);
1195     }
1196     @@ -2590,6 +2617,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1197     {
1198     struct vcpu_svm *svm = to_svm(vcpu);
1199    
1200     + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1201     + return;
1202     +
1203     if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1204     int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1205     kvm_set_cr8(vcpu, cr8);
1206     @@ -2601,6 +2631,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1207     struct vcpu_svm *svm = to_svm(vcpu);
1208     u64 cr8;
1209    
1210     + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1211     + return;
1212     +
1213     cr8 = kvm_get_cr8(vcpu);
1214     svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1215     svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1216     @@ -2857,6 +2890,20 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1217     return 0;
1218     }
1219    
1220     +static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1221     +{
1222     + switch (func) {
1223     + case 0x8000000A:
1224     + entry->eax = 1; /* SVM revision 1 */
1225     + entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
1226     + ASID emulation to nested SVM */
1227     + entry->ecx = 0; /* Reserved */
1228     + entry->edx = 0; /* Do not support any additional features */
1229     +
1230     + break;
1231     + }
1232     +}
1233     +
1234     static const struct trace_print_flags svm_exit_reasons_str[] = {
1235     { SVM_EXIT_READ_CR0, "read_cr0" },
1236     { SVM_EXIT_READ_CR3, "read_cr3" },
1237     @@ -2981,6 +3028,7 @@ static struct kvm_x86_ops svm_x86_ops = {
1238    
1239     .exit_reasons_str = svm_exit_reasons_str,
1240     .gb_page_enable = svm_gb_page_enable,
1241     + .set_supported_cpuid = svm_set_supported_cpuid,
1242     };
1243    
1244     static int __init svm_init(void)
1245     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1246     index 3acbe19..2840568 100644
1247     --- a/arch/x86/kvm/vmx.c
1248     +++ b/arch/x86/kvm/vmx.c
1249     @@ -26,6 +26,7 @@
1250     #include <linux/sched.h>
1251     #include <linux/moduleparam.h>
1252     #include <linux/ftrace_event.h>
1253     +#include <linux/tboot.h>
1254     #include "kvm_cache_regs.h"
1255     #include "x86.h"
1256    
1257     @@ -1125,9 +1126,16 @@ static __init int vmx_disabled_by_bios(void)
1258     u64 msr;
1259    
1260     rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1261     - return (msr & (FEATURE_CONTROL_LOCKED |
1262     - FEATURE_CONTROL_VMXON_ENABLED))
1263     - == FEATURE_CONTROL_LOCKED;
1264     + if (msr & FEATURE_CONTROL_LOCKED) {
1265     + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
1266     + && tboot_enabled())
1267     + return 1;
1268     + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
1269     + && !tboot_enabled())
1270     + return 1;
1271     + }
1272     +
1273     + return 0;
1274     /* locked but not enabled */
1275     }
1276    
1277     @@ -1135,21 +1143,23 @@ static int hardware_enable(void *garbage)
1278     {
1279     int cpu = raw_smp_processor_id();
1280     u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1281     - u64 old;
1282     + u64 old, test_bits;
1283    
1284     if (read_cr4() & X86_CR4_VMXE)
1285     return -EBUSY;
1286    
1287     INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1288     rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1289     - if ((old & (FEATURE_CONTROL_LOCKED |
1290     - FEATURE_CONTROL_VMXON_ENABLED))
1291     - != (FEATURE_CONTROL_LOCKED |
1292     - FEATURE_CONTROL_VMXON_ENABLED))
1293     +
1294     + test_bits = FEATURE_CONTROL_LOCKED;
1295     + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
1296     + if (tboot_enabled())
1297     + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
1298     +
1299     + if ((old & test_bits) != test_bits) {
1300     /* enable and lock */
1301     - wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1302     - FEATURE_CONTROL_LOCKED |
1303     - FEATURE_CONTROL_VMXON_ENABLED);
1304     + wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
1305     + }
1306     write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1307     asm volatile (ASM_VMX_VMXON_RAX
1308     : : "a"(&phys_addr), "m"(phys_addr)
1309     @@ -3993,6 +4003,10 @@ static bool vmx_gb_page_enable(void)
1310     return false;
1311     }
1312    
1313     +static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1314     +{
1315     +}
1316     +
1317     static struct kvm_x86_ops vmx_x86_ops = {
1318     .cpu_has_kvm_support = cpu_has_kvm_support,
1319     .disabled_by_bios = vmx_disabled_by_bios,
1320     @@ -4057,6 +4071,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
1321    
1322     .exit_reasons_str = vmx_exit_reasons_str,
1323     .gb_page_enable = vmx_gb_page_enable,
1324     + .set_supported_cpuid = vmx_set_supported_cpuid,
1325     };
1326    
1327     static int __init vmx_init(void)
1328     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1329     index dd78927..07750de 100644
1330     --- a/arch/x86/kvm/x86.c
1331     +++ b/arch/x86/kvm/x86.c
1332     @@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
1333    
1334     void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1335     {
1336     - kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
1337     + kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0eul) | (msw & 0x0f));
1338     }
1339     EXPORT_SYMBOL_GPL(kvm_lmsw);
1340    
1341     @@ -570,48 +570,42 @@ static u32 emulated_msrs[] = {
1342     MSR_IA32_MISC_ENABLE,
1343     };
1344    
1345     -static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1346     +static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1347     {
1348     - if (efer & efer_reserved_bits) {
1349     - kvm_inject_gp(vcpu, 0);
1350     - return;
1351     - }
1352     + if (efer & efer_reserved_bits)
1353     + return 1;
1354    
1355     if (is_paging(vcpu)
1356     - && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1357     - kvm_inject_gp(vcpu, 0);
1358     - return;
1359     - }
1360     + && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME))
1361     + return 1;
1362    
1363     if (efer & EFER_FFXSR) {
1364     struct kvm_cpuid_entry2 *feat;
1365    
1366     feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1367     - if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
1368     - kvm_inject_gp(vcpu, 0);
1369     - return;
1370     - }
1371     + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
1372     + return 1;
1373     }
1374    
1375     if (efer & EFER_SVME) {
1376     struct kvm_cpuid_entry2 *feat;
1377    
1378     feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1379     - if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
1380     - kvm_inject_gp(vcpu, 0);
1381     - return;
1382     - }
1383     + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
1384     + return 1;
1385     }
1386    
1387     - kvm_x86_ops->set_efer(vcpu, efer);
1388     -
1389     efer &= ~EFER_LMA;
1390     efer |= vcpu->arch.shadow_efer & EFER_LMA;
1391    
1392     + kvm_x86_ops->set_efer(vcpu, efer);
1393     +
1394     vcpu->arch.shadow_efer = efer;
1395    
1396     vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
1397     kvm_mmu_reset_context(vcpu);
1398     +
1399     + return 0;
1400     }
1401    
1402     void kvm_enable_efer_bits(u64 mask)
1403     @@ -641,14 +635,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1404    
1405     static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1406     {
1407     - static int version;
1408     + int version;
1409     + int r;
1410     struct pvclock_wall_clock wc;
1411     struct timespec boot;
1412    
1413     if (!wall_clock)
1414     return;
1415    
1416     - version++;
1417     + r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1418     + if (r)
1419     + return;
1420     +
1421     + if (version & 1)
1422     + ++version; /* first time write, random junk */
1423     +
1424     + ++version;
1425    
1426     kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1427    
1428     @@ -938,8 +940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1429     {
1430     switch (msr) {
1431     case MSR_EFER:
1432     - set_efer(vcpu, data);
1433     - break;
1434     + return set_efer(vcpu, data);
1435     case MSR_K7_HWCR:
1436     data &= ~(u64)0x40; /* ignore flush filter disable */
1437     if (data != 0) {
1438     @@ -1542,6 +1543,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1439     {
1440     int r;
1441    
1442     + vcpu_load(vcpu);
1443     r = -E2BIG;
1444     if (cpuid->nent < vcpu->arch.cpuid_nent)
1445     goto out;
1446     @@ -1553,6 +1555,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1447    
1448     out:
1449     cpuid->nent = vcpu->arch.cpuid_nent;
1450     + vcpu_put(vcpu);
1451     return r;
1452     }
1453    
1454     @@ -1688,6 +1691,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1455     entry->ecx &= kvm_supported_word6_x86_features;
1456     break;
1457     }
1458     +
1459     + kvm_x86_ops->set_supported_cpuid(function, entry);
1460     +
1461     put_cpu();
1462     }
1463    
1464     @@ -1802,6 +1808,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1465     int r;
1466     unsigned bank_num = mcg_cap & 0xff, bank;
1467    
1468     + vcpu_load(vcpu);
1469     r = -EINVAL;
1470     if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
1471     goto out;
1472     @@ -1816,6 +1823,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1473     for (bank = 0; bank < bank_num; bank++)
1474     vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1475     out:
1476     + vcpu_put(vcpu);
1477     return r;
1478     }
1479    
1480     @@ -2083,7 +2091,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1481     r = -EFAULT;
1482     if (copy_from_user(&mce, argp, sizeof mce))
1483     goto out;
1484     + vcpu_load(vcpu);
1485     r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1486     + vcpu_put(vcpu);
1487     break;
1488     }
1489     case KVM_GET_VCPU_EVENTS: {
1490     diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1491     index 2c505ee..f1fb411 100644
1492     --- a/arch/x86/oprofile/nmi_int.c
1493     +++ b/arch/x86/oprofile/nmi_int.c
1494     @@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
1495     static void nmi_cpu_start(void *dummy)
1496     {
1497     struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1498     - model->start(msrs);
1499     + if (!msrs->controls)
1500     + WARN_ON_ONCE(1);
1501     + else
1502     + model->start(msrs);
1503     }
1504    
1505     static int nmi_start(void)
1506     @@ -107,7 +110,10 @@ static int nmi_start(void)
1507     static void nmi_cpu_stop(void *dummy)
1508     {
1509     struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1510     - model->stop(msrs);
1511     + if (!msrs->controls)
1512     + WARN_ON_ONCE(1);
1513     + else
1514     + model->stop(msrs);
1515     }
1516    
1517     static void nmi_stop(void)
1518     diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1519     index 987267f..a9c6611 100644
1520     --- a/arch/x86/xen/suspend.c
1521     +++ b/arch/x86/xen/suspend.c
1522     @@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)
1523    
1524     void xen_arch_resume(void)
1525     {
1526     - smp_call_function(xen_vcpu_notify_restore,
1527     - (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1528     + on_each_cpu(xen_vcpu_notify_restore,
1529     + (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1530     }
1531     diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
1532     index f04c989..ed8cd3c 100644
1533     --- a/arch/xtensa/include/asm/cache.h
1534     +++ b/arch/xtensa/include/asm/cache.h
1535     @@ -29,5 +29,6 @@
1536     # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
1537     #endif
1538    
1539     +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1540    
1541     #endif /* _XTENSA_CACHE_H */
1542     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1543     index 023f4e6..d0f8146 100644
1544     --- a/block/cfq-iosched.c
1545     +++ b/block/cfq-iosched.c
1546     @@ -2481,15 +2481,10 @@ static void cfq_free_io_context(struct io_context *ioc)
1547     __call_for_each_cic(ioc, cic_free_func);
1548     }
1549    
1550     -static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1551     +static void cfq_put_cooperator(struct cfq_queue *cfqq)
1552     {
1553     struct cfq_queue *__cfqq, *next;
1554    
1555     - if (unlikely(cfqq == cfqd->active_queue)) {
1556     - __cfq_slice_expired(cfqd, cfqq, 0);
1557     - cfq_schedule_dispatch(cfqd);
1558     - }
1559     -
1560     /*
1561     * If this queue was scheduled to merge with another queue, be
1562     * sure to drop the reference taken on that queue (and others in
1563     @@ -2505,6 +2500,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1564     cfq_put_queue(__cfqq);
1565     __cfqq = next;
1566     }
1567     +}
1568     +
1569     +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1570     +{
1571     + if (unlikely(cfqq == cfqd->active_queue)) {
1572     + __cfq_slice_expired(cfqd, cfqq, 0);
1573     + cfq_schedule_dispatch(cfqd);
1574     + }
1575     +
1576     + cfq_put_cooperator(cfqq);
1577    
1578     cfq_put_queue(cfqq);
1579     }
1580     @@ -3459,6 +3464,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
1581     }
1582    
1583     cic_set_cfqq(cic, NULL, 1);
1584     +
1585     + cfq_put_cooperator(cfqq);
1586     +
1587     cfq_put_queue(cfqq);
1588     return NULL;
1589     }
1590     diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1591     index fc2f26b..c5fef01 100644
1592     --- a/drivers/acpi/video_detect.c
1593     +++ b/drivers/acpi/video_detect.c
1594     @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
1595     ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
1596     if (!strcmp("video", str))
1597     acpi_video_support |=
1598     - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
1599     + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
1600     }
1601     return 1;
1602     }
1603     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1604     index 2401c9c..8e9b132 100644
1605     --- a/drivers/ata/libata-core.c
1606     +++ b/drivers/ata/libata-core.c
1607     @@ -159,6 +159,10 @@ int libata_allow_tpm = 0;
1608     module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
1609     MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
1610    
1611     +static int atapi_an;
1612     +module_param(atapi_an, int, 0444);
1613     +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
1614     +
1615     MODULE_AUTHOR("Jeff Garzik");
1616     MODULE_DESCRIPTION("Library module for ATA devices");
1617     MODULE_LICENSE("GPL");
1618     @@ -2570,7 +2574,8 @@ int ata_dev_configure(struct ata_device *dev)
1619     * to enable ATAPI AN to discern between PHY status
1620     * changed notifications and ATAPI ANs.
1621     */
1622     - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1623     + if (atapi_an &&
1624     + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1625     (!sata_pmp_attached(ap) ||
1626     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
1627     unsigned int err_mask;
1628     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1629     index 730ef3c..06e7204 100644
1630     --- a/drivers/ata/libata-sff.c
1631     +++ b/drivers/ata/libata-sff.c
1632     @@ -893,7 +893,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1633     do_write);
1634     }
1635    
1636     - if (!do_write)
1637     + if (!do_write && !PageSlab(page))
1638     flush_dcache_page(page);
1639    
1640     qc->curbytes += qc->sect_size;
1641     diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
1642     index 0c82d33..952641e 100644
1643     --- a/drivers/ata/sata_nv.c
1644     +++ b/drivers/ata/sata_nv.c
1645     @@ -1673,7 +1673,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
1646     mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1647     mask &= ~(NV_INT_ALL_MCP55 << shift);
1648     writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1649     - ata_sff_freeze(ap);
1650     }
1651    
1652     static void nv_mcp55_thaw(struct ata_port *ap)
1653     @@ -1687,7 +1686,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1654     mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1655     mask |= (NV_INT_MASK_MCP55 << shift);
1656     writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1657     - ata_sff_thaw(ap);
1658     }
1659    
1660     static void nv_adma_error_handler(struct ata_port *ap)
1661     @@ -2478,8 +2476,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1662     }
1663    
1664     pci_set_master(pdev);
1665     - return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
1666     - IRQF_SHARED, ipriv->sht);
1667     + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
1668     }
1669    
1670     #ifdef CONFIG_PM
1671     diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
1672     index 02efd9a..e35596b 100644
1673     --- a/drivers/ata/sata_via.c
1674     +++ b/drivers/ata/sata_via.c
1675     @@ -558,6 +558,19 @@ static void svia_configure(struct pci_dev *pdev)
1676     tmp8 |= NATIVE_MODE_ALL;
1677     pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
1678     }
1679     +
1680     + /*
1681     + * vt6421 has problems talking to some drives. The following
1682     + * is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
1683     + * Please add proper documentation if possible.
1684     + *
1685     + * https://bugzilla.kernel.org/show_bug.cgi?id=15173
1686     + */
1687     + if (pdev->device == 0x3249) {
1688     + pci_read_config_byte(pdev, 0x52, &tmp8);
1689     + tmp8 |= 1 << 2;
1690     + pci_write_config_byte(pdev, 0x52, tmp8);
1691     + }
1692     }
1693    
1694     static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1695     diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
1696     index 958bd15..7b71a15 100644
1697     --- a/drivers/base/cpu.c
1698     +++ b/drivers/base/cpu.c
1699     @@ -183,7 +183,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
1700     /* display offline cpus < nr_cpu_ids */
1701     if (!alloc_cpumask_var(&offline, GFP_KERNEL))
1702     return -ENOMEM;
1703     - cpumask_complement(offline, cpu_online_mask);
1704     + cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
1705     n = cpulist_scnprintf(buf, len, offline);
1706     free_cpumask_var(offline);
1707    
1708     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1709     index 176f175..aa79cfd 100644
1710     --- a/drivers/char/ipmi/ipmi_si_intf.c
1711     +++ b/drivers/char/ipmi/ipmi_si_intf.c
1712     @@ -311,9 +311,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
1713     {
1714     /* Deliver the message to the upper layer with the lock
1715     released. */
1716     - spin_unlock(&(smi_info->si_lock));
1717     - ipmi_smi_msg_received(smi_info->intf, msg);
1718     - spin_lock(&(smi_info->si_lock));
1719     +
1720     + if (smi_info->run_to_completion) {
1721     + ipmi_smi_msg_received(smi_info->intf, msg);
1722     + } else {
1723     + spin_unlock(&(smi_info->si_lock));
1724     + ipmi_smi_msg_received(smi_info->intf, msg);
1725     + spin_lock(&(smi_info->si_lock));
1726     + }
1727     }
1728    
1729     static void return_hosed_msg(struct smi_info *smi_info, int cCode)
1730     diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
1731     index 6fe4f77..234d9f6 100644
1732     --- a/drivers/clocksource/sh_cmt.c
1733     +++ b/drivers/clocksource/sh_cmt.c
1734     @@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
1735     static int sh_cmt_clocksource_enable(struct clocksource *cs)
1736     {
1737     struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
1738     - int ret;
1739    
1740     p->total_cycles = 0;
1741    
1742     - ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
1743     - if (ret)
1744     - return ret;
1745     -
1746     - /* TODO: calculate good shift from rate and counter bit width */
1747     - cs->shift = 0;
1748     - cs->mult = clocksource_hz2mult(p->rate, cs->shift);
1749     - return 0;
1750     + return sh_cmt_start(p, FLAG_CLOCKSOURCE);
1751     }
1752    
1753     static void sh_cmt_clocksource_disable(struct clocksource *cs)
1754     @@ -444,7 +436,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
1755     cs->disable = sh_cmt_clocksource_disable;
1756     cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
1757     cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
1758     +
1759     + /* clk_get_rate() needs an enabled clock */
1760     + clk_enable(p->clk);
1761     + p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8);
1762     + clk_disable(p->clk);
1763     +
1764     + /* TODO: calculate good shift from rate and counter bit width */
1765     + cs->shift = 10;
1766     + cs->mult = clocksource_hz2mult(p->rate, cs->shift);
1767     +
1768     pr_info("sh_cmt: %s used as clock source\n", cs->name);
1769     +
1770     clocksource_register(cs);
1771     return 0;
1772     }
1773     diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
1774     index 961f5b5..c073246 100644
1775     --- a/drivers/clocksource/sh_tmu.c
1776     +++ b/drivers/clocksource/sh_tmu.c
1777     @@ -199,16 +199,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
1778     static int sh_tmu_clocksource_enable(struct clocksource *cs)
1779     {
1780     struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
1781     - int ret;
1782     -
1783     - ret = sh_tmu_enable(p);
1784     - if (ret)
1785     - return ret;
1786    
1787     - /* TODO: calculate good shift from rate and counter bit width */
1788     - cs->shift = 10;
1789     - cs->mult = clocksource_hz2mult(p->rate, cs->shift);
1790     - return 0;
1791     + return sh_tmu_enable(p);
1792     }
1793    
1794     static void sh_tmu_clocksource_disable(struct clocksource *cs)
1795     @@ -228,6 +220,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
1796     cs->disable = sh_tmu_clocksource_disable;
1797     cs->mask = CLOCKSOURCE_MASK(32);
1798     cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
1799     +
1800     + /* clk_get_rate() needs an enabled clock */
1801     + clk_enable(p->clk);
1802     + /* channel will be configured at parent clock / 4 */
1803     + p->rate = clk_get_rate(p->clk) / 4;
1804     + clk_disable(p->clk);
1805     + /* TODO: calculate good shift from rate and counter bit width */
1806     + cs->shift = 10;
1807     + cs->mult = clocksource_hz2mult(p->rate, cs->shift);
1808     +
1809     pr_info("sh_tmu: %s used as clock source\n", cs->name);
1810     clocksource_register(cs);
1811     return 0;
1812     diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
1813     index 5045156..991447b 100644
1814     --- a/drivers/firewire/core-card.c
1815     +++ b/drivers/firewire/core-card.c
1816     @@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
1817     static void fw_card_bm_work(struct work_struct *work)
1818     {
1819     struct fw_card *card = container_of(work, struct fw_card, work.work);
1820     - struct fw_device *root_device;
1821     + struct fw_device *root_device, *irm_device;
1822     struct fw_node *root_node;
1823     unsigned long flags;
1824     int root_id, new_root_id, irm_id, local_id;
1825     @@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
1826     bool do_reset = false;
1827     bool root_device_is_running;
1828     bool root_device_is_cmc;
1829     + bool irm_is_1394_1995_only;
1830    
1831     spin_lock_irqsave(&card->lock, flags);
1832    
1833     @@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
1834     }
1835    
1836     generation = card->generation;
1837     +
1838     root_node = card->root_node;
1839     fw_node_get(root_node);
1840     root_device = root_node->data;
1841     root_device_is_running = root_device &&
1842     atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
1843     root_device_is_cmc = root_device && root_device->cmc;
1844     +
1845     + irm_device = card->irm_node->data;
1846     + irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
1847     + (irm_device->config_rom[2] & 0x000000f0) == 0;
1848     +
1849     root_id = root_node->node_id;
1850     irm_id = card->irm_node->node_id;
1851     local_id = card->local_node->node_id;
1852     @@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)
1853    
1854     if (!card->irm_node->link_on) {
1855     new_root_id = local_id;
1856     - fw_notify("IRM has link off, making local node (%02x) root.\n",
1857     - new_root_id);
1858     + fw_notify("%s, making local node (%02x) root.\n",
1859     + "IRM has link off", new_root_id);
1860     + goto pick_me;
1861     + }
1862     +
1863     + if (irm_is_1394_1995_only) {
1864     + new_root_id = local_id;
1865     + fw_notify("%s, making local node (%02x) root.\n",
1866     + "IRM is not 1394a compliant", new_root_id);
1867     goto pick_me;
1868     }
1869    
1870     @@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
1871     * root, and thus, IRM.
1872     */
1873     new_root_id = local_id;
1874     - fw_notify("BM lock failed, making local node (%02x) root.\n",
1875     - new_root_id);
1876     + fw_notify("%s, making local node (%02x) root.\n",
1877     + "BM lock failed", new_root_id);
1878     goto pick_me;
1879     }
1880     } else if (card->bm_generation != generation) {
1881     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1882     index bfd0e4a..48b07ef 100644
1883     --- a/drivers/gpu/drm/drm_edid.c
1884     +++ b/drivers/gpu/drm/drm_edid.c
1885     @@ -334,7 +334,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
1886     DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1887     /* 1024x768@85Hz */
1888     { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1889     - 1072, 1376, 0, 768, 769, 772, 808, 0,
1890     + 1168, 1376, 0, 768, 769, 772, 808, 0,
1891     DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1892     /* 1152x864@75Hz */
1893     { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1894     diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1895     index 4746bfe..930664c 100644
1896     --- a/drivers/gpu/drm/i915/i915_drv.c
1897     +++ b/drivers/gpu/drm/i915/i915_drv.c
1898     @@ -68,7 +68,8 @@ const static struct intel_device_info intel_845g_info = {
1899     };
1900    
1901     const static struct intel_device_info intel_i85x_info = {
1902     - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
1903     + .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
1904     + .cursor_needs_physical = 1,
1905     };
1906    
1907     const static struct intel_device_info intel_i865g_info = {
1908     @@ -140,7 +141,7 @@ const static struct pci_device_id pciidlist[] = {
1909     INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
1910     INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
1911     INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
1912     - INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
1913     + INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
1914     INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
1915     INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
1916     INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
1917     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1918     index 0b33757..6679741 100644
1919     --- a/drivers/gpu/drm/i915/i915_drv.h
1920     +++ b/drivers/gpu/drm/i915/i915_drv.h
1921     @@ -175,6 +175,7 @@ struct intel_overlay;
1922     struct intel_device_info {
1923     u8 is_mobile : 1;
1924     u8 is_i8xx : 1;
1925     + u8 is_i85x : 1;
1926     u8 is_i915g : 1;
1927     u8 is_i9xx : 1;
1928     u8 is_i945gm : 1;
1929     @@ -1027,7 +1028,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1930    
1931     #define IS_I830(dev) ((dev)->pci_device == 0x3577)
1932     #define IS_845G(dev) ((dev)->pci_device == 0x2562)
1933     -#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
1934     +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1935     #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1936     #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1937     #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1938     diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1939     index c00c978..4a44de4 100644
1940     --- a/drivers/gpu/drm/i915/i915_gem.c
1941     +++ b/drivers/gpu/drm/i915/i915_gem.c
1942     @@ -2641,6 +2641,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1943     return -EINVAL;
1944     }
1945    
1946     + /* If the object is bigger than the entire aperture, reject it early
1947     + * before evicting everything in a vain attempt to find space.
1948     + */
1949     + if (obj->size > dev->gtt_total) {
1950     + DRM_ERROR("Attempting to bind an object larger than the aperture\n");
1951     + return -E2BIG;
1952     + }
1953     +
1954     search_free:
1955     free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1956     obj->size, alignment, 0);
1957     @@ -4175,6 +4183,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1958     int ret;
1959    
1960     i915_verify_inactive(dev, __FILE__, __LINE__);
1961     +
1962     + if (obj_priv->gtt_space != NULL) {
1963     + if (alignment == 0)
1964     + alignment = i915_gem_get_gtt_alignment(obj);
1965     + if (obj_priv->gtt_offset & (alignment - 1)) {
1966     + ret = i915_gem_object_unbind(obj);
1967     + if (ret)
1968     + return ret;
1969     + }
1970     + }
1971     +
1972     if (obj_priv->gtt_space == NULL) {
1973     ret = i915_gem_object_bind_to_gtt(obj, alignment);
1974     if (ret)
1975     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1976     index 4d88315..ff02664 100644
1977     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1978     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1979     @@ -514,6 +514,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
1980     }
1981    
1982     /* look up gpio for ddc, hpd */
1983     + ddc_bus.valid = false;
1984     + hpd.hpd = RADEON_HPD_NONE;
1985     if ((le16_to_cpu(path->usDeviceTag) &
1986     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
1987     for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
1988     @@ -569,9 +571,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
1989     break;
1990     }
1991     }
1992     - } else {
1993     - hpd.hpd = RADEON_HPD_NONE;
1994     - ddc_bus.valid = false;
1995     }
1996    
1997     conn_id = le16_to_cpu(path->usConnObjectId);
1998     @@ -1137,7 +1136,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1999     lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
2000     le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
2001     lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
2002     - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2003     + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
2004     lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
2005     le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2006     lvds->panel_pwr_delay =
2007     diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
2008     index f129bbb..419630d 100644
2009     --- a/drivers/gpu/drm/radeon/radeon_cp.c
2010     +++ b/drivers/gpu/drm/radeon/radeon_cp.c
2011     @@ -1646,6 +1646,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
2012     radeon_cp_load_microcode(dev_priv);
2013     radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
2014    
2015     + dev_priv->have_z_offset = 0;
2016     radeon_do_engine_reset(dev);
2017     radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
2018    
2019     diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
2020     index c57ad60..ebaee61 100644
2021     --- a/drivers/gpu/drm/radeon/radeon_drv.h
2022     +++ b/drivers/gpu/drm/radeon/radeon_drv.h
2023     @@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
2024    
2025     u32 scratch_ages[5];
2026    
2027     + int have_z_offset;
2028     +
2029     /* starting from here on, data is preserved accross an open */
2030     uint32_t flags; /* see radeon_chip_flags */
2031     resource_size_t fb_aper_offset;
2032     diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
2033     index 067167c..16436bb 100644
2034     --- a/drivers/gpu/drm/radeon/radeon_state.c
2035     +++ b/drivers/gpu/drm/radeon/radeon_state.c
2036     @@ -101,6 +101,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
2037     DRM_ERROR("Invalid depth buffer offset\n");
2038     return -EINVAL;
2039     }
2040     + dev_priv->have_z_offset = 1;
2041     break;
2042    
2043     case RADEON_EMIT_PP_CNTL:
2044     @@ -876,6 +877,12 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
2045     if (tmp & RADEON_BACK)
2046     flags |= RADEON_FRONT;
2047     }
2048     + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
2049     + if (!dev_priv->have_z_offset) {
2050     + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
2051     + flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
2052     + }
2053     + }
2054    
2055     if (flags & (RADEON_FRONT | RADEON_BACK)) {
2056    
2057     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2058     index 8455f3d..0a404db 100644
2059     --- a/drivers/hid/hid-core.c
2060     +++ b/drivers/hid/hid-core.c
2061     @@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = {
2062     { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
2063     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2064     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2065     + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2066     { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
2067     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
2068     { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
2069     diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2070     index 62416e6..3975e03 100644
2071     --- a/drivers/hid/hid-gyration.c
2072     +++ b/drivers/hid/hid-gyration.c
2073     @@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2074     static const struct hid_device_id gyration_devices[] = {
2075     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2076     { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2077     + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2078     { }
2079     };
2080     MODULE_DEVICE_TABLE(hid, gyration_devices);
2081     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2082     index 793691f..5ec8a79 100644
2083     --- a/drivers/hid/hid-ids.h
2084     +++ b/drivers/hid/hid-ids.h
2085     @@ -256,6 +256,7 @@
2086     #define USB_VENDOR_ID_GYRATION 0x0c16
2087     #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
2088     #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
2089     +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
2090    
2091     #define USB_VENDOR_ID_HAPP 0x078b
2092     #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
2093     diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
2094     index 65c232a..21d201b 100644
2095     --- a/drivers/hwmon/ltc4245.c
2096     +++ b/drivers/hwmon/ltc4245.c
2097     @@ -45,9 +45,7 @@ enum ltc4245_cmd {
2098     LTC4245_VEEIN = 0x19,
2099     LTC4245_VEESENSE = 0x1a,
2100     LTC4245_VEEOUT = 0x1b,
2101     - LTC4245_GPIOADC1 = 0x1c,
2102     - LTC4245_GPIOADC2 = 0x1d,
2103     - LTC4245_GPIOADC3 = 0x1e,
2104     + LTC4245_GPIOADC = 0x1c,
2105     };
2106    
2107     struct ltc4245_data {
2108     @@ -61,7 +59,7 @@ struct ltc4245_data {
2109     u8 cregs[0x08];
2110    
2111     /* Voltage registers */
2112     - u8 vregs[0x0f];
2113     + u8 vregs[0x0d];
2114     };
2115    
2116     static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2117     @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2118     data->cregs[i] = val;
2119     }
2120    
2121     - /* Read voltage registers -- 0x10 to 0x1f */
2122     + /* Read voltage registers -- 0x10 to 0x1c */
2123     for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
2124     val = i2c_smbus_read_byte_data(client, i+0x10);
2125     if (unlikely(val < 0))
2126     @@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
2127     case LTC4245_VEEOUT:
2128     voltage = regval * -55;
2129     break;
2130     - case LTC4245_GPIOADC1:
2131     - case LTC4245_GPIOADC2:
2132     - case LTC4245_GPIOADC3:
2133     + case LTC4245_GPIOADC:
2134     voltage = regval * 10;
2135     break;
2136     default:
2137     @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
2138     LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
2139    
2140     /* GPIO voltages */
2141     -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
2142     -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
2143     -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
2144     +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
2145    
2146     /* Power Consumption (virtual) */
2147     LTC4245_POWER(power1_input, LTC4245_12VSENSE);
2148     @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
2149     &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
2150    
2151     &sensor_dev_attr_in9_input.dev_attr.attr,
2152     - &sensor_dev_attr_in10_input.dev_attr.attr,
2153     - &sensor_dev_attr_in11_input.dev_attr.attr,
2154    
2155     &sensor_dev_attr_power1_input.dev_attr.attr,
2156     &sensor_dev_attr_power2_input.dev_attr.attr,
2157     diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
2158     index d8c0c8d..6cda023 100644
2159     --- a/drivers/input/mouse/psmouse-base.c
2160     +++ b/drivers/input/mouse/psmouse-base.c
2161     @@ -1382,6 +1382,7 @@ static int psmouse_reconnect(struct serio *serio)
2162     struct psmouse *psmouse = serio_get_drvdata(serio);
2163     struct psmouse *parent = NULL;
2164     struct serio_driver *drv = serio->drv;
2165     + unsigned char type;
2166     int rc = -1;
2167    
2168     if (!drv || !psmouse) {
2169     @@ -1401,10 +1402,15 @@ static int psmouse_reconnect(struct serio *serio)
2170     if (psmouse->reconnect) {
2171     if (psmouse->reconnect(psmouse))
2172     goto out;
2173     - } else if (psmouse_probe(psmouse) < 0 ||
2174     - psmouse->type != psmouse_extensions(psmouse,
2175     - psmouse_max_proto, false)) {
2176     - goto out;
2177     + } else {
2178     + psmouse_reset(psmouse);
2179     +
2180     + if (psmouse_probe(psmouse) < 0)
2181     + goto out;
2182     +
2183     + type = psmouse_extensions(psmouse, psmouse_max_proto, false);
2184     + if (psmouse->type != type)
2185     + goto out;
2186     }
2187    
2188     /* ok, the device type (and capabilities) match the old one,
2189     diff --git a/drivers/md/linear.c b/drivers/md/linear.c
2190     index 001317b..12f4acb 100644
2191     --- a/drivers/md/linear.c
2192     +++ b/drivers/md/linear.c
2193     @@ -281,6 +281,7 @@ static int linear_stop (mddev_t *mddev)
2194     rcu_barrier();
2195     blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2196     kfree(conf);
2197     + mddev->private = NULL;
2198    
2199     return 0;
2200     }
2201     diff --git a/drivers/md/md.c b/drivers/md/md.c
2202     index 2ecd1d5..ec10d74 100644
2203     --- a/drivers/md/md.c
2204     +++ b/drivers/md/md.c
2205     @@ -507,9 +507,36 @@ static inline int mddev_trylock(mddev_t * mddev)
2206     return mutex_trylock(&mddev->reconfig_mutex);
2207     }
2208    
2209     -static inline void mddev_unlock(mddev_t * mddev)
2210     -{
2211     - mutex_unlock(&mddev->reconfig_mutex);
2212     +static struct attribute_group md_redundancy_group;
2213     +
2214     +static void mddev_unlock(mddev_t * mddev)
2215     +{
2216     + if (mddev->to_remove) {
2217     + /* These cannot be removed under reconfig_mutex as
2218     + * an access to the files will try to take reconfig_mutex
2219     + * while holding the file unremovable, which leads to
2220     + * a deadlock.
2221     + * So hold open_mutex instead - we are allowed to take
2222     + * it while holding reconfig_mutex, and md_run can
2223     + * use it to wait for the remove to complete.
2224     + */
2225     + struct attribute_group *to_remove = mddev->to_remove;
2226     + mddev->to_remove = NULL;
2227     + mutex_lock(&mddev->open_mutex);
2228     + mutex_unlock(&mddev->reconfig_mutex);
2229     +
2230     + if (to_remove != &md_redundancy_group)
2231     + sysfs_remove_group(&mddev->kobj, to_remove);
2232     + if (mddev->pers == NULL ||
2233     + mddev->pers->sync_request == NULL) {
2234     + sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2235     + if (mddev->sysfs_action)
2236     + sysfs_put(mddev->sysfs_action);
2237     + mddev->sysfs_action = NULL;
2238     + }
2239     + mutex_unlock(&mddev->open_mutex);
2240     + } else
2241     + mutex_unlock(&mddev->reconfig_mutex);
2242    
2243     md_wakeup_thread(mddev->thread);
2244     }
2245     @@ -2979,6 +3006,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2246     /* Looks like we have a winner */
2247     mddev_suspend(mddev);
2248     mddev->pers->stop(mddev);
2249     +
2250     + if (mddev->pers->sync_request == NULL &&
2251     + pers->sync_request != NULL) {
2252     + /* need to add the md_redundancy_group */
2253     + if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
2254     + printk(KERN_WARNING
2255     + "md: cannot register extra attributes for %s\n",
2256     + mdname(mddev));
2257     + mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
2258     + }
2259     + if (mddev->pers->sync_request != NULL &&
2260     + pers->sync_request == NULL) {
2261     + /* need to remove the md_redundancy_group */
2262     + if (mddev->to_remove == NULL)
2263     + mddev->to_remove = &md_redundancy_group;
2264     + }
2265     +
2266     module_put(mddev->pers->owner);
2267     /* Invalidate devices that are now superfluous */
2268     list_for_each_entry(rdev, &mddev->disks, same_set)
2269     @@ -4081,15 +4125,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
2270     {
2271     mddev_t *mddev = container_of(ws, mddev_t, del_work);
2272    
2273     - if (mddev->private) {
2274     - sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2275     - if (mddev->private != (void*)1)
2276     - sysfs_remove_group(&mddev->kobj, mddev->private);
2277     - if (mddev->sysfs_action)
2278     - sysfs_put(mddev->sysfs_action);
2279     - mddev->sysfs_action = NULL;
2280     - mddev->private = NULL;
2281     - }
2282     sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
2283     kobject_del(&mddev->kobj);
2284     kobject_put(&mddev->kobj);
2285     @@ -4247,6 +4282,13 @@ static int do_md_run(mddev_t * mddev)
2286     if (mddev->pers)
2287     return -EBUSY;
2288    
2289     + /* These two calls synchronise us with the
2290     + * sysfs_remove_group calls in mddev_unlock,
2291     + * so they must have completed.
2292     + */
2293     + mutex_lock(&mddev->open_mutex);
2294     + mutex_unlock(&mddev->open_mutex);
2295     +
2296     /*
2297     * Analyze all RAID superblock(s)
2298     */
2299     @@ -4535,8 +4577,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
2300     mddev->queue->unplug_fn = NULL;
2301     mddev->queue->backing_dev_info.congested_fn = NULL;
2302     module_put(mddev->pers->owner);
2303     - if (mddev->pers->sync_request && mddev->private == NULL)
2304     - mddev->private = (void*)1;
2305     + if (mddev->pers->sync_request && mddev->to_remove == NULL)
2306     + mddev->to_remove = &md_redundancy_group;
2307     mddev->pers = NULL;
2308     /* tell userspace to handle 'inactive' */
2309     sysfs_notify_dirent(mddev->sysfs_state);
2310     @@ -5495,6 +5537,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2311     int err = 0;
2312     void __user *argp = (void __user *)arg;
2313     mddev_t *mddev = NULL;
2314     + int ro;
2315    
2316     if (!capable(CAP_SYS_ADMIN))
2317     return -EACCES;
2318     @@ -5630,6 +5673,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2319     err = do_md_stop(mddev, 1, 1);
2320     goto done_unlock;
2321    
2322     + case BLKROSET:
2323     + if (get_user(ro, (int __user *)(arg))) {
2324     + err = -EFAULT;
2325     + goto done_unlock;
2326     + }
2327     + err = -EINVAL;
2328     +
2329     + /* if the bdev is going readonly the value of mddev->ro
2330     + * does not matter, no writes are coming
2331     + */
2332     + if (ro)
2333     + goto done_unlock;
2334     +
2335     + /* are we already prepared for writes? */
2336     + if (mddev->ro != 1)
2337     + goto done_unlock;
2338     +
2339     + /* transitioning to readauto need only happen for
2340     + * arrays that call md_write_start
2341     + */
2342     + if (mddev->pers) {
2343     + err = restart_array(mddev);
2344     + if (err == 0) {
2345     + mddev->ro = 2;
2346     + set_disk_ro(mddev->gendisk, 0);
2347     + }
2348     + }
2349     + goto done_unlock;
2350     }
2351    
2352     /*
2353     diff --git a/drivers/md/md.h b/drivers/md/md.h
2354     index 8e4c75c..722f5df 100644
2355     --- a/drivers/md/md.h
2356     +++ b/drivers/md/md.h
2357     @@ -305,6 +305,7 @@ struct mddev_s
2358     atomic_t max_corr_read_errors; /* max read retries */
2359     struct list_head all_mddevs;
2360    
2361     + struct attribute_group *to_remove;
2362     /* Generic barrier handling.
2363     * If there is a pending barrier request, all other
2364     * writes are blocked while the devices are flushed.
2365     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2366     index 859bd3f..db2de5a 100644
2367     --- a/drivers/md/raid1.c
2368     +++ b/drivers/md/raid1.c
2369     @@ -417,7 +417,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
2370     */
2371     static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2372     {
2373     - const unsigned long this_sector = r1_bio->sector;
2374     + const sector_t this_sector = r1_bio->sector;
2375     int new_disk = conf->last_used, disk = new_disk;
2376     int wonly_disk = -1;
2377     const int sectors = r1_bio->sectors;
2378     @@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2379     retry:
2380     if (conf->mddev->recovery_cp < MaxSector &&
2381     (this_sector + sectors >= conf->next_resync)) {
2382     - /* Choose the first operation device, for consistancy */
2383     + /* Choose the first operational device, for consistency */
2384     new_disk = 0;
2385    
2386     for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
2387     @@ -911,9 +911,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
2388     if (test_bit(Faulty, &rdev->flags)) {
2389     rdev_dec_pending(rdev, mddev);
2390     r1_bio->bios[i] = NULL;
2391     - } else
2392     + } else {
2393     r1_bio->bios[i] = bio;
2394     - targets++;
2395     + targets++;
2396     + }
2397     } else
2398     r1_bio->bios[i] = NULL;
2399     }
2400     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2401     index 047c468..5fb1ad6 100644
2402     --- a/drivers/md/raid10.c
2403     +++ b/drivers/md/raid10.c
2404     @@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
2405     */
2406     static int read_balance(conf_t *conf, r10bio_t *r10_bio)
2407     {
2408     - const unsigned long this_sector = r10_bio->sector;
2409     + const sector_t this_sector = r10_bio->sector;
2410     int disk, slot, nslot;
2411     const int sectors = r10_bio->sectors;
2412     sector_t new_distance, current_distance;
2413     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2414     index 0468f5b..105a3dc 100644
2415     --- a/drivers/md/raid5.c
2416     +++ b/drivers/md/raid5.c
2417     @@ -5086,7 +5086,9 @@ static int run(mddev_t *mddev)
2418     }
2419    
2420     /* Ok, everything is just fine now */
2421     - if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2422     + if (mddev->to_remove == &raid5_attrs_group)
2423     + mddev->to_remove = NULL;
2424     + else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2425     printk(KERN_WARNING
2426     "raid5: failed to create sysfs attributes for %s\n",
2427     mdname(mddev));
2428     @@ -5133,7 +5135,8 @@ static int stop(mddev_t *mddev)
2429     mddev->queue->backing_dev_info.congested_fn = NULL;
2430     blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2431     free_conf(conf);
2432     - mddev->private = &raid5_attrs_group;
2433     + mddev->private = NULL;
2434     + mddev->to_remove = &raid5_attrs_group;
2435     return 0;
2436     }
2437    
2438     diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
2439     index 5d0241b..06e0e86 100644
2440     --- a/drivers/media/video/gspca/stv06xx/stv06xx.c
2441     +++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
2442     @@ -496,8 +496,6 @@ static const __devinitdata struct usb_device_id device_table[] = {
2443     {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
2444     /* QuickCam Messenger (new) */
2445     {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
2446     - /* QuickCam Messenger (new) */
2447     - {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
2448     {}
2449     };
2450     MODULE_DEVICE_TABLE(usb, device_table);
2451     diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
2452     index dbf4de3..69698e5 100644
2453     --- a/drivers/net/arcnet/com20020-pci.c
2454     +++ b/drivers/net/arcnet/com20020-pci.c
2455     @@ -165,8 +165,8 @@ static struct pci_device_id com20020pci_id_table[] = {
2456     { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2457     { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2458     { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2459     - { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2460     - { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2461     + { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
2462     + { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
2463     { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2464     { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2465     {0,}
2466     diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
2467     index 542a4f7..2ee19d1 100644
2468     --- a/drivers/net/can/sja1000/sja1000.c
2469     +++ b/drivers/net/can/sja1000/sja1000.c
2470     @@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
2471     .brp_inc = 1,
2472     };
2473    
2474     +static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
2475     +{
2476     + unsigned long flags;
2477     +
2478     + /*
2479     + * The command register needs some locking and time to settle
2480     + * the write_reg() operation - especially on SMP systems.
2481     + */
2482     + spin_lock_irqsave(&priv->cmdreg_lock, flags);
2483     + priv->write_reg(priv, REG_CMR, val);
2484     + priv->read_reg(priv, REG_SR);
2485     + spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
2486     +}
2487     +
2488     static int sja1000_probe_chip(struct net_device *dev)
2489     {
2490     struct sja1000_priv *priv = netdev_priv(dev);
2491     @@ -279,7 +293,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
2492    
2493     can_put_echo_skb(skb, dev, 0);
2494    
2495     - priv->write_reg(priv, REG_CMR, CMD_TR);
2496     + sja1000_write_cmdreg(priv, CMD_TR);
2497    
2498     return NETDEV_TX_OK;
2499     }
2500     @@ -328,7 +342,7 @@ static void sja1000_rx(struct net_device *dev)
2501     cf->can_id = id;
2502    
2503     /* release receive buffer */
2504     - priv->write_reg(priv, REG_CMR, CMD_RRB);
2505     + sja1000_write_cmdreg(priv, CMD_RRB);
2506    
2507     netif_rx(skb);
2508    
2509     @@ -356,7 +370,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
2510     cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
2511     stats->rx_over_errors++;
2512     stats->rx_errors++;
2513     - priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
2514     + sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
2515     }
2516    
2517     if (isrc & IRQ_EI) {
2518     diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
2519     index 97a622b..de8e778 100644
2520     --- a/drivers/net/can/sja1000/sja1000.h
2521     +++ b/drivers/net/can/sja1000/sja1000.h
2522     @@ -167,6 +167,7 @@ struct sja1000_priv {
2523    
2524     void __iomem *reg_base; /* ioremap'ed address to registers */
2525     unsigned long irq_flags; /* for request_irq() */
2526     + spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
2527    
2528     u16 flags; /* custom mode flags */
2529     u8 ocr; /* output control register */
2530     diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
2531     index 04b382f..83eef8e 100644
2532     --- a/drivers/net/mlx4/icm.c
2533     +++ b/drivers/net/mlx4/icm.c
2534     @@ -174,9 +174,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
2535    
2536     if (chunk->nsg <= 0)
2537     goto fail;
2538     + }
2539    
2540     + if (chunk->npages == MLX4_ICM_CHUNK_LEN)
2541     chunk = NULL;
2542     - }
2543    
2544     npages -= 1 << cur_order;
2545     } else {
2546     diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
2547     index 449a982..8015310 100644
2548     --- a/drivers/net/pppol2tp.c
2549     +++ b/drivers/net/pppol2tp.c
2550     @@ -977,7 +977,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
2551     /* Calculate UDP checksum if configured to do so */
2552     if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
2553     skb->ip_summed = CHECKSUM_NONE;
2554     - else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
2555     + else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
2556     + (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
2557     skb->ip_summed = CHECKSUM_COMPLETE;
2558     csum = skb_checksum(skb, 0, udp_len, 0);
2559     uh->check = csum_tcpudp_magic(inet->inet_saddr,
2560     diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
2561     index 701ddb7..5e7e1bd 100644
2562     --- a/drivers/net/wireless/ath/ar9170/hw.h
2563     +++ b/drivers/net/wireless/ath/ar9170/hw.h
2564     @@ -424,5 +424,6 @@ enum ar9170_txq {
2565    
2566     #define AR9170_TXQ_DEPTH 32
2567     #define AR9170_TX_MAX_PENDING 128
2568     +#define AR9170_RX_STREAM_MAX_SIZE 65535
2569    
2570     #endif /* __AR9170_HW_H */
2571     diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
2572     index f9d6db8..9ea9845 100644
2573     --- a/drivers/net/wireless/ath/ar9170/main.c
2574     +++ b/drivers/net/wireless/ath/ar9170/main.c
2575     @@ -2538,7 +2538,7 @@ void *ar9170_alloc(size_t priv_size)
2576     * tends to split the streams into seperate rx descriptors.
2577     */
2578    
2579     - skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
2580     + skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
2581     if (!skb)
2582     goto err_nomem;
2583    
2584     diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
2585     index 0387658..db541d4 100644
2586     --- a/drivers/net/wireless/ath/ar9170/usb.c
2587     +++ b/drivers/net/wireless/ath/ar9170/usb.c
2588     @@ -66,18 +66,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
2589     { USB_DEVICE(0x0cf3, 0x1001) },
2590     /* TP-Link TL-WN821N v2 */
2591     { USB_DEVICE(0x0cf3, 0x1002) },
2592     + /* 3Com Dual Band 802.11n USB Adapter */
2593     + { USB_DEVICE(0x0cf3, 0x1010) },
2594     + /* H3C Dual Band 802.11n USB Adapter */
2595     + { USB_DEVICE(0x0cf3, 0x1011) },
2596     /* Cace Airpcap NX */
2597     { USB_DEVICE(0xcace, 0x0300) },
2598     /* D-Link DWA 160 A1 */
2599     { USB_DEVICE(0x07d1, 0x3c10) },
2600     /* D-Link DWA 160 A2 */
2601     { USB_DEVICE(0x07d1, 0x3a09) },
2602     + /* Netgear WNA1000 */
2603     + { USB_DEVICE(0x0846, 0x9040) },
2604     /* Netgear WNDA3100 */
2605     { USB_DEVICE(0x0846, 0x9010) },
2606     /* Netgear WN111 v2 */
2607     { USB_DEVICE(0x0846, 0x9001) },
2608     /* Zydas ZD1221 */
2609     { USB_DEVICE(0x0ace, 0x1221) },
2610     + /* Proxim ORiNOCO 802.11n USB */
2611     + { USB_DEVICE(0x1435, 0x0804) },
2612     + /* WNC Generic 11n USB Dongle */
2613     + { USB_DEVICE(0x1435, 0x0326) },
2614     /* ZyXEL NWD271N */
2615     { USB_DEVICE(0x0586, 0x3417) },
2616     /* Z-Com UB81 BG */
2617     diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2618     index ced648b..d377809 100644
2619     --- a/drivers/net/wireless/ath/ath5k/base.c
2620     +++ b/drivers/net/wireless/ath/ath5k/base.c
2621     @@ -1210,6 +1210,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2622     struct ath5k_hw *ah = sc->ah;
2623     struct sk_buff *skb = bf->skb;
2624     struct ath5k_desc *ds;
2625     + int ret;
2626    
2627     if (!skb) {
2628     skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
2629     @@ -1236,9 +1237,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2630     ds = bf->desc;
2631     ds->ds_link = bf->daddr; /* link to self */
2632     ds->ds_data = bf->skbaddr;
2633     - ah->ah_setup_rx_desc(ah, ds,
2634     - skb_tailroom(skb), /* buffer size */
2635     - 0);
2636     + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
2637     + if (ret)
2638     + return ret;
2639    
2640     if (sc->rxlink != NULL)
2641     *sc->rxlink = bf->daddr;
2642     @@ -2996,13 +2997,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2643    
2644     if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
2645     if (*new_flags & FIF_PROMISC_IN_BSS) {
2646     - rfilt |= AR5K_RX_FILTER_PROM;
2647     __set_bit(ATH_STAT_PROMISC, sc->status);
2648     } else {
2649     __clear_bit(ATH_STAT_PROMISC, sc->status);
2650     }
2651     }
2652    
2653     + if (test_bit(ATH_STAT_PROMISC, sc->status))
2654     + rfilt |= AR5K_RX_FILTER_PROM;
2655     +
2656     /* Note, AR5K_RX_FILTER_MCAST is already enabled */
2657     if (*new_flags & FIF_ALLMULTI) {
2658     mfilt[0] = ~0;
2659     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2660     index 3534d86..2423068 100644
2661     --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2662     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2663     @@ -2083,10 +2083,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2664     }
2665     /* Else we have enough samples; calculate estimate of
2666     * actual average throughput */
2667     -
2668     - /* Sanity-check TPT calculations */
2669     - BUG_ON(window->average_tpt != ((window->success_ratio *
2670     - tbl->expected_tpt[index] + 64) / 128));
2671     + if (window->average_tpt != ((window->success_ratio *
2672     + tbl->expected_tpt[index] + 64) / 128)) {
2673     + IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2674     + window->average_tpt = ((window->success_ratio *
2675     + tbl->expected_tpt[index] + 64) / 128);
2676     + }
2677    
2678     /* If we are searching for better modulation mode, check success. */
2679     if (lq_sta->search_better_tbl &&
2680     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
2681     index 5622a55..664dcd5 100644
2682     --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
2683     +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
2684     @@ -3365,6 +3365,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2685     */
2686     spin_lock_init(&priv->reg_lock);
2687     spin_lock_init(&priv->lock);
2688     +
2689     + /*
2690     + * stop and reset the on-board processor just in case it is in a
2691     + * strange state ... like being left stranded by a primary kernel
2692     + * and this is now the kdump kernel trying to start up
2693     + */
2694     + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2695     +
2696     iwl_hw_detect(priv);
2697     IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
2698     priv->cfg->name, priv->hw_rev);
2699     diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2700     index adbb3ea..ca0fb8b 100644
2701     --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2702     +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2703     @@ -4022,6 +4022,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
2704     spin_lock_init(&priv->reg_lock);
2705     spin_lock_init(&priv->lock);
2706    
2707     + /*
2708     + * stop and reset the on-board processor just in case it is in a
2709     + * strange state ... like being left stranded by a primary kernel
2710     + * and this is now the kdump kernel trying to start up
2711     + */
2712     + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2713     +
2714     /***********************
2715     * 4. Read EEPROM
2716     * ********************/
2717     diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
2718     index b3c4fbd..805284d 100644
2719     --- a/drivers/net/wireless/p54/p54usb.c
2720     +++ b/drivers/net/wireless/p54/p54usb.c
2721     @@ -78,6 +78,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
2722     {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
2723     {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
2724     {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
2725     + {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
2726     {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
2727     {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
2728     {}
2729     diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
2730     index 8a40a14..77b7657 100644
2731     --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
2732     +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
2733     @@ -189,6 +189,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
2734     info->flags |= IEEE80211_TX_STAT_ACK;
2735    
2736     info->status.rates[0].count = (flags & 0xFF) + 1;
2737     + info->status.rates[1].idx = -1;
2738    
2739     ieee80211_tx_status_irqsafe(dev, skb);
2740     if (ring->entries - skb_queue_len(&ring->queue) == 2)
2741     diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
2742     index 9423f22..d74b89b 100644
2743     --- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
2744     +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
2745     @@ -160,6 +160,7 @@ disable:
2746     sdio_disable_func(func);
2747     release:
2748     sdio_release_host(func);
2749     + wl1251_free_hw(wl);
2750     return ret;
2751     }
2752    
2753     diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
2754     index 166b67e..de82183 100644
2755     --- a/drivers/oprofile/cpu_buffer.c
2756     +++ b/drivers/oprofile/cpu_buffer.c
2757     @@ -30,23 +30,7 @@
2758    
2759     #define OP_BUFFER_FLAGS 0
2760    
2761     -/*
2762     - * Read and write access is using spin locking. Thus, writing to the
2763     - * buffer by NMI handler (x86) could occur also during critical
2764     - * sections when reading the buffer. To avoid this, there are 2
2765     - * buffers for independent read and write access. Read access is in
2766     - * process context only, write access only in the NMI handler. If the
2767     - * read buffer runs empty, both buffers are swapped atomically. There
2768     - * is potentially a small window during swapping where the buffers are
2769     - * disabled and samples could be lost.
2770     - *
2771     - * Using 2 buffers is a little bit overhead, but the solution is clear
2772     - * and does not require changes in the ring buffer implementation. It
2773     - * can be changed to a single buffer solution when the ring buffer
2774     - * access is implemented as non-locking atomic code.
2775     - */
2776     -static struct ring_buffer *op_ring_buffer_read;
2777     -static struct ring_buffer *op_ring_buffer_write;
2778     +static struct ring_buffer *op_ring_buffer;
2779     DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
2780    
2781     static void wq_sync_buffer(struct work_struct *work);
2782     @@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
2783    
2784     void free_cpu_buffers(void)
2785     {
2786     - if (op_ring_buffer_read)
2787     - ring_buffer_free(op_ring_buffer_read);
2788     - op_ring_buffer_read = NULL;
2789     - if (op_ring_buffer_write)
2790     - ring_buffer_free(op_ring_buffer_write);
2791     - op_ring_buffer_write = NULL;
2792     + if (op_ring_buffer)
2793     + ring_buffer_free(op_ring_buffer);
2794     + op_ring_buffer = NULL;
2795     }
2796    
2797     #define RB_EVENT_HDR_SIZE 4
2798     @@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
2799     unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
2800     RB_EVENT_HDR_SIZE);
2801    
2802     - op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
2803     - if (!op_ring_buffer_read)
2804     - goto fail;
2805     - op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
2806     - if (!op_ring_buffer_write)
2807     + op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
2808     + if (!op_ring_buffer)
2809     goto fail;
2810    
2811     for_each_possible_cpu(i) {
2812     @@ -162,16 +140,11 @@ struct op_sample
2813     *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
2814     {
2815     entry->event = ring_buffer_lock_reserve
2816     - (op_ring_buffer_write, sizeof(struct op_sample) +
2817     + (op_ring_buffer, sizeof(struct op_sample) +
2818     size * sizeof(entry->sample->data[0]));
2819     - if (entry->event)
2820     - entry->sample = ring_buffer_event_data(entry->event);
2821     - else
2822     - entry->sample = NULL;
2823     -
2824     - if (!entry->sample)
2825     + if (!entry->event)
2826     return NULL;
2827     -
2828     + entry->sample = ring_buffer_event_data(entry->event);
2829     entry->size = size;
2830     entry->data = entry->sample->data;
2831    
2832     @@ -180,25 +153,16 @@ struct op_sample
2833    
2834     int op_cpu_buffer_write_commit(struct op_entry *entry)
2835     {
2836     - return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
2837     + return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
2838     }
2839    
2840     struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
2841     {
2842     struct ring_buffer_event *e;
2843     - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
2844     - if (e)
2845     - goto event;
2846     - if (ring_buffer_swap_cpu(op_ring_buffer_read,
2847     - op_ring_buffer_write,
2848     - cpu))
2849     + e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
2850     + if (!e)
2851     return NULL;
2852     - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
2853     - if (e)
2854     - goto event;
2855     - return NULL;
2856    
2857     -event:
2858     entry->event = e;
2859     entry->sample = ring_buffer_event_data(e);
2860     entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
2861     @@ -209,8 +173,7 @@ event:
2862    
2863     unsigned long op_cpu_buffer_entries(int cpu)
2864     {
2865     - return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
2866     - + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
2867     + return ring_buffer_entries_cpu(op_ring_buffer, cpu);
2868     }
2869    
2870     static int
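
The oprofile hunk above collapses the separate read/write ring buffers into a single op_ring_buffer and lets op_cpu_buffer_write_reserve() fail directly when the reservation itself fails. A minimal user-space sketch of that reserve/check/commit shape is below; every name in it (sample_reserve, sample_commit, struct sample) is hypothetical and it is not the kernel ring_buffer API.

#include <stdio.h>
#include <stdlib.h>

struct sample {
        unsigned long data[4];
};

/* Reserve space for one sample; NULL means the buffer is full. */
static struct sample *sample_reserve(struct sample *pool, size_t *used, size_t cap)
{
        if (*used >= cap)
                return NULL;
        return &pool[(*used)++];
}

/* Commit is a no-op in this toy model; kept only to mirror the shape above. */
static void sample_commit(struct sample *s)
{
        (void)s;
}

int main(void)
{
        struct sample pool[2];
        size_t used = 0;

        for (int i = 0; i < 3; i++) {
                struct sample *s = sample_reserve(pool, &used, 2);

                /* Bail out on the reservation result itself, as the patched
                 * op_cpu_buffer_write_reserve() now does. */
                if (!s) {
                        fprintf(stderr, "reservation %d failed\n", i);
                        continue;
                }
                s->data[0] = (unsigned long)i;
                sample_commit(s);
        }
        printf("committed %zu samples\n", used);
        return 0;
}
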
2871     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2872     index 456c265..197e38f 100644
2873     --- a/drivers/pci/quirks.c
2874     +++ b/drivers/pci/quirks.c
2875     @@ -1461,7 +1461,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
2876     conf5 &= ~(1 << 24); /* Clear bit 24 */
2877    
2878     switch (pdev->device) {
2879     - case PCI_DEVICE_ID_JMICRON_JMB360:
2880     + case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
2881     + case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
2882     /* The controller should be in single function ahci mode */
2883     conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
2884     break;
2885     @@ -1497,12 +1498,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
2886     }
2887     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
2888     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
2889     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
2890     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
2891     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
2892     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
2893     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
2894     DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
2895     DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
2896     +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
2897     DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
2898     DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
2899     DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
2900     @@ -2113,6 +2116,8 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
2901     }
2902     }
2903     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2904     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2905     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2906    
2907     /* Go through the list of Hypertransport capabilities and
2908     * return 1 if a HT MSI capability is found and enabled */
2909     @@ -2204,15 +2209,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2910     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2911     ht_enable_msi_mapping);
2912    
2913     -/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
2914     +/* The P5N32-SLI motherboards from Asus have a problem with msi
2915     * for the MCP55 NIC. It is not yet determined whether the msi problem
2916     * also affects other devices. As for now, turn off msi for this device.
2917     */
2918     static void __devinit nvenet_msi_disable(struct pci_dev *dev)
2919     {
2920     - if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
2921     + if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
2922     + dmi_name_in_vendors("P5N32-E SLI")) {
2923     dev_info(&dev->dev,
2924     - "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
2925     + "Disabling msi for MCP55 NIC on P5N32-SLI\n");
2926     dev->no_msi = 1;
2927     }
2928     }
2929     diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
2930     index 6a47bb7..9a844ca 100644
2931     --- a/drivers/platform/x86/eeepc-laptop.c
2932     +++ b/drivers/platform/x86/eeepc-laptop.c
2933     @@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
2934     struct pci_dev *dev;
2935     struct pci_bus *bus;
2936     bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
2937     + bool absent;
2938     + u32 l;
2939    
2940     if (eeepc->wlan_rfkill)
2941     rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
2942     @@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
2943     goto out_unlock;
2944     }
2945    
2946     + if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
2947     + pr_err("Unable to read PCI config space?\n");
2948     + goto out_unlock;
2949     + }
2950     + absent = (l == 0xffffffff);
2951     +
2952     + if (blocked != absent) {
2953     + pr_warning("BIOS says wireless lan is %s, "
2954     + "but the pci device is %s\n",
2955     + blocked ? "blocked" : "unblocked",
2956     + absent ? "absent" : "present");
2957     + pr_warning("skipped wireless hotplug as probably "
2958     + "inappropriate for this model\n");
2959     + goto out_unlock;
2960     + }
2961     +
2962     if (!blocked) {
2963     dev = pci_get_slot(bus, 0);
2964     if (dev) {
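
The eeepc-laptop hunk above decides whether the wireless slot is really populated by reading the vendor/device dword and treating an all-ones answer as "nothing responded". A tiny self-contained sketch of that convention follows; read_vendor_dword() and the dword value are made up for illustration and are not the real PCI accessors.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend config-space read; a bus with no device at the slot
 * conventionally returns all ones. */
static uint32_t read_vendor_dword(bool device_present)
{
        return device_present ? 0xABCD1234u /* made-up vendor/device dword */
                              : 0xFFFFFFFFu;
}

static bool slot_is_absent(uint32_t vendor_dword)
{
        return vendor_dword == 0xFFFFFFFFu;
}

int main(void)
{
        printf("empty slot absent?     %d\n", slot_is_absent(read_vendor_dword(false)));
        printf("populated slot absent? %d\n", slot_is_absent(read_vendor_dword(true)));
        return 0;
}
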
2965     diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
2966     index e9aa814..aa13875 100644
2967     --- a/drivers/rtc/rtc-cmos.c
2968     +++ b/drivers/rtc/rtc-cmos.c
2969     @@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
2970     }
2971     }
2972    
2973     + cmos_rtc.dev = dev;
2974     + dev_set_drvdata(dev, &cmos_rtc);
2975     +
2976     cmos_rtc.rtc = rtc_device_register(driver_name, dev,
2977     &cmos_rtc_ops, THIS_MODULE);
2978     if (IS_ERR(cmos_rtc.rtc)) {
2979     @@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
2980     goto cleanup0;
2981     }
2982    
2983     - cmos_rtc.dev = dev;
2984     - dev_set_drvdata(dev, &cmos_rtc);
2985     rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
2986    
2987     spin_lock_irq(&rtc_lock);
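
The rtc-cmos hunk above moves dev_set_drvdata() in front of rtc_device_register() because registration can call back into the driver immediately, and the callback needs the private data already published. A minimal sketch of that ordering pitfall is below, with purely hypothetical names (register_rtc, on_registered, struct rtc_drv).

#include <stdio.h>

struct rtc_drv {
        const char *name;
};

static struct rtc_drv *drvdata;        /* set by the probe routine */

/* Callback fired from inside registration, like an RTC read on register. */
static void on_registered(void)
{
        /* Would dereference NULL if drvdata were set only after registering. */
        printf("registered: %s\n", drvdata ? drvdata->name : "(missing drvdata!)");
}

static void register_rtc(void)
{
        on_registered();
}

int main(void)
{
        static struct rtc_drv cmos = { .name = "cmos-rtc" };

        drvdata = &cmos;        /* publish driver data first ... */
        register_rtc();         /* ... then register, as the patch now does */
        return 0;
}
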
2988     diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
2989     index e0d7b99..43bfffe 100644
2990     --- a/drivers/rtc/rtc-s3c.c
2991     +++ b/drivers/rtc/rtc-s3c.c
2992     @@ -456,8 +456,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
2993     pr_debug("s3c2410_rtc: RTCCON=%02x\n",
2994     readb(s3c_rtc_base + S3C2410_RTCCON));
2995    
2996     - s3c_rtc_setfreq(&pdev->dev, 1);
2997     -
2998     device_init_wakeup(&pdev->dev, 1);
2999    
3000     /* register RTC and exit */
3001     @@ -474,6 +472,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
3002     rtc->max_user_freq = 128;
3003    
3004     platform_set_drvdata(pdev, rtc);
3005     +
3006     + s3c_rtc_setfreq(&pdev->dev, 1);
3007     +
3008     return 0;
3009    
3010     err_nortc:
3011     diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3012     index 6fc63b9..40807e4 100644
3013     --- a/drivers/scsi/qla2xxx/qla_isr.c
3014     +++ b/drivers/scsi/qla2xxx/qla_isr.c
3015     @@ -2169,30 +2169,28 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3016    
3017     /* If possible, enable MSI-X. */
3018     if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3019     - !IS_QLA8432(ha) && !IS_QLA8001(ha))
3020     - goto skip_msix;
3021     + !IS_QLA8432(ha) && !IS_QLA8001(ha))
3022     + goto skip_msi;
3023     +
3024     + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3025     + (ha->pdev->subsystem_device == 0x7040 ||
3026     + ha->pdev->subsystem_device == 0x7041 ||
3027     + ha->pdev->subsystem_device == 0x1705)) {
3028     + DEBUG2(qla_printk(KERN_WARNING, ha,
3029     + "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
3030     + ha->pdev->subsystem_vendor,
3031     + ha->pdev->subsystem_device));
3032     + goto skip_msi;
3033     + }
3034    
3035     if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
3036     !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
3037     DEBUG2(qla_printk(KERN_WARNING, ha,
3038     "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
3039     ha->pdev->revision, ha->fw_attributes));
3040     -
3041     goto skip_msix;
3042     }
3043    
3044     - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3045     - (ha->pdev->subsystem_device == 0x7040 ||
3046     - ha->pdev->subsystem_device == 0x7041 ||
3047     - ha->pdev->subsystem_device == 0x1705)) {
3048     - DEBUG2(qla_printk(KERN_WARNING, ha,
3049     - "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
3050     - ha->pdev->subsystem_vendor,
3051     - ha->pdev->subsystem_device));
3052     -
3053     - goto skip_msi;
3054     - }
3055     -
3056     ret = qla24xx_enable_msix(ha, rsp);
3057     if (!ret) {
3058     DEBUG2(qla_printk(KERN_INFO, ha,
3059     diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
3060     index dc4849a..9855608 100644
3061     --- a/drivers/staging/comedi/drivers/ni_mio_cs.c
3062     +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
3063     @@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = {
3064     .adbits = 12,
3065     .ai_fifo_depth = 1024,
3066     .alwaysdither = 0,
3067     - .gainlkup = ai_gain_16,
3068     + .gainlkup = ai_gain_4,
3069     .ai_speed = 5000,
3070     .n_aochan = 2,
3071     .aobits = 12,
3072     diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
3073     index 925a236..2ebc599 100644
3074     --- a/drivers/staging/rt2860/usb_main_dev.c
3075     +++ b/drivers/staging/rt2860/usb_main_dev.c
3076     @@ -97,6 +97,7 @@ struct usb_device_id rtusb_usb_id[] = {
3077     {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
3078     {USB_DEVICE(0x7392, 0x7718)},
3079     {USB_DEVICE(0x7392, 0x7717)},
3080     + {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
3081     {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
3082     {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
3083     {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
3084     diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
3085     index ccb9d5b..55337c8 100644
3086     --- a/drivers/staging/rtl8192su/r8192U_core.c
3087     +++ b/drivers/staging/rtl8192su/r8192U_core.c
3088     @@ -120,6 +120,7 @@ static struct usb_device_id rtl8192_usb_id_tbl[] = {
3089     {USB_DEVICE(0x050d, 0x805E)},
3090     /* Sitecom */
3091     {USB_DEVICE(0x0df6, 0x0031)},
3092     + {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */
3093     /* EnGenius */
3094     {USB_DEVICE(0x1740, 0x9201)},
3095     /* Dlink */
3096     diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
3097     index 0db8d7b..433c403 100644
3098     --- a/drivers/staging/vt6655/device_main.c
3099     +++ b/drivers/staging/vt6655/device_main.c
3100     @@ -1089,11 +1089,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
3101     }
3102     //2008-07-21-01<Add>by MikeLiu
3103     //register wpadev
3104     +#if 0
3105     if(wpa_set_wpadev(pDevice, 1)!=0) {
3106     printk("Fail to Register WPADEV?\n");
3107     unregister_netdev(pDevice->dev);
3108     free_netdev(dev);
3109     }
3110     +#endif
3111     device_print_info(pDevice);
3112     pci_set_drvdata(pcid, pDevice);
3113     return 0;
3114     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3115     index db6b071..d1c2108 100644
3116     --- a/drivers/usb/class/cdc-acm.c
3117     +++ b/drivers/usb/class/cdc-acm.c
3118     @@ -1201,7 +1201,7 @@ made_compressed_probe:
3119     if (rcv->urb == NULL) {
3120     dev_dbg(&intf->dev,
3121     "out of memory (read urbs usb_alloc_urb)\n");
3122     - goto alloc_fail7;
3123     + goto alloc_fail6;
3124     }
3125    
3126     rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
3127     @@ -1225,7 +1225,7 @@ made_compressed_probe:
3128     if (snd->urb == NULL) {
3129     dev_dbg(&intf->dev,
3130     "out of memory (write urbs usb_alloc_urb)");
3131     - goto alloc_fail7;
3132     + goto alloc_fail8;
3133     }
3134    
3135     if (usb_endpoint_xfer_int(epwrite))
3136     @@ -1264,6 +1264,7 @@ made_compressed_probe:
3137     i = device_create_file(&intf->dev,
3138     &dev_attr_iCountryCodeRelDate);
3139     if (i < 0) {
3140     + device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
3141     kfree(acm->country_codes);
3142     goto skip_countries;
3143     }
3144     @@ -1300,6 +1301,7 @@ alloc_fail8:
3145     usb_free_urb(acm->wb[i].urb);
3146     alloc_fail7:
3147     acm_read_buffers_free(acm);
3148     +alloc_fail6:
3149     for (i = 0; i < num_rx_buf; i++)
3150     usb_free_urb(acm->ru[i].urb);
3151     usb_free_urb(acm->ctrlurb);
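
The cdc-acm hunk above re-pairs each failure with the unwind label that frees only what had been allocated before that point (alloc_fail6 vs. alloc_fail8). A short, self-contained sketch of that layered goto unwind follows; all identifiers are hypothetical and it does not model the real acm structures.

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        char *read_bufs, *read_urbs, *write_urbs;

        read_bufs = malloc(64);
        if (!read_bufs)
                goto fail_nothing;

        read_urbs = malloc(64);
        if (!read_urbs)
                goto fail_read_bufs;   /* only read_bufs exists so far */

        write_urbs = malloc(64);
        if (!write_urbs)
                goto fail_read_urbs;   /* read_bufs and read_urbs exist */

        printf("setup ok\n");
        free(write_urbs);
        free(read_urbs);
        free(read_bufs);
        return 0;

fail_read_urbs:
        free(read_urbs);
fail_read_bufs:
        free(read_bufs);
fail_nothing:
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}
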
3152     diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
3153     index fa3d142..08a9a62 100644
3154     --- a/drivers/usb/gadget/fsl_udc_core.c
3155     +++ b/drivers/usb/gadget/fsl_udc_core.c
3156     @@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
3157     case USB_ENDPOINT_XFER_ISOC:
3158     /* Calculate transactions needed for high bandwidth iso */
3159     mult = (unsigned char)(1 + ((max >> 11) & 0x03));
3160     - max = max & 0x8ff; /* bit 0~10 */
3161     + max = max & 0x7ff; /* bit 0~10 */
3162     /* 3 transactions at most */
3163     if (mult > 3)
3164     goto en_done;
3165     diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
3166     index 1937267..ec45293 100644
3167     --- a/drivers/usb/host/ehci-hub.c
3168     +++ b/drivers/usb/host/ehci-hub.c
3169     @@ -294,6 +294,16 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
3170     /* manually resume the ports we suspended during bus_suspend() */
3171     i = HCS_N_PORTS (ehci->hcs_params);
3172     while (i--) {
3173     + /* clear phy low power mode before resume */
3174     + if (ehci->has_hostpc) {
3175     + u32 __iomem *hostpc_reg =
3176     + (u32 __iomem *)((u8 *)ehci->regs
3177     + + HOSTPC0 + 4 * (i & 0xff));
3178     + temp = ehci_readl(ehci, hostpc_reg);
3179     + ehci_writel(ehci, temp & ~HOSTPC_PHCD,
3180     + hostpc_reg);
3181     + mdelay(5);
3182     + }
3183     temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
3184     temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
3185     if (test_bit(i, &ehci->bus_suspended) &&
3186     @@ -678,6 +688,13 @@ static int ehci_hub_control (
3187     if (temp & PORT_SUSPEND) {
3188     if ((temp & PORT_PE) == 0)
3189     goto error;
3190     + /* clear phy low power mode before resume */
3191     + if (hostpc_reg) {
3192     + temp1 = ehci_readl(ehci, hostpc_reg);
3193     + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
3194     + hostpc_reg);
3195     + mdelay(5);
3196     + }
3197     /* resume signaling for 20 msec */
3198     temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
3199     ehci_writel(ehci, temp | PORT_RESUME,
3200     diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
3201     index 72dae1c..3b6e864 100644
3202     --- a/drivers/usb/host/fhci.h
3203     +++ b/drivers/usb/host/fhci.h
3204     @@ -20,6 +20,7 @@
3205    
3206     #include <linux/kernel.h>
3207     #include <linux/types.h>
3208     +#include <linux/bug.h>
3209     #include <linux/spinlock.h>
3210     #include <linux/interrupt.h>
3211     #include <linux/kfifo.h>
3212     @@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
3213    
3214     static inline void *cq_get(struct kfifo *kfifo)
3215     {
3216     - void *p = NULL;
3217     + unsigned int sz;
3218     + void *p;
3219     +
3220     + sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
3221     + if (sz != sizeof(p))
3222     + return NULL;
3223    
3224     - kfifo_out(kfifo, (void *)&p, sizeof(p));
3225     return p;
3226     }
3227    
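
The fhci hunk above makes cq_get() trust its dequeued pointer only when the FIFO actually returned a full pointer's worth of bytes. The sketch below shows the same "check the copied length" pattern with a trivial byte queue instead of the kernel kfifo; byteq_out() and q_get() are hypothetical.

#include <stdio.h>
#include <string.h>

struct byteq {
        unsigned char buf[32];
        size_t len;
};

/* Copy up to n bytes out; returns how many were really copied. */
static size_t byteq_out(struct byteq *q, void *to, size_t n)
{
        size_t copied = n < q->len ? n : q->len;

        memcpy(to, q->buf, copied);
        memmove(q->buf, q->buf + copied, q->len - copied);
        q->len -= copied;
        return copied;
}

/* Dequeue one stored pointer; NULL when the queue held less than a pointer. */
static void *q_get(struct byteq *q)
{
        void *p;

        if (byteq_out(q, &p, sizeof(p)) != sizeof(p))
                return NULL;    /* short read: do not trust p */
        return p;
}

int main(void)
{
        struct byteq q = { .len = 0 };
        int value = 42;
        void *p = &value;

        printf("empty queue -> %p\n", q_get(&q));
        memcpy(q.buf, &p, sizeof(p));
        q.len = sizeof(p);
        printf("stored ptr  -> %d\n", *(int *)q_get(&q));
        return 0;
}
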
3228     diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
3229     index fa920c7..53d90ca 100644
3230     --- a/drivers/usb/host/xhci-hcd.c
3231     +++ b/drivers/usb/host/xhci-hcd.c
3232     @@ -104,6 +104,33 @@ int xhci_halt(struct xhci_hcd *xhci)
3233     }
3234    
3235     /*
3236     + * Set the run bit and wait for the host to be running.
3237     + */
3238     +int xhci_start(struct xhci_hcd *xhci)
3239     +{
3240     + u32 temp;
3241     + int ret;
3242     +
3243     + temp = xhci_readl(xhci, &xhci->op_regs->command);
3244     + temp |= (CMD_RUN);
3245     + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
3246     + temp);
3247     + xhci_writel(xhci, temp, &xhci->op_regs->command);
3248     +
3249     + /*
3250     + * Wait for the HCHalted Status bit to be 0 to indicate the host is
3251     + * running.
3252     + */
3253     + ret = handshake(xhci, &xhci->op_regs->status,
3254     + STS_HALT, 0, XHCI_MAX_HALT_USEC);
3255     + if (ret == -ETIMEDOUT)
3256     + xhci_err(xhci, "Host took too long to start, "
3257     + "waited %u microseconds.\n",
3258     + XHCI_MAX_HALT_USEC);
3259     + return ret;
3260     +}
3261     +
3262     +/*
3263     * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
3264     *
3265     * This resets pipelines, timers, counters, state machines, etc.
3266     @@ -114,6 +141,7 @@ int xhci_reset(struct xhci_hcd *xhci)
3267     {
3268     u32 command;
3269     u32 state;
3270     + int ret;
3271    
3272     state = xhci_readl(xhci, &xhci->op_regs->status);
3273     if ((state & STS_HALT) == 0) {
3274     @@ -128,7 +156,17 @@ int xhci_reset(struct xhci_hcd *xhci)
3275     /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
3276     xhci_to_hcd(xhci)->state = HC_STATE_HALT;
3277    
3278     - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
3279     + ret = handshake(xhci, &xhci->op_regs->command,
3280     + CMD_RESET, 0, 250 * 1000);
3281     + if (ret)
3282     + return ret;
3283     +
3284     + xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
3285     + /*
3286     + * xHCI cannot write to any doorbells or operational registers other
3287     + * than status until the "Controller Not Ready" flag is cleared.
3288     + */
3289     + return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
3290     }
3291    
3292    
3293     @@ -451,13 +489,11 @@ int xhci_run(struct usb_hcd *hcd)
3294     if (NUM_TEST_NOOPS > 0)
3295     doorbell = xhci_setup_one_noop(xhci);
3296    
3297     - temp = xhci_readl(xhci, &xhci->op_regs->command);
3298     - temp |= (CMD_RUN);
3299     - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
3300     - temp);
3301     - xhci_writel(xhci, temp, &xhci->op_regs->command);
3302     - /* Flush PCI posted writes */
3303     - temp = xhci_readl(xhci, &xhci->op_regs->command);
3304     + if (xhci_start(xhci)) {
3305     + xhci_halt(xhci);
3306     + return -ENODEV;
3307     + }
3308     +
3309     xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
3310     if (doorbell)
3311     (*doorbell)(xhci);
3312     @@ -1452,6 +1488,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
3313     kfree(virt_ep->stopped_td);
3314     xhci_ring_cmd_db(xhci);
3315     }
3316     + virt_ep->stopped_td = NULL;
3317     + virt_ep->stopped_trb = NULL;
3318     spin_unlock_irqrestore(&xhci->lock, flags);
3319    
3320     if (ret)
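
The new xhci_start() above, and the extended xhci_reset(), both follow the bounded-poll "handshake" shape: write a command bit, then poll a status register until a bit reaches the wanted value or a time budget runs out. The sketch below reduces that to a simulated register and an iteration budget; STS_HALT, read_status() and handshake() here are illustrative stand-ins, not the driver's functions.

#include <stdint.h>
#include <stdio.h>

#define STS_HALT        (1u << 0)

static uint32_t fake_status = STS_HALT;        /* pretend the HC starts halted */

static uint32_t read_status(int poll_count)
{
        /* Simulate the controller clearing HCHalted after a few polls. */
        if (poll_count >= 3)
                fake_status &= ~STS_HALT;
        return fake_status;
}

/* Poll until (status & mask) == want, or give up after max_polls attempts. */
static int handshake(uint32_t mask, uint32_t want, int max_polls)
{
        for (int i = 0; i < max_polls; i++) {
                if ((read_status(i) & mask) == want)
                        return 0;
        }
        return -1;      /* timed out; the caller halts and cleans up */
}

int main(void)
{
        if (handshake(STS_HALT, 0, 10) == 0)
                printf("controller running\n");
        else
                printf("controller failed to start\n");
        return 0;
}
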
3321     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3322     index e097008..4361b2d 100644
3323     --- a/drivers/usb/host/xhci-pci.c
3324     +++ b/drivers/usb/host/xhci-pci.c
3325     @@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
3326     struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
3327     int retval;
3328    
3329     - hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
3330     + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
3331    
3332     xhci->cap_regs = hcd->regs;
3333     xhci->op_regs = hcd->regs +
3334     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3335     index ee7bc7e..2b26b5e 100644
3336     --- a/drivers/usb/host/xhci-ring.c
3337     +++ b/drivers/usb/host/xhci-ring.c
3338     @@ -241,10 +241,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
3339     int i;
3340     union xhci_trb *enq = ring->enqueue;
3341     struct xhci_segment *enq_seg = ring->enq_seg;
3342     + struct xhci_segment *cur_seg;
3343     + unsigned int left_on_ring;
3344    
3345     /* Check if ring is empty */
3346     - if (enq == ring->dequeue)
3347     + if (enq == ring->dequeue) {
3348     + /* Can't use link trbs */
3349     + left_on_ring = TRBS_PER_SEGMENT - 1;
3350     + for (cur_seg = enq_seg->next; cur_seg != enq_seg;
3351     + cur_seg = cur_seg->next)
3352     + left_on_ring += TRBS_PER_SEGMENT - 1;
3353     +
3354     + /* Always need one TRB free in the ring. */
3355     + left_on_ring -= 1;
3356     + if (num_trbs > left_on_ring) {
3357     + xhci_warn(xhci, "Not enough room on ring; "
3358     + "need %u TRBs, %u TRBs left\n",
3359     + num_trbs, left_on_ring);
3360     + return 0;
3361     + }
3362     return 1;
3363     + }
3364     /* Make sure there's an extra empty TRB available */
3365     for (i = 0; i <= num_trbs; ++i) {
3366     if (enq == ring->dequeue)
3367     @@ -333,7 +350,8 @@ static struct xhci_segment *find_trb_seg(
3368     while (cur_seg->trbs > trb ||
3369     &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
3370     generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
3371     - if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
3372     + if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
3373     + TRB_TYPE(TRB_LINK) &&
3374     (generic_trb->field[3] & LINK_TOGGLE))
3375     *cycle_state = ~(*cycle_state) & 0x1;
3376     cur_seg = cur_seg->next;
3377     @@ -389,7 +407,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
3378     BUG();
3379    
3380     trb = &state->new_deq_ptr->generic;
3381     - if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
3382     + if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
3383     (trb->field[3] & LINK_TOGGLE))
3384     state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
3385     next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
3386     @@ -577,6 +595,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
3387     /* Otherwise just ring the doorbell to restart the ring */
3388     ring_ep_doorbell(xhci, slot_id, ep_index);
3389     }
3390     + ep->stopped_td = NULL;
3391     + ep->stopped_trb = NULL;
3392    
3393     /*
3394     * Drop the lock and complete the URBs in the cancelled TD list.
3395     @@ -1049,8 +1069,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
3396     ep->ep_state |= EP_HALTED;
3397     ep->stopped_td = td;
3398     ep->stopped_trb = event_trb;
3399     +
3400     xhci_queue_reset_ep(xhci, slot_id, ep_index);
3401     xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
3402     +
3403     + ep->stopped_td = NULL;
3404     + ep->stopped_trb = NULL;
3405     +
3406     xhci_ring_cmd_db(xhci);
3407     }
3408    
3409     @@ -1370,8 +1395,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
3410     for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
3411     cur_trb != event_trb;
3412     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
3413     - if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
3414     - TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
3415     + if ((cur_trb->generic.field[3] &
3416     + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
3417     + (cur_trb->generic.field[3] &
3418     + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
3419     td->urb->actual_length +=
3420     TRB_LEN(cur_trb->generic.field[2]);
3421     }
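
The xhci-ring hunks above replace TRB_TYPE(field) == TRB_LINK with (field & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK): TRB_TYPE() encodes a type id into its bit-field, so it must wrap the constant, not the register value. The sketch below demonstrates the difference with an illustrative field layout that is not the real TRB layout.

#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT      10
#define TYPE_BITMASK    (0x3Fu << TYPE_SHIFT)
#define TYPE(t)         (((t) & 0x3Fu) << TYPE_SHIFT)   /* encoder, not extractor */
#define LINK            6u

int main(void)
{
        uint32_t field = TYPE(LINK) | 0x1u;     /* link entry with an extra flag bit */

        /* Buggy form: re-encodes the whole register, then compares to a raw id. */
        int buggy = (TYPE(field) == LINK);

        /* Fixed form: mask out the type bits, compare to the encoded id. */
        int fixed = ((field & TYPE_BITMASK) == TYPE(LINK));

        printf("buggy comparison sees a link entry? %d (misses it)\n", buggy);
        printf("fixed comparison sees a link entry? %d (correct)\n", fixed);
        return 0;
}
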
3422     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
3423     index 7d920f2..e3e087e 100644
3424     --- a/drivers/usb/serial/cp210x.c
3425     +++ b/drivers/usb/serial/cp210x.c
3426     @@ -61,6 +61,8 @@ static struct usb_device_id id_table [] = {
3427     { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
3428     { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
3429     { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
3430     + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
3431     + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
3432     { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
3433     { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
3434     { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
3435     @@ -72,9 +74,12 @@ static struct usb_device_id id_table [] = {
3436     { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
3437     { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
3438     { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
3439     + { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
3440     + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
3441     { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
3442     { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
3443     { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
3444     + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
3445     { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
3446     { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
3447     { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
3448     @@ -82,12 +87,15 @@ static struct usb_device_id id_table [] = {
3449     { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
3450     { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
3451     { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
3452     + { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
3453     { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
3454     { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
3455     { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
3456     + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
3457     { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
3458     { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
3459     { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
3460     + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
3461     { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
3462     { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
3463     { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
3464     @@ -105,6 +113,7 @@ static struct usb_device_id id_table [] = {
3465     { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
3466     { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
3467     { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
3468     + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
3469     { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
3470     { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
3471     { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
3472     @@ -115,6 +124,8 @@ static struct usb_device_id id_table [] = {
3473     { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
3474     { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
3475     { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
3476     + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
3477     + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
3478     { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
3479     { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
3480     { } /* Terminating Entry */
3481     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3482     index ca9e3ba..8c19ad5 100644
3483     --- a/drivers/usb/serial/ftdi_sio.c
3484     +++ b/drivers/usb/serial/ftdi_sio.c
3485     @@ -2312,6 +2312,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
3486     "urb failed to set to rts/cts flow control\n");
3487     }
3488    
3489     + /* raise DTR/RTS */
3490     + set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
3491     } else {
3492     /*
3493     * Xon/Xoff code
3494     @@ -2359,6 +2361,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
3495     }
3496     }
3497    
3498     + /* lower DTR/RTS */
3499     + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
3500     }
3501     return;
3502     }
3503     diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
3504     index 95d8d26..2e0497b 100644
3505     --- a/drivers/usb/serial/ir-usb.c
3506     +++ b/drivers/usb/serial/ir-usb.c
3507     @@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
3508     kfree(port->read_urb->transfer_buffer);
3509     port->read_urb->transfer_buffer = buffer;
3510     port->read_urb->transfer_buffer_length = buffer_size;
3511     + port->bulk_in_buffer = buffer;
3512    
3513     buffer = kmalloc(buffer_size, GFP_KERNEL);
3514     if (!buffer) {
3515     @@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
3516     kfree(port->write_urb->transfer_buffer);
3517     port->write_urb->transfer_buffer = buffer;
3518     port->write_urb->transfer_buffer_length = buffer_size;
3519     + port->bulk_out_buffer = buffer;
3520     port->bulk_out_size = buffer_size;
3521     }
3522    
3523     diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
3524     index 3a78738..6825082 100644
3525     --- a/drivers/usb/serial/kl5kusb105.c
3526     +++ b/drivers/usb/serial/kl5kusb105.c
3527     @@ -310,6 +310,7 @@ err_cleanup:
3528     usb_free_urb(priv->write_urb_pool[j]);
3529     }
3530     }
3531     + kfree(priv);
3532     usb_set_serial_port_data(serial->port[i], NULL);
3533     }
3534     return -ENOMEM;
3535     diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
3536     index 45ea694..9d99e68 100644
3537     --- a/drivers/usb/serial/kobil_sct.c
3538     +++ b/drivers/usb/serial/kobil_sct.c
3539     @@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)
3540    
3541     /* FIXME: Add rts/dtr methods */
3542     if (port->write_urb) {
3543     - usb_kill_urb(port->write_urb);
3544     + usb_poison_urb(port->write_urb);
3545     + kfree(port->write_urb->transfer_buffer);
3546     usb_free_urb(port->write_urb);
3547     port->write_urb = NULL;
3548     }
3549     diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
3550     index 2cfe245..fb472dd 100644
3551     --- a/drivers/usb/serial/mos7840.c
3552     +++ b/drivers/usb/serial/mos7840.c
3553     @@ -717,7 +717,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
3554     mos7840_port = urb->context;
3555     if (!mos7840_port) {
3556     dbg("%s", "NULL mos7840_port pointer");
3557     - mos7840_port->read_urb_busy = false;
3558     return;
3559     }
3560    
3561     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3562     index d93283d..64b50f6 100644
3563     --- a/drivers/usb/serial/option.c
3564     +++ b/drivers/usb/serial/option.c
3565     @@ -334,6 +334,24 @@ static int option_resume(struct usb_serial *serial);
3566     #define ALCATEL_VENDOR_ID 0x1bbb
3567     #define ALCATEL_PRODUCT_X060S 0x0000
3568    
3569     +#define PIRELLI_VENDOR_ID 0x1266
3570     +#define PIRELLI_PRODUCT_C100_1 0x1002
3571     +#define PIRELLI_PRODUCT_C100_2 0x1003
3572     +#define PIRELLI_PRODUCT_1004 0x1004
3573     +#define PIRELLI_PRODUCT_1005 0x1005
3574     +#define PIRELLI_PRODUCT_1006 0x1006
3575     +#define PIRELLI_PRODUCT_1007 0x1007
3576     +#define PIRELLI_PRODUCT_1008 0x1008
3577     +#define PIRELLI_PRODUCT_1009 0x1009
3578     +#define PIRELLI_PRODUCT_100A 0x100a
3579     +#define PIRELLI_PRODUCT_100B 0x100b
3580     +#define PIRELLI_PRODUCT_100C 0x100c
3581     +#define PIRELLI_PRODUCT_100D 0x100d
3582     +#define PIRELLI_PRODUCT_100E 0x100e
3583     +#define PIRELLI_PRODUCT_100F 0x100f
3584     +#define PIRELLI_PRODUCT_1011 0x1011
3585     +#define PIRELLI_PRODUCT_1012 0x1012
3586     +
3587     /* Airplus products */
3588     #define AIRPLUS_VENDOR_ID 0x1011
3589     #define AIRPLUS_PRODUCT_MCD650 0x3198
3590     @@ -346,6 +364,12 @@ static int option_resume(struct usb_serial *serial);
3591     #define HAIER_VENDOR_ID 0x201e
3592     #define HAIER_PRODUCT_CE100 0x2009
3593    
3594     +#define CINTERION_VENDOR_ID 0x0681
3595     +
3596     +/* Olivetti products */
3597     +#define OLIVETTI_VENDOR_ID 0x0b3c
3598     +#define OLIVETTI_PRODUCT_OLICARD100 0xc000
3599     +
3600     static struct usb_device_id option_ids[] = {
3601     { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
3602     { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
3603     @@ -621,6 +645,180 @@ static struct usb_device_id option_ids[] = {
3604     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
3605     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
3606     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
3607     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
3608     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
3609     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
3610     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
3611     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
3612     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
3613     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
3614     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
3615     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
3616     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
3617     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
3618     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
3619     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
3620     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
3621     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
3622     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
3623     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
3624     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
3625     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
3626     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
3627     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
3628     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
3629     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
3630     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
3631     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
3632     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
3633     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
3634     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
3635     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
3636     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
3637     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
3638     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
3639     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
3640     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
3641     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
3642     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
3643     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
3644     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
3645     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
3646     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
3647     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
3648     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
3649     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
3650     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
3651     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
3652     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
3653     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
3654     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
3655     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
3656     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
3657     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
3658     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
3659     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
3660     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
3661     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
3662     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
3663     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
3664     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
3665     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
3666     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
3667     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
3668     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
3669     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
3670     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
3671     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
3672     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
3673     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
3674     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
3675     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
3676     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
3677     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
3678     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
3679     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
3680     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
3681     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
3682     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
3683     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
3684     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
3685     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
3686     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
3687     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
3688     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
3689     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
3690     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
3691     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
3692     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
3693     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
3694     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
3695     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
3696     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
3697     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
3698     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
3699     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
3700     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
3701     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
3702     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
3703     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
3704     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
3705     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
3706     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
3707     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
3708     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
3709     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
3710     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
3711     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
3712     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
3713     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
3714     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
3715     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
3716     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
3717     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
3718     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
3719     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
3720     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
3721     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
3722     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
3723     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
3724     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
3725     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
3726     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
3727     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
3728     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
3729     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
3730     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
3731     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
3732     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
3733     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
3734     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
3735     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
3736     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
3737     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
3738     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
3739     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
3740     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
3741     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
3742     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
3743     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
3744     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
3745     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
3746     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
3747     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
3748     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
3749     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
3750     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
3751     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
3752     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
3753     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
3754     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
3755     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
3756     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
3757     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
3758     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
3759     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
3760     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
3761     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
3762     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
3763     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
3764     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
3765     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
3766     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
3767     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
3768     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
3769     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
3770     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
3771     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
3772     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
3773     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
3774     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
3775     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
3776     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
3777     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
3778     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
3779     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
3780     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
3781     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
3782     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
3783     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
3784     @@ -649,6 +847,27 @@ static struct usb_device_id option_ids[] = {
3785     { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
3786     { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
3787     { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
3788     + /* Pirelli */
3789     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
3790     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
3791     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)},
3792     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)},
3793     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)},
3794     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)},
3795     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)},
3796     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)},
3797     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)},
3798     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) },
3799     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) },
3800     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) },
3801     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) },
3802     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
3803     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
3804     + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
3805     +
3806     + { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
3807     +
3808     + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
3809     { } /* Terminating entry */
3810     };
3811     MODULE_DEVICE_TABLE(usb, option_ids);
3812     @@ -771,12 +990,19 @@ static int option_probe(struct usb_serial *serial,
3813     const struct usb_device_id *id)
3814     {
3815     struct option_intf_private *data;
3816     +
3817     /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
3818     if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
3819     serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
3820     serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
3821     return -ENODEV;
3822    
3823     + /* Bandrich modem and AT command interface is 0xff */
3824     + if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
3825     + serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
3826     + serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
3827     + return -ENODEV;
3828     +
3829     data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
3830     if (!data)
3831     return -ENOMEM;
3832     diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
3833     index ad1f923..c140870 100644
3834     --- a/drivers/usb/serial/visor.c
3835     +++ b/drivers/usb/serial/visor.c
3836     @@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = {
3837     .throttle = visor_throttle,
3838     .unthrottle = visor_unthrottle,
3839     .attach = clie_3_5_startup,
3840     + .release = visor_release,
3841     .write = visor_write,
3842     .write_room = visor_write_room,
3843     .write_bulk_callback = visor_write_bulk_callback,
3844     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
3845     index 98b549b..b6e0e0e 100644
3846     --- a/drivers/usb/storage/unusual_devs.h
3847     +++ b/drivers/usb/storage/unusual_devs.h
3848     @@ -1858,6 +1858,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
3849     US_SC_DEVICE, US_PR_DEVICE, NULL,
3850     US_FL_IGNORE_RESIDUE ),
3851    
3852     +/* Reported by Hans de Goede <hdegoede@redhat.com>
3853     + * These Appotech controllers are found in Picture Frames, they provide a
3854     + * (buggy) emulation of a cdrom drive which contains the windows software
3855     + * Uploading of pictures happens over the corresponding /dev/sg device. */
3856     +UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
3857     + "BUILDWIN",
3858     + "Photo Frame",
3859     + US_SC_DEVICE, US_PR_DEVICE, NULL,
3860     + US_FL_BAD_SENSE ),
3861     +UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
3862     + "BUILDWIN",
3863     + "Photo Frame",
3864     + US_SC_DEVICE, US_PR_DEVICE, NULL,
3865     + US_FL_BAD_SENSE ),
3866     +
3867     UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
3868     "ST",
3869     "2A",
3870     diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
3871     index eab33f1..7b547f5 100644
3872     --- a/drivers/xen/xenbus/xenbus_xs.c
3873     +++ b/drivers/xen/xenbus/xenbus_xs.c
3874     @@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
3875     #define PRINTF_BUFFER_SIZE 4096
3876     char *printf_buffer;
3877    
3878     - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
3879     + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
3880     if (printf_buffer == NULL)
3881     return -ENOMEM;
3882    
3883     diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
3884     index 6df6d6e..9be949a 100644
3885     --- a/fs/btrfs/acl.c
3886     +++ b/fs/btrfs/acl.c
3887     @@ -159,6 +159,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
3888     int ret;
3889     struct posix_acl *acl = NULL;
3890    
3891     + if (!is_owner_or_cap(dentry->d_inode))
3892     + return -EPERM;
3893     +
3894     if (value) {
3895     acl = posix_acl_from_xattr(value, size);
3896     if (acl == NULL) {
3897     diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
3898     index 5646727..05a9b77 100644
3899     --- a/fs/cifs/cifsproto.h
3900     +++ b/fs/cifs/cifsproto.h
3901     @@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
3902     __u16 fileHandle, struct file *file,
3903     struct vfsmount *mnt, unsigned int oflags);
3904     extern int cifs_posix_open(char *full_path, struct inode **pinode,
3905     - struct vfsmount *mnt, int mode, int oflags,
3906     - __u32 *poplock, __u16 *pnetfid, int xid);
3907     + struct vfsmount *mnt,
3908     + struct super_block *sb,
3909     + int mode, int oflags,
3910     + __u32 *poplock, __u16 *pnetfid, int xid);
3911     extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
3912     FILE_UNIX_BASIC_INFO *info,
3913     struct cifs_sb_info *cifs_sb);
3914     diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
3915     index 6ccf726..9e9d48f 100644
3916     --- a/fs/cifs/dir.c
3917     +++ b/fs/cifs/dir.c
3918     @@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
3919     }
3920    
3921     int cifs_posix_open(char *full_path, struct inode **pinode,
3922     - struct vfsmount *mnt, int mode, int oflags,
3923     - __u32 *poplock, __u16 *pnetfid, int xid)
3924     + struct vfsmount *mnt, struct super_block *sb,
3925     + int mode, int oflags,
3926     + __u32 *poplock, __u16 *pnetfid, int xid)
3927     {
3928     int rc;
3929     FILE_UNIX_BASIC_INFO *presp_data;
3930     __u32 posix_flags = 0;
3931     - struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
3932     + struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
3933     struct cifs_fattr fattr;
3934    
3935     cFYI(1, ("posix open %s", full_path));
3936     @@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
3937    
3938     /* get new inode and set it up */
3939     if (*pinode == NULL) {
3940     - *pinode = cifs_iget(mnt->mnt_sb, &fattr);
3941     + *pinode = cifs_iget(sb, &fattr);
3942     if (!*pinode) {
3943     rc = -ENOMEM;
3944     goto posix_open_ret;
3945     @@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
3946     cifs_fattr_to_inode(*pinode, &fattr);
3947     }
3948    
3949     - cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
3950     + if (mnt)
3951     + cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
3952    
3953     posix_open_ret:
3954     kfree(presp_data);
3955     @@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
3956     if (nd && (nd->flags & LOOKUP_OPEN))
3957     oflags = nd->intent.open.flags;
3958     else
3959     - oflags = FMODE_READ;
3960     + oflags = FMODE_READ | SMB_O_CREAT;
3961    
3962     if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
3963     (CIFS_UNIX_POSIX_PATH_OPS_CAP &
3964     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
3965     - rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
3966     - mode, oflags, &oplock, &fileHandle, xid);
3967     + rc = cifs_posix_open(full_path, &newinode,
3968     + nd ? nd->path.mnt : NULL,
3969     + inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
3970     /* EIO could indicate that (posix open) operation is not
3971     supported, despite what server claimed in capability
3972     negotation. EREMOTE indicates DFS junction, which is not
3973     @@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
3974     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
3975     (nd->intent.open.flags & O_CREAT)) {
3976     rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
3977     + parent_dir_inode->i_sb,
3978     nd->intent.open.create_mode,
3979     nd->intent.open.flags, &oplock,
3980     &fileHandle, xid);
3981     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3982     index 057e1da..8639dd7 100644
3983     --- a/fs/cifs/file.c
3984     +++ b/fs/cifs/file.c
3985     @@ -297,10 +297,12 @@ int cifs_open(struct inode *inode, struct file *file)
3986     (CIFS_UNIX_POSIX_PATH_OPS_CAP &
3987     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
3988     int oflags = (int) cifs_posix_convert_flags(file->f_flags);
3989     + oflags |= SMB_O_CREAT;
3990     /* can not refresh inode info since size could be stale */
3991     rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
3992     - cifs_sb->mnt_file_mode /* ignored */,
3993     - oflags, &oplock, &netfid, xid);
3994     + inode->i_sb,
3995     + cifs_sb->mnt_file_mode /* ignored */,
3996     + oflags, &oplock, &netfid, xid);
3997     if (rc == 0) {
3998     cFYI(1, ("posix open succeeded"));
3999     /* no need for special case handling of setting mode
4000     @@ -512,8 +514,9 @@ reopen_error_exit:
4001     int oflags = (int) cifs_posix_convert_flags(file->f_flags);
4002     /* can not refresh inode info since size could be stale */
4003     rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
4004     - cifs_sb->mnt_file_mode /* ignored */,
4005     - oflags, &oplock, &netfid, xid);
4006     + inode->i_sb,
4007     + cifs_sb->mnt_file_mode /* ignored */,
4008     + oflags, &oplock, &netfid, xid);
4009     if (rc == 0) {
4010     cFYI(1, ("posix reopen succeeded"));
4011     goto reopen_success;
4012     diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
4013     index 4cfab1c..d91e9d8 100644
4014     --- a/fs/exofs/dir.c
4015     +++ b/fs/exofs/dir.c
4016     @@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
4017     de->inode_no = cpu_to_le64(parent->i_ino);
4018     memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
4019     exofs_set_de_type(de, inode);
4020     - kunmap_atomic(page, KM_USER0);
4021     + kunmap_atomic(kaddr, KM_USER0);
4022     err = exofs_commit_chunk(page, 0, chunk_size);
4023     fail:
4024     page_cache_release(page);
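The exofs fix above restores the usual kmap_atomic()/kunmap_atomic() pairing: the unmap call takes the kernel virtual address returned by the map call, not the struct page. A minimal sketch of that pairing on this kernel generation (two-argument API with KM_USER0); zero_page_contents() is a hypothetical helper, not exofs code:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: zero a page through a temporary atomic mapping. */
static void zero_page_contents(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        memset(kaddr, 0, PAGE_SIZE);
        /* Unmap with the mapped address, never with the struct page. */
        kunmap_atomic(kaddr, KM_USER0);
}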
4025     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
4026     index 82c415b..e86f91e 100644
4027     --- a/fs/ext4/move_extent.c
4028     +++ b/fs/ext4/move_extent.c
4029     @@ -964,6 +964,9 @@ mext_check_arguments(struct inode *orig_inode,
4030     return -EINVAL;
4031     }
4032    
4033     + if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
4034     + return -EPERM;
4035     +
4036     /* Ext4 move extent does not support swapfile */
4037     if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
4038     ext4_debug("ext4 move extent: The argument files should "
4039     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4040     index 3b2c554..433ea27 100644
4041     --- a/fs/ext4/resize.c
4042     +++ b/fs/ext4/resize.c
4043     @@ -930,7 +930,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
4044     percpu_counter_add(&sbi->s_freeinodes_counter,
4045     EXT4_INODES_PER_GROUP(sb));
4046    
4047     - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
4048     + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
4049     + sbi->s_log_groups_per_flex) {
4050     ext4_group_t flex_group;
4051     flex_group = ext4_flex_group(sbi, input->group);
4052     atomic_add(input->free_blocks_count,
4053     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
4054     index 1a7c42c..e7fb19c 100644
4055     --- a/fs/fs-writeback.c
4056     +++ b/fs/fs-writeback.c
4057     @@ -834,6 +834,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
4058     unsigned long expired;
4059     long nr_pages;
4060    
4061     + /*
4062     + * When set to zero, disable periodic writeback
4063     + */
4064     + if (!dirty_writeback_interval)
4065     + return 0;
4066     +
4067     expired = wb->last_old_flush +
4068     msecs_to_jiffies(dirty_writeback_interval * 10);
4069     if (time_before(jiffies, expired))
4070     @@ -929,8 +935,12 @@ int bdi_writeback_task(struct bdi_writeback *wb)
4071     break;
4072     }
4073    
4074     - wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
4075     - schedule_timeout_interruptible(wait_jiffies);
4076     + if (dirty_writeback_interval) {
4077     + wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
4078     + schedule_timeout_interruptible(wait_jiffies);
4079     + } else
4080     + schedule();
4081     +
4082     try_to_freeze();
4083     }
4084    
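With the two hunks above, a writeback interval of zero now disables periodic (kupdate-style) flushing outright: wb_check_old_data_flush() bails out early and the flusher thread simply sleeps until it is explicitly kicked. A minimal userspace sketch for toggling it, assuming the usual procfs knob for dirty_writeback_interval (/proc/sys/vm/dirty_writeback_centisecs, value in centiseconds):

#include <stdio.h>

int main(void)
{
        /* 0 disables periodic writeback; e.g. 500 would flush every 5 s. */
        FILE *f = fopen("/proc/sys/vm/dirty_writeback_centisecs", "w");

        if (!f) {
                perror("dirty_writeback_centisecs");
                return 1;
        }
        fprintf(f, "0\n");
        return fclose(f) ? 1 : 0;
}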
4085     diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
4086     index e6dd2ae..b20bfcc 100644
4087     --- a/fs/gfs2/file.c
4088     +++ b/fs/gfs2/file.c
4089     @@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
4090     if (error)
4091     goto out_drop_write;
4092    
4093     + error = -EACCES;
4094     + if (!is_owner_or_cap(inode))
4095     + goto out;
4096     +
4097     + error = 0;
4098     flags = ip->i_diskflags;
4099     new_flags = (flags & ~mask) | (reqflags & mask);
4100     if ((new_flags ^ flags) == 0)
4101     @@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
4102     {
4103     struct inode *inode = filp->f_path.dentry->d_inode;
4104     u32 fsflags, gfsflags;
4105     +
4106     if (get_user(fsflags, ptr))
4107     return -EFAULT;
4108     +
4109     gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
4110     if (!S_ISDIR(inode->i_mode)) {
4111     if (gfsflags & GFS2_DIF_INHERIT_JDATA)
4112     diff --git a/fs/libfs.c b/fs/libfs.c
4113     index 6e8d17e..9e0fc76 100644
4114     --- a/fs/libfs.c
4115     +++ b/fs/libfs.c
4116     @@ -415,7 +415,8 @@ int simple_write_end(struct file *file, struct address_space *mapping,
4117     * unique inode values later for this filesystem, then you must take care
4118     * to pass it an appropriate max_reserved value to avoid collisions.
4119     */
4120     -int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
4121     +int simple_fill_super(struct super_block *s, unsigned long magic,
4122     + struct tree_descr *files)
4123     {
4124     struct inode *inode;
4125     struct dentry *root;
4126     diff --git a/fs/namespace.c b/fs/namespace.c
4127     index c768f73..bf6b6fe 100644
4128     --- a/fs/namespace.c
4129     +++ b/fs/namespace.c
4130     @@ -1121,8 +1121,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
4131     {
4132     struct path path;
4133     int retval;
4134     + int lookup_flags = 0;
4135    
4136     - retval = user_path(name, &path);
4137     + if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
4138     + return -EINVAL;
4139     +
4140     + if (!(flags & UMOUNT_NOFOLLOW))
4141     + lookup_flags |= LOOKUP_FOLLOW;
4142     +
4143     + retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
4144     if (retval)
4145     goto out;
4146     retval = -EINVAL;
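The umount() change above adds flag validation and the new UMOUNT_NOFOLLOW flag (0x00000008, defined in include/linux/fs.h further down), which stops the kernel from dereferencing a trailing symlink on the target, useful for privileged helpers that unmount user-supplied paths. A minimal userspace sketch using glibc's umount2() wrapper; the fallback #define is only needed where the libc headers predate the flag:

#include <stdio.h>
#include <sys/mount.h>

#ifndef UMOUNT_NOFOLLOW
#define UMOUNT_NOFOLLOW 0x00000008      /* matches the kernel definition */
#endif

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }
        if (umount2(argv[1], UMOUNT_NOFOLLOW) < 0) {
                perror("umount2");
                return 1;
        }
        return 0;
}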
4147     diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
4148     index 171699e..06b2a26 100644
4149     --- a/fs/nfsd/nfssvc.c
4150     +++ b/fs/nfsd/nfssvc.c
4151     @@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion;
4152     int nfsd_vers(int vers, enum vers_op change)
4153     {
4154     if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
4155     - return -1;
4156     + return 0;
4157     switch(change) {
4158     case NFSD_SET:
4159     nfsd_versions[vers] = nfsd_version[vers];
4160     diff --git a/include/linux/fs.h b/include/linux/fs.h
4161     index 66b0705..899a4d6 100644
4162     --- a/include/linux/fs.h
4163     +++ b/include/linux/fs.h
4164     @@ -1308,6 +1308,8 @@ extern int send_sigurg(struct fown_struct *fown);
4165     #define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
4166     #define MNT_DETACH 0x00000002 /* Just detach from the tree */
4167     #define MNT_EXPIRE 0x00000004 /* Mark for expiry */
4168     +#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
4169     +#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
4170    
4171     extern struct list_head super_blocks;
4172     extern spinlock_t sb_lock;
4173     @@ -2359,7 +2361,7 @@ extern const struct file_operations simple_dir_operations;
4174     extern const struct inode_operations simple_dir_inode_operations;
4175     struct tree_descr { char *name; const struct file_operations *ops; int mode; };
4176     struct dentry *d_alloc_name(struct dentry *, const char *);
4177     -extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
4178     +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
4179     extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
4180     extern void simple_release_fs(struct vfsmount **mount, int *count);
4181    
4182     diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4183     index 0be8243..be29806 100644
4184     --- a/include/linux/pci_ids.h
4185     +++ b/include/linux/pci_ids.h
4186     @@ -2322,6 +2322,7 @@
4187     #define PCI_VENDOR_ID_JMICRON 0x197B
4188     #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
4189     #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
4190     +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
4191     #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
4192     #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
4193     #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
4194     diff --git a/include/linux/swap.h b/include/linux/swap.h
4195     index a2602a8..d8029c2 100644
4196     --- a/include/linux/swap.h
4197     +++ b/include/linux/swap.h
4198     @@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(struct page *page)
4199     __lru_cache_add(page, LRU_INACTIVE_ANON);
4200     }
4201    
4202     -static inline void lru_cache_add_active_anon(struct page *page)
4203     -{
4204     - __lru_cache_add(page, LRU_ACTIVE_ANON);
4205     -}
4206     -
4207     static inline void lru_cache_add_file(struct page *page)
4208     {
4209     __lru_cache_add(page, LRU_INACTIVE_FILE);
4210     }
4211    
4212     -static inline void lru_cache_add_active_file(struct page *page)
4213     -{
4214     - __lru_cache_add(page, LRU_ACTIVE_FILE);
4215     -}
4216     -
4217     /* linux/mm/vmscan.c */
4218     extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
4219     gfp_t gfp_mask, nodemask_t *mask);
4220     diff --git a/include/linux/tboot.h b/include/linux/tboot.h
4221     index bf2a0c7..1dba6ee 100644
4222     --- a/include/linux/tboot.h
4223     +++ b/include/linux/tboot.h
4224     @@ -150,6 +150,7 @@ extern int tboot_force_iommu(void);
4225    
4226     #else
4227    
4228     +#define tboot_enabled() 0
4229     #define tboot_probe() do { } while (0)
4230     #define tboot_shutdown(shutdown_type) do { } while (0)
4231     #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
4232     diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
4233     index ff30177..597f8e2 100644
4234     --- a/include/net/sctp/structs.h
4235     +++ b/include/net/sctp/structs.h
4236     @@ -778,6 +778,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
4237     struct iovec *data);
4238     void sctp_chunk_free(struct sctp_chunk *);
4239     void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
4240     +void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
4241     struct sctp_chunk *sctp_chunkify(struct sk_buff *,
4242     const struct sctp_association *,
4243     struct sock *);
4244     diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
4245     index a510b75..32c0697 100644
4246     --- a/include/trace/events/signal.h
4247     +++ b/include/trace/events/signal.h
4248     @@ -10,7 +10,8 @@
4249    
4250     #define TP_STORE_SIGINFO(__entry, info) \
4251     do { \
4252     - if (info == SEND_SIG_NOINFO) { \
4253     + if (info == SEND_SIG_NOINFO || \
4254     + info == SEND_SIG_FORCED) { \
4255     __entry->errno = 0; \
4256     __entry->code = SI_USER; \
4257     } else if (info == SEND_SIG_PRIV) { \
4258     diff --git a/kernel/compat.c b/kernel/compat.c
4259     index f6c204f..180d188 100644
4260     --- a/kernel/compat.c
4261     +++ b/kernel/compat.c
4262     @@ -494,29 +494,26 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
4263     {
4264     int ret;
4265     cpumask_var_t mask;
4266     - unsigned long *k;
4267     - unsigned int min_length = cpumask_size();
4268     -
4269     - if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
4270     - min_length = sizeof(compat_ulong_t);
4271    
4272     - if (len < min_length)
4273     + if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4274     + return -EINVAL;
4275     + if (len & (sizeof(compat_ulong_t)-1))
4276     return -EINVAL;
4277    
4278     if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4279     return -ENOMEM;
4280    
4281     ret = sched_getaffinity(pid, mask);
4282     - if (ret < 0)
4283     - goto out;
4284     + if (ret == 0) {
4285     + size_t retlen = min_t(size_t, len, cpumask_size());
4286    
4287     - k = cpumask_bits(mask);
4288     - ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
4289     - if (ret == 0)
4290     - ret = min_length;
4291     -
4292     -out:
4293     + if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
4294     + ret = -EFAULT;
4295     + else
4296     + ret = retlen;
4297     + }
4298     free_cpumask_var(mask);
4299     +
4300     return ret;
4301     }
4302    
4303     diff --git a/kernel/mutex.c b/kernel/mutex.c
4304     index 632f04c..4c0b7b3 100644
4305     --- a/kernel/mutex.c
4306     +++ b/kernel/mutex.c
4307     @@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4308     struct thread_info *owner;
4309    
4310     /*
4311     + * If we own the BKL, then don't spin. The owner of
4312     + * the mutex might be waiting on us to release the BKL.
4313     + */
4314     + if (unlikely(current->lock_depth >= 0))
4315     + break;
4316     +
4317     + /*
4318     * If there's an owner, wait for it to either
4319     * release the lock or go to sleep.
4320     */
4321     diff --git a/kernel/perf_event.c b/kernel/perf_event.c
4322     index e928e1a..fbbe79b 100644
4323     --- a/kernel/perf_event.c
4324     +++ b/kernel/perf_event.c
4325     @@ -1417,13 +1417,16 @@ do { \
4326     divisor = nsec * frequency;
4327     }
4328    
4329     + if (!divisor)
4330     + return dividend;
4331     +
4332     return div64_u64(dividend, divisor);
4333     }
4334    
4335     static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
4336     {
4337     struct hw_perf_event *hwc = &event->hw;
4338     - u64 period, sample_period;
4339     + s64 period, sample_period;
4340     s64 delta;
4341    
4342     period = perf_calculate_period(event, nsec, count);
4343     @@ -4712,8 +4715,8 @@ SYSCALL_DEFINE5(perf_event_open,
4344     struct perf_event_context *ctx;
4345     struct file *event_file = NULL;
4346     struct file *group_file = NULL;
4347     + int event_fd;
4348     int fput_needed = 0;
4349     - int fput_needed2 = 0;
4350     int err;
4351    
4352     /* for future expandability... */
4353     @@ -4734,12 +4737,18 @@ SYSCALL_DEFINE5(perf_event_open,
4354     return -EINVAL;
4355     }
4356    
4357     + event_fd = get_unused_fd_flags(O_RDWR);
4358     + if (event_fd < 0)
4359     + return event_fd;
4360     +
4361     /*
4362     * Get the target context (task or percpu):
4363     */
4364     ctx = find_get_context(pid, cpu);
4365     - if (IS_ERR(ctx))
4366     - return PTR_ERR(ctx);
4367     + if (IS_ERR(ctx)) {
4368     + err = PTR_ERR(ctx);
4369     + goto err_fd;
4370     + }
4371    
4372     /*
4373     * Look up the group leader (we will attach this event to it):
4374     @@ -4779,13 +4788,11 @@ SYSCALL_DEFINE5(perf_event_open,
4375     if (IS_ERR(event))
4376     goto err_put_context;
4377    
4378     - err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
4379     - if (err < 0)
4380     - goto err_free_put_context;
4381     -
4382     - event_file = fget_light(err, &fput_needed2);
4383     - if (!event_file)
4384     + event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
4385     + if (IS_ERR(event_file)) {
4386     + err = PTR_ERR(event_file);
4387     goto err_free_put_context;
4388     + }
4389    
4390     if (flags & PERF_FLAG_FD_OUTPUT) {
4391     err = perf_event_set_output(event, group_fd);
4392     @@ -4806,19 +4813,19 @@ SYSCALL_DEFINE5(perf_event_open,
4393     list_add_tail(&event->owner_entry, &current->perf_event_list);
4394     mutex_unlock(&current->perf_event_mutex);
4395    
4396     -err_fput_free_put_context:
4397     - fput_light(event_file, fput_needed2);
4398     + fput_light(group_file, fput_needed);
4399     + fd_install(event_fd, event_file);
4400     + return event_fd;
4401    
4402     +err_fput_free_put_context:
4403     + fput(event_file);
4404     err_free_put_context:
4405     - if (err < 0)
4406     - free_event(event);
4407     -
4408     + free_event(event);
4409     err_put_context:
4410     - if (err < 0)
4411     - put_ctx(ctx);
4412     -
4413     fput_light(group_file, fput_needed);
4414     -
4415     + put_ctx(ctx);
4416     +err_fd:
4417     + put_unused_fd(event_fd);
4418     return err;
4419     }
4420    
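The perf_event_open() rework above is the standard "reserve the fd first, publish it last" ordering: grab a descriptor number, build the file, and only fd_install() it once nothing can fail any more, so the error paths never have to revoke an fd userspace may already have seen. A condensed sketch of that ordering using the same kernel helpers (example_fops and priv are placeholders):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int example_open_fd(const struct file_operations *example_fops, void *priv)
{
        struct file *file;
        int fd;

        fd = get_unused_fd_flags(O_RDWR);       /* reserve the number only */
        if (fd < 0)
                return fd;

        file = anon_inode_getfile("[example]", example_fops, priv, O_RDWR);
        if (IS_ERR(file)) {
                put_unused_fd(fd);              /* nothing was published yet */
                return PTR_ERR(file);
        }

        /* Point of no return: make the descriptor visible to userspace. */
        fd_install(fd, file);
        return fd;
}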
4421     diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
4422     index 4954407..5e76d22 100644
4423     --- a/kernel/posix-timers.c
4424     +++ b/kernel/posix-timers.c
4425     @@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
4426     new_timer->it_id = (timer_t) new_timer_id;
4427     new_timer->it_clock = which_clock;
4428     new_timer->it_overrun = -1;
4429     - error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
4430     - if (error)
4431     - goto out;
4432    
4433     - /*
4434     - * return the timer_id now. The next step is hard to
4435     - * back out if there is an error.
4436     - */
4437     if (copy_to_user(created_timer_id,
4438     &new_timer_id, sizeof (new_timer_id))) {
4439     error = -EFAULT;
4440     @@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
4441     new_timer->sigq->info.si_tid = new_timer->it_id;
4442     new_timer->sigq->info.si_code = SI_TIMER;
4443    
4444     + error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
4445     + if (error)
4446     + goto out;
4447     +
4448     spin_lock_irq(&current->sighand->siglock);
4449     new_timer->it_signal = current->signal;
4450     list_add(&new_timer->list, &current->signal->posix_timers);
4451     diff --git a/kernel/signal.c b/kernel/signal.c
4452     index 934ae5e..6ca731b 100644
4453     --- a/kernel/signal.c
4454     +++ b/kernel/signal.c
4455     @@ -625,7 +625,7 @@ static inline bool si_fromuser(const struct siginfo *info)
4456     static int check_kill_permission(int sig, struct siginfo *info,
4457     struct task_struct *t)
4458     {
4459     - const struct cred *cred = current_cred(), *tcred;
4460     + const struct cred *cred, *tcred;
4461     struct pid *sid;
4462     int error;
4463    
4464     @@ -639,8 +639,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
4465     if (error)
4466     return error;
4467    
4468     + cred = current_cred();
4469     tcred = __task_cred(t);
4470     - if ((cred->euid ^ tcred->suid) &&
4471     + if (!same_thread_group(current, t) &&
4472     + (cred->euid ^ tcred->suid) &&
4473     (cred->euid ^ tcred->uid) &&
4474     (cred->uid ^ tcred->suid) &&
4475     (cred->uid ^ tcred->uid) &&
4476     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4477     index 45cfb6d..7d12c17 100644
4478     --- a/kernel/trace/trace.c
4479     +++ b/kernel/trace/trace.c
4480     @@ -32,6 +32,7 @@
4481     #include <linux/splice.h>
4482     #include <linux/kdebug.h>
4483     #include <linux/string.h>
4484     +#include <linux/rwsem.h>
4485     #include <linux/ctype.h>
4486     #include <linux/init.h>
4487     #include <linux/poll.h>
4488     @@ -102,9 +103,6 @@ static inline void ftrace_enable_cpu(void)
4489    
4490     static cpumask_var_t __read_mostly tracing_buffer_mask;
4491    
4492     -/* Define which cpu buffers are currently read in trace_pipe */
4493     -static cpumask_var_t tracing_reader_cpumask;
4494     -
4495     #define for_each_tracing_cpu(cpu) \
4496     for_each_cpu(cpu, tracing_buffer_mask)
4497    
4498     @@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly;
4499    
4500     /*
4501     * trace_types_lock is used to protect the trace_types list.
4502     - * This lock is also used to keep user access serialized.
4503     - * Accesses from userspace will grab this lock while userspace
4504     - * activities happen inside the kernel.
4505     */
4506     static DEFINE_MUTEX(trace_types_lock);
4507    
4508     +/*
4509     + * serialize access to the ring buffer
4510     + *
4511     + * The ring buffer serializes readers, but it is only low-level protection.
4512     + * The validity of the events (which are returned by ring_buffer_peek() etc.)
4513     + * is not protected by the ring buffer.
4514     + *
4515     + * The content of events may become garbage if we allow other processes to
4516     + * consume these events concurrently:
4517     + * A) the page of the consumed events may become a normal page
4518     + * (not a reader page) in the ring buffer, and this page will be rewritten
4519     + * by the events producer.
4520     + * B) The page of the consumed events may become a page for splice_read,
4521     + * and this page will be returned to the system.
4522     + *
4523     + * These primitives allow multiple processes to access different cpu ring
4524     + * buffers concurrently.
4525     + *
4526     + * These primitives don't distinguish read-only and read-consume access.
4527     + * Multiple read-only accesses are also serialized.
4528     + */
4529     +
4530     +#ifdef CONFIG_SMP
4531     +static DECLARE_RWSEM(all_cpu_access_lock);
4532     +static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
4533     +
4534     +static inline void trace_access_lock(int cpu)
4535     +{
4536     + if (cpu == TRACE_PIPE_ALL_CPU) {
4537     + /* gain it for accessing the whole ring buffer. */
4538     + down_write(&all_cpu_access_lock);
4539     + } else {
4540     + /* gain it for accessing a cpu ring buffer. */
4541     +
4542     + /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
4543     + down_read(&all_cpu_access_lock);
4544     +
4545     + /* Secondly block other access to this @cpu ring buffer. */
4546     + mutex_lock(&per_cpu(cpu_access_lock, cpu));
4547     + }
4548     +}
4549     +
4550     +static inline void trace_access_unlock(int cpu)
4551     +{
4552     + if (cpu == TRACE_PIPE_ALL_CPU) {
4553     + up_write(&all_cpu_access_lock);
4554     + } else {
4555     + mutex_unlock(&per_cpu(cpu_access_lock, cpu));
4556     + up_read(&all_cpu_access_lock);
4557     + }
4558     +}
4559     +
4560     +static inline void trace_access_lock_init(void)
4561     +{
4562     + int cpu;
4563     +
4564     + for_each_possible_cpu(cpu)
4565     + mutex_init(&per_cpu(cpu_access_lock, cpu));
4566     +}
4567     +
4568     +#else
4569     +
4570     +static DEFINE_MUTEX(access_lock);
4571     +
4572     +static inline void trace_access_lock(int cpu)
4573     +{
4574     + (void)cpu;
4575     + mutex_lock(&access_lock);
4576     +}
4577     +
4578     +static inline void trace_access_unlock(int cpu)
4579     +{
4580     + (void)cpu;
4581     + mutex_unlock(&access_lock);
4582     +}
4583     +
4584     +static inline void trace_access_lock_init(void)
4585     +{
4586     +}
4587     +
4588     +#endif
4589     +
4590     /* trace_wait is a waitqueue for tasks blocked on trace_poll */
4591     static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
4592    
4593     @@ -1601,12 +1678,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4594     }
4595    
4596     /*
4597     - * No necessary locking here. The worst thing which can
4598     - * happen is loosing events consumed at the same time
4599     - * by a trace_pipe reader.
4600     - * Other than that, we don't risk to crash the ring buffer
4601     - * because it serializes the readers.
4602     - *
4603     * The current tracer is copied to avoid a global locking
4604     * all around.
4605     */
4606     @@ -1662,12 +1733,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
4607     }
4608    
4609     trace_event_read_lock();
4610     + trace_access_lock(cpu_file);
4611     return p;
4612     }
4613    
4614     static void s_stop(struct seq_file *m, void *p)
4615     {
4616     + struct trace_iterator *iter = m->private;
4617     +
4618     atomic_dec(&trace_record_cmdline_disabled);
4619     + trace_access_unlock(iter->cpu_file);
4620     trace_event_read_unlock();
4621     }
4622    
4623     @@ -2858,22 +2933,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
4624    
4625     mutex_lock(&trace_types_lock);
4626    
4627     - /* We only allow one reader per cpu */
4628     - if (cpu_file == TRACE_PIPE_ALL_CPU) {
4629     - if (!cpumask_empty(tracing_reader_cpumask)) {
4630     - ret = -EBUSY;
4631     - goto out;
4632     - }
4633     - cpumask_setall(tracing_reader_cpumask);
4634     - } else {
4635     - if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
4636     - cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
4637     - else {
4638     - ret = -EBUSY;
4639     - goto out;
4640     - }
4641     - }
4642     -
4643     /* create a buffer to store the information to pass to userspace */
4644     iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4645     if (!iter) {
4646     @@ -2929,12 +2988,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
4647    
4648     mutex_lock(&trace_types_lock);
4649    
4650     - if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
4651     - cpumask_clear(tracing_reader_cpumask);
4652     - else
4653     - cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
4654     -
4655     -
4656     if (iter->trace->pipe_close)
4657     iter->trace->pipe_close(iter);
4658    
4659     @@ -3096,6 +3149,7 @@ waitagain:
4660     iter->pos = -1;
4661    
4662     trace_event_read_lock();
4663     + trace_access_lock(iter->cpu_file);
4664     while (find_next_entry_inc(iter) != NULL) {
4665     enum print_line_t ret;
4666     int len = iter->seq.len;
4667     @@ -3112,6 +3166,7 @@ waitagain:
4668     if (iter->seq.len >= cnt)
4669     break;
4670     }
4671     + trace_access_unlock(iter->cpu_file);
4672     trace_event_read_unlock();
4673    
4674     /* Now copy what we have to the user */
4675     @@ -3237,6 +3292,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
4676     }
4677    
4678     trace_event_read_lock();
4679     + trace_access_lock(iter->cpu_file);
4680    
4681     /* Fill as many pages as possible. */
4682     for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
4683     @@ -3260,6 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
4684     trace_seq_init(&iter->seq);
4685     }
4686    
4687     + trace_access_unlock(iter->cpu_file);
4688     trace_event_read_unlock();
4689     mutex_unlock(&iter->mutex);
4690    
4691     @@ -3561,10 +3618,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
4692    
4693     info->read = 0;
4694    
4695     + trace_access_lock(info->cpu);
4696     ret = ring_buffer_read_page(info->tr->buffer,
4697     &info->spare,
4698     count,
4699     info->cpu, 0);
4700     + trace_access_unlock(info->cpu);
4701     if (ret < 0)
4702     return 0;
4703    
4704     @@ -3692,6 +3751,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4705     len &= PAGE_MASK;
4706     }
4707    
4708     + trace_access_lock(info->cpu);
4709     entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4710    
4711     for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
4712     @@ -3739,6 +3799,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4713     entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4714     }
4715    
4716     + trace_access_unlock(info->cpu);
4717     spd.nr_pages = i;
4718    
4719     /* did we read anything? */
4720     @@ -4175,6 +4236,8 @@ static __init int tracer_init_debugfs(void)
4721     struct dentry *d_tracer;
4722     int cpu;
4723    
4724     + trace_access_lock_init();
4725     +
4726     d_tracer = tracing_init_dentry();
4727    
4728     trace_create_file("tracing_enabled", 0644, d_tracer,
4729     @@ -4409,9 +4472,6 @@ __init static int tracer_alloc_buffers(void)
4730     if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4731     goto out_free_buffer_mask;
4732    
4733     - if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4734     - goto out_free_tracing_cpumask;
4735     -
4736     /* To save memory, keep the ring buffer size to its minimum */
4737     if (ring_buffer_expanded)
4738     ring_buf_size = trace_buf_size;
4739     @@ -4469,8 +4529,6 @@ __init static int tracer_alloc_buffers(void)
4740     return 0;
4741    
4742     out_free_cpumask:
4743     - free_cpumask_var(tracing_reader_cpumask);
4744     -out_free_tracing_cpumask:
4745     free_cpumask_var(tracing_cpumask);
4746     out_free_buffer_mask:
4747     free_cpumask_var(tracing_buffer_mask);
4748     diff --git a/mm/filemap.c b/mm/filemap.c
4749     index 698ea80..96398d3 100644
4750     --- a/mm/filemap.c
4751     +++ b/mm/filemap.c
4752     @@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
4753     /*
4754     * Splice_read and readahead add shmem/tmpfs pages into the page cache
4755     * before shmem_readpage has a chance to mark them as SwapBacked: they
4756     - * need to go on the active_anon lru below, and mem_cgroup_cache_charge
4757     + * need to go on the anon lru below, and mem_cgroup_cache_charge
4758     * (called in add_to_page_cache) needs to know where they're going too.
4759     */
4760     if (mapping_cap_swap_backed(mapping))
4761     @@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
4762     if (page_is_file_cache(page))
4763     lru_cache_add_file(page);
4764     else
4765     - lru_cache_add_active_anon(page);
4766     + lru_cache_add_anon(page);
4767     }
4768     return ret;
4769     }
4770     @@ -1099,6 +1099,12 @@ page_not_up_to_date_locked:
4771     }
4772    
4773     readpage:
4774     + /*
4775     + * A previous I/O error may have been due to temporary
4776     + * failures, e.g. multipath errors.
4777     + * PG_error will be set again if readpage fails.
4778     + */
4779     + ClearPageError(page);
4780     /* Start the actual read. The read will unlock the page. */
4781     error = mapping->a_ops->readpage(filp, page);
4782    
4783     diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
4784     index a10d508..9f6f8d3 100644
4785     --- a/net/mac80211/Kconfig
4786     +++ b/net/mac80211/Kconfig
4787     @@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
4788    
4789     if MAC80211 != n
4790    
4791     +config MAC80211_HAS_RC
4792     + def_bool n
4793     +
4794     config MAC80211_RC_PID
4795     bool "PID controller based rate control algorithm" if EMBEDDED
4796     + select MAC80211_HAS_RC
4797     ---help---
4798     This option enables a TX rate control algorithm for
4799     mac80211 that uses a PID controller to select the TX
4800     @@ -24,12 +28,14 @@ config MAC80211_RC_PID
4801    
4802     config MAC80211_RC_MINSTREL
4803     bool "Minstrel" if EMBEDDED
4804     + select MAC80211_HAS_RC
4805     default y
4806     ---help---
4807     This option enables the 'minstrel' TX rate control algorithm
4808    
4809     choice
4810     prompt "Default rate control algorithm"
4811     + depends on MAC80211_HAS_RC
4812     default MAC80211_RC_DEFAULT_MINSTREL
4813     ---help---
4814     This option selects the default rate control algorithm
4815     @@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
4816    
4817     endif
4818    
4819     +comment "Some wireless drivers require a rate control algorithm"
4820     + depends on MAC80211_HAS_RC=n
4821     +
4822     config MAC80211_MESH
4823     bool "Enable mac80211 mesh networking (pre-802.11s) support"
4824     depends on MAC80211 && EXPERIMENTAL
4825     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4826     index 9ae1a47..e2b4345 100644
4827     --- a/net/mac80211/cfg.c
4828     +++ b/net/mac80211/cfg.c
4829     @@ -98,9 +98,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
4830     params->mesh_id_len,
4831     params->mesh_id);
4832    
4833     - if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
4834     - return 0;
4835     -
4836     if (type == NL80211_IFTYPE_AP_VLAN &&
4837     params && params->use_4addr == 0)
4838     rcu_assign_pointer(sdata->u.vlan.sta, NULL);
4839     @@ -108,7 +105,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
4840     params && params->use_4addr >= 0)
4841     sdata->u.mgd.use_4addr = params->use_4addr;
4842    
4843     - sdata->u.mntr_flags = *flags;
4844     + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags)
4845     + sdata->u.mntr_flags = *flags;
4846     +
4847     return 0;
4848     }
4849    
4850     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4851     index edfa036..48d8fdf 100644
4852     --- a/net/mac80211/rx.c
4853     +++ b/net/mac80211/rx.c
4854     @@ -1399,7 +1399,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
4855     (rx->key || rx->sdata->drop_unencrypted)))
4856     return -EACCES;
4857     if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
4858     - if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
4859     + if (unlikely(!ieee80211_has_protected(fc) &&
4860     + ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
4861     rx->key))
4862     return -EACCES;
4863     /* BIP does not use Protected field, so need to check MMIE */
4864     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4865     index 1fdc0a5..9b50183 100644
4866     --- a/net/mac80211/tx.c
4867     +++ b/net/mac80211/tx.c
4868     @@ -501,7 +501,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
4869     struct ieee80211_hdr *hdr = (void *)tx->skb->data;
4870     struct ieee80211_supported_band *sband;
4871     struct ieee80211_rate *rate;
4872     - int i, len;
4873     + int i;
4874     + u32 len;
4875     bool inval = false, rts = false, short_preamble = false;
4876     struct ieee80211_tx_rate_control txrc;
4877     u32 sta_flags;
4878     @@ -510,7 +511,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
4879    
4880     sband = tx->local->hw.wiphy->bands[tx->channel->band];
4881    
4882     - len = min_t(int, tx->skb->len + FCS_LEN,
4883     + len = min_t(u32, tx->skb->len + FCS_LEN,
4884     tx->local->hw.wiphy->frag_threshold);
4885    
4886     /* set up the tx rate control struct we give the RC algo */
4887     diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
4888     index 224db01..35fa684 100644
4889     --- a/net/sctp/sm_make_chunk.c
4890     +++ b/net/sctp/sm_make_chunk.c
4891     @@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp_param = {
4892     cpu_to_be16(sizeof(struct sctp_paramhdr)),
4893     };
4894    
4895     -/* A helper to initialize to initialize an op error inside a
4896     +/* A helper to initialize an op error inside a
4897     * provided chunk, as most cause codes will be embedded inside an
4898     * abort chunk.
4899     */
4900     @@ -124,6 +124,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
4901     chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
4902     }
4903    
4904     +/* A helper to initialize an op error inside a
4905     + * provided chunk, as most cause codes will be embedded inside an
4906     + * abort chunk. Differs from sctp_init_cause in that it won't oops
4907     + * if there isn't enough space in the op error chunk
4908     + */
4909     +int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
4910     + size_t paylen)
4911     +{
4912     + sctp_errhdr_t err;
4913     + __u16 len;
4914     +
4915     + /* Cause code constants are now defined in network order. */
4916     + err.cause = cause_code;
4917     + len = sizeof(sctp_errhdr_t) + paylen;
4918     + err.length = htons(len);
4919     +
4920     + if (skb_tailroom(chunk->skb) < len)
4921     + return -ENOSPC;
4922     + chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
4923     + sizeof(sctp_errhdr_t),
4924     + &err);
4925     + return 0;
4926     +}
4927     /* 3.3.2 Initiation (INIT) (1)
4928     *
4929     * This chunk is used to initiate a SCTP association between two
4930     @@ -1131,6 +1154,24 @@ nodata:
4931     return retval;
4932     }
4933    
4934     +/* Create an Operation Error chunk of a fixed size,
4935     + * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
4936     + * This is a helper function to allocate an error chunk for
4937     + * those invalid parameter codes for which we may not want
4938     + * to report all the errors, if the incoming chunk is large
4939     + */
4940     +static inline struct sctp_chunk *sctp_make_op_error_fixed(
4941     + const struct sctp_association *asoc,
4942     + const struct sctp_chunk *chunk)
4943     +{
4944     + size_t size = asoc ? asoc->pathmtu : 0;
4945     +
4946     + if (!size)
4947     + size = SCTP_DEFAULT_MAXSEGMENT;
4948     +
4949     + return sctp_make_op_error_space(asoc, chunk, size);
4950     +}
4951     +
4952     /* Create an Operation Error chunk. */
4953     struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
4954     const struct sctp_chunk *chunk,
4955     @@ -1373,6 +1414,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
4956     return target;
4957     }
4958    
4959     +/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
4960     + * space in the chunk
4961     + */
4962     +void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
4963     + int len, const void *data)
4964     +{
4965     + if (skb_tailroom(chunk->skb) >= len)
4966     + return sctp_addto_chunk(chunk, len, data);
4967     + else
4968     + return NULL;
4969     +}
4970     +
4971     /* Append bytes from user space to the end of a chunk. Will panic if
4972     * chunk is not big enough.
4973     * Returns a kernel err value.
4974     @@ -1976,13 +2029,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
4975     * returning multiple unknown parameters.
4976     */
4977     if (NULL == *errp)
4978     - *errp = sctp_make_op_error_space(asoc, chunk,
4979     - ntohs(chunk->chunk_hdr->length));
4980     + *errp = sctp_make_op_error_fixed(asoc, chunk);
4981    
4982     if (*errp) {
4983     - sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
4984     + sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
4985     WORD_ROUND(ntohs(param.p->length)));
4986     - sctp_addto_chunk(*errp,
4987     + sctp_addto_chunk_fixed(*errp,
4988     WORD_ROUND(ntohs(param.p->length)),
4989     param.v);
4990     } else {
4991     diff --git a/net/wireless/core.h b/net/wireless/core.h
4992     index 4ef3efc..35b7121 100644
4993     --- a/net/wireless/core.h
4994     +++ b/net/wireless/core.h
4995     @@ -378,6 +378,8 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev,
4996     struct wireless_dev *for_wdev,
4997     int freq, enum nl80211_channel_type channel_type);
4998    
4999     +u16 cfg80211_calculate_bitrate(struct rate_info *rate);
5000     +
5001     #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
5002     #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
5003     #else
5004     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5005     index a602843..7cb0d64 100644
5006     --- a/net/wireless/nl80211.c
5007     +++ b/net/wireless/nl80211.c
5008     @@ -1637,39 +1637,6 @@ static int parse_station_flags(struct genl_info *info,
5009     return 0;
5010     }
5011    
5012     -static u16 nl80211_calculate_bitrate(struct rate_info *rate)
5013     -{
5014     - int modulation, streams, bitrate;
5015     -
5016     - if (!(rate->flags & RATE_INFO_FLAGS_MCS))
5017     - return rate->legacy;
5018     -
5019     - /* the formula below does only work for MCS values smaller than 32 */
5020     - if (rate->mcs >= 32)
5021     - return 0;
5022     -
5023     - modulation = rate->mcs & 7;
5024     - streams = (rate->mcs >> 3) + 1;
5025     -
5026     - bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
5027     - 13500000 : 6500000;
5028     -
5029     - if (modulation < 4)
5030     - bitrate *= (modulation + 1);
5031     - else if (modulation == 4)
5032     - bitrate *= (modulation + 2);
5033     - else
5034     - bitrate *= (modulation + 3);
5035     -
5036     - bitrate *= streams;
5037     -
5038     - if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
5039     - bitrate = (bitrate / 9) * 10;
5040     -
5041     - /* do NOT round down here */
5042     - return (bitrate + 50000) / 100000;
5043     -}
5044     -
5045     static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
5046     int flags, struct net_device *dev,
5047     u8 *mac_addr, struct station_info *sinfo)
5048     @@ -1716,8 +1683,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
5049     if (!txrate)
5050     goto nla_put_failure;
5051    
5052     - /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */
5053     - bitrate = nl80211_calculate_bitrate(&sinfo->txrate);
5054     + /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
5055     + bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
5056     if (bitrate > 0)
5057     NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
5058    
5059     diff --git a/net/wireless/util.c b/net/wireless/util.c
5060     index 59361fd..a3c841a 100644
5061     --- a/net/wireless/util.c
5062     +++ b/net/wireless/util.c
5063     @@ -720,3 +720,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
5064    
5065     return err;
5066     }
5067     +
5068     +u16 cfg80211_calculate_bitrate(struct rate_info *rate)
5069     +{
5070     + int modulation, streams, bitrate;
5071     +
5072     + if (!(rate->flags & RATE_INFO_FLAGS_MCS))
5073     + return rate->legacy;
5074     +
5075     + /* the formula below does only work for MCS values smaller than 32 */
5076     + if (rate->mcs >= 32)
5077     + return 0;
5078     +
5079     + modulation = rate->mcs & 7;
5080     + streams = (rate->mcs >> 3) + 1;
5081     +
5082     + bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
5083     + 13500000 : 6500000;
5084     +
5085     + if (modulation < 4)
5086     + bitrate *= (modulation + 1);
5087     + else if (modulation == 4)
5088     + bitrate *= (modulation + 2);
5089     + else
5090     + bitrate *= (modulation + 3);
5091     +
5092     + bitrate *= streams;
5093     +
5094     + if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
5095     + bitrate = (bitrate / 9) * 10;
5096     +
5097     + /* do NOT round down here */
5098     + return (bitrate + 50000) / 100000;
5099     +}
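cfg80211_calculate_bitrate() reports rates in units of 100 kbit/s. Two worked examples of the MCS branch, wrapped in a small self-contained program so the arithmetic can be checked in isolation (plain ints stand in for the real struct rate_info fields):

#include <stdio.h>

/* Mirror of the MCS arithmetic above, outside of cfg80211. */
static unsigned int mcs_bitrate(int mcs, int wide40, int short_gi)
{
        int modulation = mcs & 7;
        int streams = (mcs >> 3) + 1;
        long long bitrate = wide40 ? 13500000 : 6500000;

        if (modulation < 4)
                bitrate *= (modulation + 1);
        else if (modulation == 4)
                bitrate *= (modulation + 2);
        else
                bitrate *= (modulation + 3);

        bitrate *= streams;

        if (short_gi)
                bitrate = (bitrate / 9) * 10;

        return (bitrate + 50000) / 100000;
}

int main(void)
{
        /* MCS 7, 20 MHz, long GI   -> 650  (65.0 Mbit/s)  */
        /* MCS 15, 40 MHz, short GI -> 3000 (300.0 Mbit/s) */
        printf("%u %u\n", mcs_bitrate(7, 0, 0), mcs_bitrate(15, 1, 1));
        return 0;
}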
5100     diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
5101     index 54face3..4198243 100644
5102     --- a/net/wireless/wext-compat.c
5103     +++ b/net/wireless/wext-compat.c
5104     @@ -1257,10 +1257,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
5105     if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
5106     return -EOPNOTSUPP;
5107    
5108     - rate->value = 0;
5109     -
5110     - if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
5111     - rate->value = 100000 * sinfo.txrate.legacy;
5112     + rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
5113    
5114     return 0;
5115     }
5116     diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
5117     index 999e8a7..25a5183 100644
5118     --- a/scripts/kconfig/Makefile
5119     +++ b/scripts/kconfig/Makefile
5120     @@ -198,7 +198,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
5121     HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
5122     HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK
5123    
5124     -HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
5125     +HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl
5126     HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
5127     -D LKC_DIRECT_LINK
5128    
5129     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
5130     index 8ec0274..e031952 100644
5131     --- a/security/keys/keyring.c
5132     +++ b/security/keys/keyring.c
5133     @@ -524,9 +524,8 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
5134     struct key *keyring;
5135     int bucket;
5136    
5137     - keyring = ERR_PTR(-EINVAL);
5138     if (!name)
5139     - goto error;
5140     + return ERR_PTR(-EINVAL);
5141    
5142     bucket = keyring_hash(name);
5143    
5144     @@ -553,17 +552,18 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
5145     KEY_SEARCH) < 0)
5146     continue;
5147    
5148     - /* we've got a match */
5149     - atomic_inc(&keyring->usage);
5150     - read_unlock(&keyring_name_lock);
5151     - goto error;
5152     + /* we've got a match but we might end up racing with
5153     + * key_cleanup() if the keyring is currently 'dead'
5154     + * (ie. it has a zero usage count) */
5155     + if (!atomic_inc_not_zero(&keyring->usage))
5156     + continue;
5157     + goto out;
5158     }
5159     }
5160    
5161     - read_unlock(&keyring_name_lock);
5162     keyring = ERR_PTR(-ENOKEY);
5163     -
5164     - error:
5165     +out:
5166     + read_unlock(&keyring_name_lock);
5167     return keyring;
5168    
5169     } /* end find_keyring_by_name() */
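The keyring lookup above switches to the usual "take a reference only if the object is still live" idiom: a keyring found under keyring_name_lock may already have dropped to zero usage and be queued for key_cleanup(), and a plain atomic_inc() would resurrect it. A minimal sketch of the idiom with a hypothetical object type (not the key code itself):

#include <asm/atomic.h>

struct live_obj {
        atomic_t usage;
        /* ... payload ... */
};

/* Return the object with an extra reference, or NULL if it is already dying. */
static struct live_obj *live_obj_get(struct live_obj *o)
{
        if (!atomic_inc_not_zero(&o->usage))
                return NULL;    /* usage already hit zero; teardown in progress */
        return o;
}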
5170     diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
5171     index 5c23afb..931cfda 100644
5172     --- a/security/keys/process_keys.c
5173     +++ b/security/keys/process_keys.c
5174     @@ -509,7 +509,7 @@ try_again:
5175    
5176     ret = install_thread_keyring();
5177     if (ret < 0) {
5178     - key = ERR_PTR(ret);
5179     + key_ref = ERR_PTR(ret);
5180     goto error;
5181     }
5182     goto reget_creds;
5183     @@ -527,7 +527,7 @@ try_again:
5184    
5185     ret = install_process_keyring();
5186     if (ret < 0) {
5187     - key = ERR_PTR(ret);
5188     + key_ref = ERR_PTR(ret);
5189     goto error;
5190     }
5191     goto reget_creds;
5192     @@ -586,7 +586,7 @@ try_again:
5193    
5194     case KEY_SPEC_GROUP_KEYRING:
5195     /* group keyrings are not yet supported */
5196     - key = ERR_PTR(-EINVAL);
5197     + key_ref = ERR_PTR(-EINVAL);
5198     goto error;
5199    
5200     case KEY_SPEC_REQKEY_AUTH_KEY:
5201     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5202     index a747871..0dc3418 100644
5203     --- a/sound/pci/hda/hda_intel.c
5204     +++ b/sound/pci/hda/hda_intel.c
5205     @@ -2263,16 +2263,23 @@ static int azx_dev_free(struct snd_device *device)
5206     * white/black-listing for position_fix
5207     */
5208     static struct snd_pci_quirk position_fix_list[] __devinitdata = {
5209     + SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
5210     SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
5211     SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
5212     SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
5213     SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
5214     - SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
5215     SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
5216     + SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
5217     + SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
5218     + SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
5219     + SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
5220     + SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
5221     + SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
5222     SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
5223     SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
5224     SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
5225     SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
5226     + SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
5227     SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
5228     SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
5229     {}
5230     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5231     index abfc558..187a467 100644
5232     --- a/sound/pci/hda/patch_realtek.c
5233     +++ b/sound/pci/hda/patch_realtek.c
5234     @@ -9116,6 +9116,7 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
5235     SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
5236     SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
5237     SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
5238     + SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
5239     SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
5240     SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
5241     SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
5242     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5243     index cb474c0..256ad3a 100644
5244     --- a/sound/pci/hda/patch_sigmatel.c
5245     +++ b/sound/pci/hda/patch_sigmatel.c
5246     @@ -2070,12 +2070,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
5247     SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
5248     "Intel D965", STAC_D965_3ST),
5249     /* Dell 3 stack systems */
5250     - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
5251     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
5252     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
5253     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
5254     /* Dell 3 stack systems with verb table in BIOS */
5255     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
5256     + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
5257     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
5258     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
5259     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
5260     diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
5261     index 718ef91..b1be92a 100644
5262     --- a/sound/soc/codecs/wm8350.c
5263     +++ b/sound/soc/codecs/wm8350.c
5264     @@ -423,8 +423,8 @@ static const struct soc_enum wm8350_enum[] = {
5265     SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
5266     };
5267    
5268     -static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
5269     -static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
5270     +static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
5271     +static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
5272     static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
5273     static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
5274     static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
5275     diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
5276     index b432f4d..6390ffe 100644
5277     --- a/sound/soc/codecs/wm8400.c
5278     +++ b/sound/soc/codecs/wm8400.c
5279     @@ -106,21 +106,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
5280     wm8400_reset_codec_reg_cache(wm8400->wm8400);
5281     }
5282    
5283     -static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
5284     +static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
5285    
5286     -static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
5287     +static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
5288    
5289     -static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
5290     +static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
5291    
5292     -static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
5293     +static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
5294    
5295     -static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
5296     +static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
5297    
5298     -static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
5299     +static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
5300    
5301     -static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
5302     +static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
5303    
5304     -static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
5305     +static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
5306    
5307     static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
5308     struct snd_ctl_elem_value *ucontrol)
5309     @@ -439,7 +439,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
5310     /* INMIX dB values */
5311     static const unsigned int in_mix_tlv[] = {
5312     TLV_DB_RANGE_HEAD(1),
5313     - 0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
5314     + 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
5315     };
5316    
5317     /* Left In PGA Connections */
5318     diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
5319     index 341481e..2f76f4d 100644
5320     --- a/sound/soc/codecs/wm8990.c
5321     +++ b/sound/soc/codecs/wm8990.c
5322     @@ -110,21 +110,21 @@ static const u16 wm8990_reg[] = {
5323    
5324     #define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
5325    
5326     -static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
5327     +static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
5328    
5329     -static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
5330     +static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
5331    
5332     -static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
5333     +static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
5334    
5335     -static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
5336     +static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
5337    
5338     -static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
5339     +static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
5340    
5341     -static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
5342     +static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
5343    
5344     -static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
5345     +static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
5346    
5347     -static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
5348     +static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
5349    
5350     static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
5351     struct snd_ctl_elem_value *ucontrol)
5352     @@ -450,7 +450,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
5353     /* INMIX dB values */
5354     static const unsigned int in_mix_tlv[] = {
5355     TLV_DB_RANGE_HEAD(1),
5356     - 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
5357     + 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
5358     };
5359    
5360     /* Left In PGA Connections */