Magellan Linux

Contents of /trunk/kernel-alx/patches-3.4/0133-3.4.34-all-fixes.patch

Revision 2110
Tue Mar 12 12:15:23 2013 UTC by niro
File size: 116201 bytes
Log message: -sync with upstream
1 diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
2 index 27d186a..6bbf936 100644
3 --- a/arch/arm/kernel/sched_clock.c
4 +++ b/arch/arm/kernel/sched_clock.c
5 @@ -84,11 +84,11 @@ static void notrace update_sched_clock(void)
6 * detectable in cyc_to_fixed_sched_clock().
7 */
8 raw_local_irq_save(flags);
9 - cd.epoch_cyc = cyc;
10 + cd.epoch_cyc_copy = cyc;
11 smp_wmb();
12 cd.epoch_ns = ns;
13 smp_wmb();
14 - cd.epoch_cyc_copy = cyc;
15 + cd.epoch_cyc = cyc;
16 raw_local_irq_restore(flags);
17 }
18
19 diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
20 index b7de471..b802f28 100644
21 --- a/arch/arm/mach-pxa/include/mach/smemc.h
22 +++ b/arch/arm/mach-pxa/include/mach/smemc.h
23 @@ -37,6 +37,7 @@
24 #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */
25 #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */
26 #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */
27 +#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */
28
29 /*
30 * More handy macros for PCMCIA
31 diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
32 index 7992305..f38aa89 100644
33 --- a/arch/arm/mach-pxa/smemc.c
34 +++ b/arch/arm/mach-pxa/smemc.c
35 @@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
36 __raw_writel(csadrcfg[1], CSADRCFG1);
37 __raw_writel(csadrcfg[2], CSADRCFG2);
38 __raw_writel(csadrcfg[3], CSADRCFG3);
39 + /* CSMSADRCFG wakes up in its default state (0), so we need to set it */
40 + __raw_writel(0x2, CSMSADRCFG);
41 }
42
43 static struct syscore_ops smemc_syscore_ops = {
44 @@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
45
46 static int __init smemc_init(void)
47 {
48 - if (cpu_is_pxa3xx())
49 + if (cpu_is_pxa3xx()) {
50 + /*
51 + * The only documentation we have on the
52 + * Chip Select Configuration Register (CSMSADRCFG) is that
53 + * it must be programmed to 0x2.
54 + * Moreover, in the bit definitions, the second bit
55 + * (CSMSADRCFG[1]) is called "SETALWAYS".
56 + * Other bits are reserved in this register.
57 + */
58 + __raw_writel(0x2, CSMSADRCFG);
59 +
60 register_syscore_ops(&smemc_syscore_ops);
61 + }
62
63 return 0;
64 }
65 diff --git a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
66 index 4135de8..13ed33c 100644
67 --- a/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
68 +++ b/arch/arm/mach-s3c24xx/include/mach/debug-macro.S
69 @@ -40,17 +40,17 @@
70 addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
71 addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
72 bic \rd, \rd, #0xff000
73 - ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
74 + ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
75 and \rd, \rd, #0x00ff0000
76 teq \rd, #0x00440000 @ is it 2440?
77 1004:
78 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
79 + ldr \rd, [\rx, # S3C2410_UFSTAT]
80 moveq \rd, \rd, lsr #SHIFT_2440TXF
81 tst \rd, #S3C2410_UFSTAT_TXFULL
82 .endm
83
84 .macro fifo_full_s3c2410 rd, rx
85 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
86 + ldr \rd, [\rx, # S3C2410_UFSTAT]
87 tst \rd, #S3C2410_UFSTAT_TXFULL
88 .endm
89
90 @@ -68,18 +68,18 @@
91 addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
92 addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
93 bic \rd, \rd, #0xff000
94 - ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
95 + ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
96 and \rd, \rd, #0x00ff0000
97 teq \rd, #0x00440000 @ is it 2440?
98
99 10000:
100 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
101 + ldr \rd, [\rx, # S3C2410_UFSTAT]
102 andne \rd, \rd, #S3C2410_UFSTAT_TXMASK
103 andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK
104 .endm
105
106 .macro fifo_level_s3c2410 rd, rx
107 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
108 + ldr \rd, [\rx, # S3C2410_UFSTAT]
109 and \rd, \rd, #S3C2410_UFSTAT_TXMASK
110 .endm
111
112 diff --git a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
113 index 7615a14..6a21bee 100644
114 --- a/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
115 +++ b/arch/arm/mach-s3c24xx/include/mach/entry-macro.S
116 @@ -31,10 +31,10 @@
117
118 @@ try the interrupt offset register, since it is there
119
120 - ldr \irqstat, [ \base, #INTPND ]
121 + ldr \irqstat, [\base, #INTPND ]
122 teq \irqstat, #0
123 beq 1002f
124 - ldr \irqnr, [ \base, #INTOFFSET ]
125 + ldr \irqnr, [\base, #INTOFFSET ]
126 mov \tmp, #1
127 tst \irqstat, \tmp, lsl \irqnr
128 bne 1001f
129 diff --git a/arch/arm/mach-s3c24xx/pm-h1940.S b/arch/arm/mach-s3c24xx/pm-h1940.S
130 index c93bf2d..6183a68 100644
131 --- a/arch/arm/mach-s3c24xx/pm-h1940.S
132 +++ b/arch/arm/mach-s3c24xx/pm-h1940.S
133 @@ -30,4 +30,4 @@
134
135 h1940_pm_return:
136 mov r0, #S3C2410_PA_GPIO
137 - ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ]
138 + ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO]
139 diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
140 index dd5b638..65200ae 100644
141 --- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
142 +++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
143 @@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend)
144 ldr r4, =S3C2410_REFRESH
145 ldr r5, =S3C24XX_MISCCR
146 ldr r6, =S3C2410_CLKCON
147 - ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB)
148 - ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB)
149 - ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB)
150 + ldr r7, [r4] @ get REFRESH (and ensure in TLB)
151 + ldr r8, [r5] @ get MISCCR (and ensure in TLB)
152 + ldr r9, [r6] @ get CLKCON (and ensure in TLB)
153
154 orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command
155 orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
156 @@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend)
157 @@ align next bit of code to cache line
158 .align 5
159 s3c2410_do_sleep:
160 - streq r7, [ r4 ] @ SDRAM sleep command
161 - streq r8, [ r5 ] @ SDRAM power-down config
162 - streq r9, [ r6 ] @ CPU sleep
163 + streq r7, [r4] @ SDRAM sleep command
164 + streq r8, [r5] @ SDRAM power-down config
165 + streq r9, [r6] @ CPU sleep
166 1: beq 1b
167 mov pc, r14
168 diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
169 index c82418e..5adaceb 100644
170 --- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
171 +++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
172 @@ -57,12 +57,12 @@ s3c2412_sleep_enter1:
173 * retry, as simply returning causes the system to lock.
174 */
175
176 - ldrne r9, [ r1 ]
177 - strne r9, [ r1 ]
178 - ldrne r9, [ r2 ]
179 - strne r9, [ r2 ]
180 - ldrne r9, [ r3 ]
181 - strne r9, [ r3 ]
182 + ldrne r9, [r1]
183 + strne r9, [r1]
184 + ldrne r9, [r2]
185 + strne r9, [r2]
186 + ldrne r9, [r3]
187 + strne r9, [r3]
188 bne s3c2412_sleep_enter1
189
190 mov pc, r14
191 diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
192 index 207e275..f3a9cff 100644
193 --- a/arch/arm/plat-samsung/include/plat/debug-macro.S
194 +++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
195 @@ -14,12 +14,12 @@
196 /* The S5PV210/S5PC110 implementations are as belows. */
197
198 .macro fifo_level_s5pv210 rd, rx
199 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
200 + ldr \rd, [\rx, # S3C2410_UFSTAT]
201 and \rd, \rd, #S5PV210_UFSTAT_TXMASK
202 .endm
203
204 .macro fifo_full_s5pv210 rd, rx
205 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
206 + ldr \rd, [\rx, # S3C2410_UFSTAT]
207 tst \rd, #S5PV210_UFSTAT_TXFULL
208 .endm
209
210 @@ -27,7 +27,7 @@
211 * most widely re-used */
212
213 .macro fifo_level_s3c2440 rd, rx
214 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
215 + ldr \rd, [\rx, # S3C2410_UFSTAT]
216 and \rd, \rd, #S3C2440_UFSTAT_TXMASK
217 .endm
218
219 @@ -36,7 +36,7 @@
220 #endif
221
222 .macro fifo_full_s3c2440 rd, rx
223 - ldr \rd, [ \rx, # S3C2410_UFSTAT ]
224 + ldr \rd, [\rx, # S3C2410_UFSTAT]
225 tst \rd, #S3C2440_UFSTAT_TXFULL
226 .endm
227
228 @@ -45,11 +45,11 @@
229 #endif
230
231 .macro senduart,rd,rx
232 - strb \rd, [\rx, # S3C2410_UTXH ]
233 + strb \rd, [\rx, # S3C2410_UTXH]
234 .endm
235
236 .macro busyuart, rd, rx
237 - ldr \rd, [ \rx, # S3C2410_UFCON ]
238 + ldr \rd, [\rx, # S3C2410_UFCON]
239 tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
240 beq 1001f @
241 @ FIFO enabled...
242 @@ -60,7 +60,7 @@
243
244 1001:
245 @ busy waiting for non fifo
246 - ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
247 + ldr \rd, [\rx, # S3C2410_UTRSTAT]
248 tst \rd, #S3C2410_UTRSTAT_TXFE
249 beq 1001b
250
251 @@ -68,7 +68,7 @@
252 .endm
253
254 .macro waituart,rd,rx
255 - ldr \rd, [ \rx, # S3C2410_UFCON ]
256 + ldr \rd, [\rx, # S3C2410_UFCON]
257 tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
258 beq 1001f @
259 @ FIFO enabled...
260 @@ -79,7 +79,7 @@
261 b 1002f
262 1001:
263 @ idle waiting for non fifo
264 - ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
265 + ldr \rd, [\rx, # S3C2410_UTRSTAT]
266 tst \rd, #S3C2410_UTRSTAT_TXFE
267 beq 1001b
268
269 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
270 index ee99f23..7df49fa 100644
271 --- a/arch/parisc/include/asm/pgtable.h
272 +++ b/arch/parisc/include/asm/pgtable.h
273 @@ -12,11 +12,10 @@
274
275 #include <linux/bitops.h>
276 #include <linux/spinlock.h>
277 +#include <linux/mm_types.h>
278 #include <asm/processor.h>
279 #include <asm/cache.h>
280
281 -struct vm_area_struct;
282 -
283 /*
284 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
285 * memory. For the return value to be meaningful, ADDR must be >=
286 @@ -40,7 +39,14 @@ struct vm_area_struct;
287 do{ \
288 *(pteptr) = (pteval); \
289 } while(0)
290 -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
291 +
292 +extern void purge_tlb_entries(struct mm_struct *, unsigned long);
293 +
294 +#define set_pte_at(mm, addr, ptep, pteval) \
295 + do { \
296 + set_pte(ptep, pteval); \
297 + purge_tlb_entries(mm, addr); \
298 + } while (0)
299
300 #endif /* !__ASSEMBLY__ */
301
302 @@ -466,6 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
303 old = pte_val(*ptep);
304 new = pte_val(pte_wrprotect(__pte (old)));
305 } while (cmpxchg((unsigned long *) ptep, old, new) != old);
306 + purge_tlb_entries(mm, addr);
307 #else
308 pte_t old_pte = *ptep;
309 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
310 diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
311 index 9d18189..fa21463 100644
312 --- a/arch/parisc/kernel/cache.c
313 +++ b/arch/parisc/kernel/cache.c
314 @@ -420,6 +420,24 @@ void kunmap_parisc(void *addr)
315 EXPORT_SYMBOL(kunmap_parisc);
316 #endif
317
318 +void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
319 +{
320 + unsigned long flags;
321 +
322 + /* Note: purge_tlb_entries can be called at startup with
323 + no context. */
324 +
325 + /* Disable preemption while we play with %sr1. */
326 + preempt_disable();
327 + mtsp(mm->context, 1);
328 + purge_tlb_start(flags);
329 + pdtlb(addr);
330 + pitlb(addr);
331 + purge_tlb_end(flags);
332 + preempt_enable();
333 +}
334 +EXPORT_SYMBOL(purge_tlb_entries);
335 +
336 void __flush_tlb_range(unsigned long sid, unsigned long start,
337 unsigned long end)
338 {
339 diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
340 index d7f6090..39833e0 100644
341 --- a/arch/powerpc/kernel/machine_kexec_64.c
342 +++ b/arch/powerpc/kernel/machine_kexec_64.c
343 @@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0;
344 static void kexec_smp_down(void *arg)
345 {
346 local_irq_disable();
347 + hard_irq_disable();
348 +
349 mb(); /* make sure our irqs are disabled before we say they are */
350 get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
351 while(kexec_all_irq_disabled == 0)
352 @@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void)
353 wake_offline_cpus();
354 smp_call_function(kexec_smp_down, NULL, /* wait */0);
355 local_irq_disable();
356 + hard_irq_disable();
357 +
358 mb(); /* make sure IRQs are disabled before we say they are */
359 get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
360
361 @@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void)
362 if (ppc_md.kexec_cpu_down)
363 ppc_md.kexec_cpu_down(0, 0);
364 local_irq_disable();
365 + hard_irq_disable();
366 }
367
368 #endif /* SMP */
369 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
370 index 217ce44..e00accf 100644
371 --- a/arch/s390/kvm/kvm-s390.c
372 +++ b/arch/s390/kvm/kvm-s390.c
373 @@ -677,6 +677,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
374 } else
375 prefix = 0;
376
377 + /*
378 + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
379 + * copying in vcpu load/put. Lets update our copies before we save
380 + * it into the save area
381 + */
382 + save_fp_regs(&vcpu->arch.guest_fpregs);
383 + save_access_regs(vcpu->run->s.regs.acrs);
384 +
385 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
386 vcpu->arch.guest_fpregs.fprs, 128, prefix))
387 return -EFAULT;
388 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
389 index c9866b0..b1478f4 100644
390 --- a/arch/x86/Kconfig
391 +++ b/arch/x86/Kconfig
392 @@ -1243,10 +1243,6 @@ config HAVE_ARCH_BOOTMEM
393 def_bool y
394 depends on X86_32 && NUMA
395
396 -config HAVE_ARCH_ALLOC_REMAP
397 - def_bool y
398 - depends on X86_32 && NUMA
399 -
400 config ARCH_HAVE_MEMORY_PRESENT
401 def_bool y
402 depends on X86_32 && DISCONTIGMEM
403 diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
404 index 55728e1..5e0286f 100644
405 --- a/arch/x86/include/asm/mmzone_32.h
406 +++ b/arch/x86/include/asm/mmzone_32.h
407 @@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
408
409 #include <asm/numaq.h>
410
411 -extern void resume_map_numa_kva(pgd_t *pgd);
412 -
413 -#else /* !CONFIG_NUMA */
414 -
415 -static inline void resume_map_numa_kva(pgd_t *pgd) {}
416 -
417 #endif /* CONFIG_NUMA */
418
419 #ifdef CONFIG_DISCONTIGMEM
420 diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
421 index 0a630dd..646d192 100644
422 --- a/arch/x86/kernel/cpu/mshyperv.c
423 +++ b/arch/x86/kernel/cpu/mshyperv.c
424 @@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void)
425 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
426 ms_hyperv.features, ms_hyperv.hints);
427
428 - clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
429 + if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
430 + clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
431 }
432
433 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
434 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
435 index 19d3fa0..c1e8394 100644
436 --- a/arch/x86/mm/numa.c
437 +++ b/arch/x86/mm/numa.c
438 @@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
439 static void __init setup_node_data(int nid, u64 start, u64 end)
440 {
441 const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
442 - bool remapped = false;
443 u64 nd_pa;
444 void *nd;
445 int tnid;
446 @@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
447 if (end && (end - start) < NODE_MIN_SIZE)
448 return;
449
450 - /* initialize remap allocator before aligning to ZONE_ALIGN */
451 - init_alloc_remap(nid, start, end);
452 -
453 start = roundup(start, ZONE_ALIGN);
454
455 printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
456 nid, start, end);
457
458 /*
459 - * Allocate node data. Try remap allocator first, node-local
460 - * memory and then any node. Never allocate in DMA zone.
461 + * Allocate node data. Try node-local memory and then any node.
462 + * Never allocate in DMA zone.
463 */
464 - nd = alloc_remap(nid, nd_size);
465 - if (nd) {
466 - nd_pa = __pa(nd);
467 - remapped = true;
468 - } else {
469 - nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
470 - if (!nd_pa) {
471 - pr_err("Cannot find %zu bytes in node %d\n",
472 - nd_size, nid);
473 - return;
474 - }
475 - nd = __va(nd_pa);
476 + nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
477 + if (!nd_pa) {
478 + pr_err("Cannot find %zu bytes in node %d\n",
479 + nd_size, nid);
480 + return;
481 }
482 + nd = __va(nd_pa);
483
484 /* report and initialize */
485 - printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n",
486 - nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
487 + printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n",
488 + nd_pa, nd_pa + nd_size - 1);
489 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
490 - if (!remapped && tnid != nid)
491 + if (tnid != nid)
492 printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);
493
494 node_data[nid] = nd;
495 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
496 index 534255a..73a6d73 100644
497 --- a/arch/x86/mm/numa_32.c
498 +++ b/arch/x86/mm/numa_32.c
499 @@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
500
501 extern unsigned long highend_pfn, highstart_pfn;
502
503 -#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
504 -
505 -static void *node_remap_start_vaddr[MAX_NUMNODES];
506 -void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
507 -
508 -/*
509 - * Remap memory allocator
510 - */
511 -static unsigned long node_remap_start_pfn[MAX_NUMNODES];
512 -static void *node_remap_end_vaddr[MAX_NUMNODES];
513 -static void *node_remap_alloc_vaddr[MAX_NUMNODES];
514 -
515 -/**
516 - * alloc_remap - Allocate remapped memory
517 - * @nid: NUMA node to allocate memory from
518 - * @size: The size of allocation
519 - *
520 - * Allocate @size bytes from the remap area of NUMA node @nid. The
521 - * size of the remap area is predetermined by init_alloc_remap() and
522 - * only the callers considered there should call this function. For
523 - * more info, please read the comment on top of init_alloc_remap().
524 - *
525 - * The caller must be ready to handle allocation failure from this
526 - * function and fall back to regular memory allocator in such cases.
527 - *
528 - * CONTEXT:
529 - * Single CPU early boot context.
530 - *
531 - * RETURNS:
532 - * Pointer to the allocated memory on success, %NULL on failure.
533 - */
534 -void *alloc_remap(int nid, unsigned long size)
535 -{
536 - void *allocation = node_remap_alloc_vaddr[nid];
537 -
538 - size = ALIGN(size, L1_CACHE_BYTES);
539 -
540 - if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
541 - return NULL;
542 -
543 - node_remap_alloc_vaddr[nid] += size;
544 - memset(allocation, 0, size);
545 -
546 - return allocation;
547 -}
548 -
549 -#ifdef CONFIG_HIBERNATION
550 -/**
551 - * resume_map_numa_kva - add KVA mapping to the temporary page tables created
552 - * during resume from hibernation
553 - * @pgd_base - temporary resume page directory
554 - */
555 -void resume_map_numa_kva(pgd_t *pgd_base)
556 -{
557 - int node;
558 -
559 - for_each_online_node(node) {
560 - unsigned long start_va, start_pfn, nr_pages, pfn;
561 -
562 - start_va = (unsigned long)node_remap_start_vaddr[node];
563 - start_pfn = node_remap_start_pfn[node];
564 - nr_pages = (node_remap_end_vaddr[node] -
565 - node_remap_start_vaddr[node]) >> PAGE_SHIFT;
566 -
567 - printk(KERN_DEBUG "%s: node %d\n", __func__, node);
568 -
569 - for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
570 - unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
571 - pgd_t *pgd = pgd_base + pgd_index(vaddr);
572 - pud_t *pud = pud_offset(pgd, vaddr);
573 - pmd_t *pmd = pmd_offset(pud, vaddr);
574 -
575 - set_pmd(pmd, pfn_pmd(start_pfn + pfn,
576 - PAGE_KERNEL_LARGE_EXEC));
577 -
578 - printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
579 - __func__, vaddr, start_pfn + pfn);
580 - }
581 - }
582 -}
583 -#endif
584 -
585 -/**
586 - * init_alloc_remap - Initialize remap allocator for a NUMA node
587 - * @nid: NUMA node to initizlie remap allocator for
588 - *
589 - * NUMA nodes may end up without any lowmem. As allocating pgdat and
590 - * memmap on a different node with lowmem is inefficient, a special
591 - * remap allocator is implemented which can be used by alloc_remap().
592 - *
593 - * For each node, the amount of memory which will be necessary for
594 - * pgdat and memmap is calculated and two memory areas of the size are
595 - * allocated - one in the node and the other in lowmem; then, the area
596 - * in the node is remapped to the lowmem area.
597 - *
598 - * As pgdat and memmap must be allocated in lowmem anyway, this
599 - * doesn't waste lowmem address space; however, the actual lowmem
600 - * which gets remapped over is wasted. The amount shouldn't be
601 - * problematic on machines this feature will be used.
602 - *
603 - * Initialization failure isn't fatal. alloc_remap() is used
604 - * opportunistically and the callers will fall back to other memory
605 - * allocation mechanisms on failure.
606 - */
607 -void __init init_alloc_remap(int nid, u64 start, u64 end)
608 -{
609 - unsigned long start_pfn = start >> PAGE_SHIFT;
610 - unsigned long end_pfn = end >> PAGE_SHIFT;
611 - unsigned long size, pfn;
612 - u64 node_pa, remap_pa;
613 - void *remap_va;
614 -
615 - /*
616 - * The acpi/srat node info can show hot-add memroy zones where
617 - * memory could be added but not currently present.
618 - */
619 - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
620 - nid, start_pfn, end_pfn);
621 -
622 - /* calculate the necessary space aligned to large page size */
623 - size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
624 - size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
625 - size = ALIGN(size, LARGE_PAGE_BYTES);
626 -
627 - /* allocate node memory and the lowmem remap area */
628 - node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
629 - if (!node_pa) {
630 - pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
631 - size, nid);
632 - return;
633 - }
634 - memblock_reserve(node_pa, size);
635 -
636 - remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
637 - max_low_pfn << PAGE_SHIFT,
638 - size, LARGE_PAGE_BYTES);
639 - if (!remap_pa) {
640 - pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
641 - size, nid);
642 - memblock_free(node_pa, size);
643 - return;
644 - }
645 - memblock_reserve(remap_pa, size);
646 - remap_va = phys_to_virt(remap_pa);
647 -
648 - /* perform actual remap */
649 - for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
650 - set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
651 - (node_pa >> PAGE_SHIFT) + pfn,
652 - PAGE_KERNEL_LARGE);
653 -
654 - /* initialize remap allocator parameters */
655 - node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
656 - node_remap_start_vaddr[nid] = remap_va;
657 - node_remap_end_vaddr[nid] = remap_va + size;
658 - node_remap_alloc_vaddr[nid] = remap_va;
659 -
660 - printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
661 - nid, node_pa, node_pa + size, remap_va, remap_va + size);
662 -}
663 -
664 void __init initmem_init(void)
665 {
666 x86_numa_init();
667 diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
668 index 7178c3a..ad86ec9 100644
669 --- a/arch/x86/mm/numa_internal.h
670 +++ b/arch/x86/mm/numa_internal.h
671 @@ -21,12 +21,6 @@ void __init numa_reset_distance(void);
672
673 void __init x86_numa_init(void);
674
675 -#ifdef CONFIG_X86_64
676 -static inline void init_alloc_remap(int nid, u64 start, u64 end) { }
677 -#else
678 -void __init init_alloc_remap(int nid, u64 start, u64 end);
679 -#endif
680 -
681 #ifdef CONFIG_NUMA_EMU
682 void __init numa_emulation(struct numa_meminfo *numa_meminfo,
683 int numa_dist_cnt);
684 diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
685 index 74202c1..7d28c88 100644
686 --- a/arch/x86/power/hibernate_32.c
687 +++ b/arch/x86/power/hibernate_32.c
688 @@ -129,8 +129,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
689 }
690 }
691
692 - resume_map_numa_kva(pgd_base);
693 -
694 return 0;
695 }
696
697 diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
698 index d69cc6c..67bc7ba 100644
699 --- a/arch/x86/xen/spinlock.c
700 +++ b/arch/x86/xen/spinlock.c
701 @@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
702 if (per_cpu(lock_spinners, cpu) == xl) {
703 ADD_STATS(released_slow_kicked, 1);
704 xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
705 - break;
706 }
707 }
708 }
709 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
710 index 26a06b8..b850cec 100644
711 --- a/drivers/base/bus.c
712 +++ b/drivers/base/bus.c
713 @@ -294,7 +294,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
714 struct device *dev;
715 int error = 0;
716
717 - if (!bus)
718 + if (!bus || !bus->p)
719 return -EINVAL;
720
721 klist_iter_init_node(&bus->p->klist_devices, &i,
722 @@ -328,7 +328,7 @@ struct device *bus_find_device(struct bus_type *bus,
723 struct klist_iter i;
724 struct device *dev;
725
726 - if (!bus)
727 + if (!bus || !bus->p)
728 return NULL;
729
730 klist_iter_init_node(&bus->p->klist_devices, &i,
731 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
732 index 1b1cbb5..97fc774 100644
733 --- a/drivers/base/dd.c
734 +++ b/drivers/base/dd.c
735 @@ -160,6 +160,8 @@ static int deferred_probe_initcall(void)
736
737 driver_deferred_probe_enable = true;
738 driver_deferred_probe_trigger();
739 + /* Sort as many dependencies as possible before exiting initcalls */
740 + flush_workqueue(deferred_wq);
741 return 0;
742 }
743 late_initcall(deferred_probe_initcall);
744 diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
745 index 9dcf76a..31dd451 100644
746 --- a/drivers/block/sunvdc.c
747 +++ b/drivers/block/sunvdc.c
748 @@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
749 int op_len, err;
750 void *req_buf;
751
752 - if (!(((u64)1 << ((u64)op - 1)) & port->operations))
753 + if (!(((u64)1 << (u64)op) & port->operations))
754 return -EOPNOTSUPP;
755
756 switch (op) {
757 diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
758 index bc6f5fa..819dfda 100644
759 --- a/drivers/dca/dca-core.c
760 +++ b/drivers/dca/dca-core.c
761 @@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
762
763 raw_spin_lock_irqsave(&dca_lock, flags);
764
765 + if (list_empty(&dca_domains)) {
766 + raw_spin_unlock_irqrestore(&dca_lock, flags);
767 + return;
768 + }
769 +
770 list_del(&dca->node);
771
772 pci_rc = dca_pci_rc_from_dev(dev);
773 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
774 index 4fd363f..c61e672 100644
775 --- a/drivers/gpu/drm/drm_crtc.c
776 +++ b/drivers/gpu/drm/drm_crtc.c
777 @@ -2023,7 +2023,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
778
779 switch (bpp) {
780 case 8:
781 - fmt = DRM_FORMAT_RGB332;
782 + fmt = DRM_FORMAT_C8;
783 break;
784 case 16:
785 if (depth == 15)
786 @@ -3409,6 +3409,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
787 int *bpp)
788 {
789 switch (format) {
790 + case DRM_FORMAT_C8:
791 case DRM_FORMAT_RGB332:
792 case DRM_FORMAT_BGR233:
793 *depth = 8;
794 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
795 index 9d9835a..384edc6 100644
796 --- a/drivers/gpu/drm/drm_edid.c
797 +++ b/drivers/gpu/drm/drm_edid.c
798 @@ -1769,7 +1769,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
799 num_modes += add_cvt_modes(connector, edid);
800 num_modes += add_standard_modes(connector, edid);
801 num_modes += add_established_modes(connector, edid);
802 - num_modes += add_inferred_modes(connector, edid);
803 + if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
804 + num_modes += add_inferred_modes(connector, edid);
805 num_modes += add_cea_modes(connector, edid);
806
807 if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
808 diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
809 index 37c9a52..767782a 100644
810 --- a/drivers/gpu/drm/drm_usb.c
811 +++ b/drivers/gpu/drm/drm_usb.c
812 @@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
813
814 usbdev = interface_to_usbdev(interface);
815 dev->usbdev = usbdev;
816 - dev->dev = &usbdev->dev;
817 + dev->dev = &interface->dev;
818
819 mutex_lock(&drm_global_mutex);
820
821 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
822 index 3c9b9c5..67f6db5 100644
823 --- a/drivers/gpu/drm/i915/intel_display.c
824 +++ b/drivers/gpu/drm/i915/intel_display.c
825 @@ -142,8 +142,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
826 .vco = { .min = 1400000, .max = 2800000 },
827 .n = { .min = 1, .max = 6 },
828 .m = { .min = 70, .max = 120 },
829 - .m1 = { .min = 10, .max = 22 },
830 - .m2 = { .min = 5, .max = 9 },
831 + .m1 = { .min = 8, .max = 18 },
832 + .m2 = { .min = 3, .max = 7 },
833 .p = { .min = 5, .max = 80 },
834 .p1 = { .min = 1, .max = 8 },
835 .p2 = { .dot_limit = 200000,
836 @@ -3303,6 +3303,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
837 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
838 int pipe = intel_crtc->pipe;
839 int plane = intel_crtc->plane;
840 + u32 pctl;
841
842 if (!intel_crtc->active)
843 return;
844 @@ -3318,6 +3319,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
845
846 intel_disable_plane(dev_priv, plane, pipe);
847 intel_disable_pipe(dev_priv, pipe);
848 +
849 + /* Disable pannel fitter if it is on this pipe. */
850 + pctl = I915_READ(PFIT_CONTROL);
851 + if ((pctl & PFIT_ENABLE) &&
852 + ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
853 + I915_WRITE(PFIT_CONTROL, 0);
854 +
855 intel_disable_pll(dev_priv, pipe);
856
857 intel_crtc->active = false;
858 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
859 index 15594a3..ebbfbd2 100644
860 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
861 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
862 @@ -258,8 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
863 radeon_crtc->enabled = true;
864 /* adjust pm to dpms changes BEFORE enabling crtcs */
865 radeon_pm_compute_clocks(rdev);
866 - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
867 - atombios_powergate_crtc(crtc, ATOM_DISABLE);
868 atombios_enable_crtc(crtc, ATOM_ENABLE);
869 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
870 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
871 @@ -277,8 +275,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
872 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
873 atombios_enable_crtc(crtc, ATOM_DISABLE);
874 radeon_crtc->enabled = false;
875 - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
876 - atombios_powergate_crtc(crtc, ATOM_ENABLE);
877 /* adjust pm to dpms changes AFTER disabling crtcs */
878 radeon_pm_compute_clocks(rdev);
879 break;
880 @@ -1670,6 +1666,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
881 int i;
882
883 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
884 + if (ASIC_IS_DCE6(rdev))
885 + atombios_powergate_crtc(crtc, ATOM_ENABLE);
886
887 for (i = 0; i < rdev->num_crtc; i++) {
888 if (rdev->mode_info.crtcs[i] &&
889 diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
890 index e760575..2b8c4fd 100644
891 --- a/drivers/gpu/drm/udl/udl_drv.h
892 +++ b/drivers/gpu/drm/udl/udl_drv.h
893 @@ -74,6 +74,8 @@ struct udl_framebuffer {
894 struct drm_framebuffer base;
895 struct udl_gem_object *obj;
896 bool active_16; /* active on the 16-bit channel */
897 + int x1, y1, x2, y2; /* dirty rect */
898 + spinlock_t dirty_lock;
899 };
900
901 #define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
902 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
903 index b9282cf..f02d223 100644
904 --- a/drivers/gpu/drm/udl/udl_fb.c
905 +++ b/drivers/gpu/drm/udl/udl_fb.c
906 @@ -22,9 +22,9 @@
907
908 #include "drm_fb_helper.h"
909
910 -#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
911 +#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
912
913 -static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */
914 +static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
915 static int fb_bpp = 16;
916
917 module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
918 @@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
919 struct urb *urb;
920 int aligned_x;
921 int bpp = (fb->base.bits_per_pixel / 8);
922 + int x2, y2;
923 + bool store_for_later = false;
924 + unsigned long flags;
925
926 if (!fb->active_16)
927 return 0;
928 @@ -160,8 +163,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
929 if (!fb->obj->vmapping)
930 udl_gem_vmap(fb->obj);
931
932 - start_cycles = get_cycles();
933 -
934 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
935 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
936 x = aligned_x;
937 @@ -171,19 +172,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
938 (y + height > fb->base.height))
939 return -EINVAL;
940
941 + /* if we are in atomic just store the info
942 + can't test inside spin lock */
943 + if (in_atomic())
944 + store_for_later = true;
945 +
946 + x2 = x + width - 1;
947 + y2 = y + height - 1;
948 +
949 + spin_lock_irqsave(&fb->dirty_lock, flags);
950 +
951 + if (fb->y1 < y)
952 + y = fb->y1;
953 + if (fb->y2 > y2)
954 + y2 = fb->y2;
955 + if (fb->x1 < x)
956 + x = fb->x1;
957 + if (fb->x2 > x2)
958 + x2 = fb->x2;
959 +
960 + if (store_for_later) {
961 + fb->x1 = x;
962 + fb->x2 = x2;
963 + fb->y1 = y;
964 + fb->y2 = y2;
965 + spin_unlock_irqrestore(&fb->dirty_lock, flags);
966 + return 0;
967 + }
968 +
969 + fb->x1 = fb->y1 = INT_MAX;
970 + fb->x2 = fb->y2 = 0;
971 +
972 + spin_unlock_irqrestore(&fb->dirty_lock, flags);
973 + start_cycles = get_cycles();
974 +
975 urb = udl_get_urb(dev);
976 if (!urb)
977 return 0;
978 cmd = urb->transfer_buffer;
979
980 - for (i = y; i < y + height ; i++) {
981 + for (i = y; i <= y2 ; i++) {
982 const int line_offset = fb->base.pitches[0] * i;
983 const int byte_offset = line_offset + (x * bpp);
984 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
985 if (udl_render_hline(dev, bpp, &urb,
986 (char *) fb->obj->vmapping,
987 &cmd, byte_offset, dev_byte_offset,
988 - width * bpp,
989 + (x2 - x + 1) * bpp,
990 &bytes_identical, &bytes_sent))
991 goto error;
992 }
993 @@ -408,6 +443,7 @@ udl_framebuffer_init(struct drm_device *dev,
994 {
995 int ret;
996
997 + spin_lock_init(&ufb->dirty_lock);
998 ufb->obj = obj;
999 ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
1000 drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
1001 diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
1002 index aa95870..9e57285 100644
1003 --- a/drivers/hid/hid-wiimote-ext.c
1004 +++ b/drivers/hid/hid-wiimote-ext.c
1005 @@ -378,14 +378,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
1006
1007 if (ext->motionp) {
1008 input_report_key(ext->input,
1009 - wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
1010 + wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
1011 input_report_key(ext->input,
1012 - wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
1013 + wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
1014 } else {
1015 input_report_key(ext->input,
1016 - wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
1017 + wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
1018 input_report_key(ext->input,
1019 - wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
1020 + wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
1021 }
1022
1023 input_sync(ext->input);
1024 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1025 index 5d71873..1711924 100644
1026 --- a/drivers/iommu/intel-iommu.c
1027 +++ b/drivers/iommu/intel-iommu.c
1028 @@ -4212,13 +4212,19 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
1029 {
1030 /*
1031 * Mobile 4 Series Chipset neglects to set RWBF capability,
1032 - * but needs it:
1033 + * but needs it. Same seems to hold for the desktop versions.
1034 */
1035 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
1036 rwbf_quirk = 1;
1037 }
1038
1039 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
1040 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
1041 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
1042 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
1043 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
1044 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
1045 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
1046
1047 #define GGC 0x52
1048 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
1049 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
1050 index 8abdaf6..be46052 100644
1051 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
1052 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
1053 @@ -232,15 +232,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
1054
1055 static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
1056 {
1057 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1058 + struct pltfm_imx_data *imx_data = pltfm_host->priv;
1059 +
1060 if (unlikely(reg == SDHCI_HOST_VERSION)) {
1061 - u16 val = readw(host->ioaddr + (reg ^ 2));
1062 - /*
1063 - * uSDHC supports SDHCI v3.0, but it's encoded as value
1064 - * 0x3 in host controller version register, which violates
1065 - * SDHCI_SPEC_300 definition. Work it around here.
1066 - */
1067 - if ((val & SDHCI_SPEC_VER_MASK) == 3)
1068 - return --val;
1069 + reg ^= 2;
1070 + if (is_imx6q_usdhc(imx_data)) {
1071 + /*
1072 + * The usdhc register returns a wrong host version.
1073 + * Correct it here.
1074 + */
1075 + return SDHCI_SPEC_300;
1076 + }
1077 }
1078
1079 return readw(host->ioaddr + reg);
1080 diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
1081 index 315b96e..9fdd198 100644
1082 --- a/drivers/net/wireless/b43/dma.h
1083 +++ b/drivers/net/wireless/b43/dma.h
1084 @@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
1085
1086 /* DMA engine tuning knobs */
1087 #define B43_TXRING_SLOTS 256
1088 -#define B43_RXRING_SLOTS 64
1089 +#define B43_RXRING_SLOTS 256
1090 #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
1091 #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
1092
1093 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
1094 index be20cf7..af30777 100644
1095 --- a/drivers/net/wireless/p54/p54usb.c
1096 +++ b/drivers/net/wireless/p54/p54usb.c
1097 @@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
1098 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
1099 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
1100 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
1101 - {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
1102 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
1103 + {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
1104 {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
1105 {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
1106 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
1107 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1108 index 61e5768..8cf41bb 100644
1109 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1110 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1111 @@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
1112 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
1113 /* RTL8188CUS-VL */
1114 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
1115 + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
1116 /* 8188 Combo for BC4 */
1117 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
1118
1119 diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
1120 index 17cd028..6ce8484 100644
1121 --- a/drivers/net/wireless/rtlwifi/usb.c
1122 +++ b/drivers/net/wireless/rtlwifi/usb.c
1123 @@ -42,8 +42,12 @@
1124
1125 static void usbctrl_async_callback(struct urb *urb)
1126 {
1127 - if (urb)
1128 - kfree(urb->context);
1129 + if (urb) {
1130 + /* free dr */
1131 + kfree(urb->setup_packet);
1132 + /* free databuf */
1133 + kfree(urb->transfer_buffer);
1134 + }
1135 }
1136
1137 static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
1138 @@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
1139 u8 reqtype;
1140 struct usb_ctrlrequest *dr;
1141 struct urb *urb;
1142 - struct rtl819x_async_write_data {
1143 - u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
1144 - struct usb_ctrlrequest dr;
1145 - } *buf;
1146 + const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
1147 + u8 *databuf;
1148 +
1149 + if (WARN_ON_ONCE(len > databuf_maxlen))
1150 + len = databuf_maxlen;
1151
1152 pipe = usb_sndctrlpipe(udev, 0); /* write_out */
1153 reqtype = REALTEK_USB_VENQT_WRITE;
1154
1155 - buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
1156 - if (!buf)
1157 + dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
1158 + if (!dr)
1159 return -ENOMEM;
1160
1161 + databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
1162 + if (!databuf) {
1163 + kfree(dr);
1164 + return -ENOMEM;
1165 + }
1166 +
1167 urb = usb_alloc_urb(0, GFP_ATOMIC);
1168 if (!urb) {
1169 - kfree(buf);
1170 + kfree(databuf);
1171 + kfree(dr);
1172 return -ENOMEM;
1173 }
1174
1175 - dr = &buf->dr;
1176 -
1177 dr->bRequestType = reqtype;
1178 dr->bRequest = request;
1179 dr->wValue = cpu_to_le16(value);
1180 dr->wIndex = cpu_to_le16(index);
1181 dr->wLength = cpu_to_le16(len);
1182 /* data are already in little-endian order */
1183 - memcpy(buf, pdata, len);
1184 + memcpy(databuf, pdata, len);
1185 usb_fill_control_urb(urb, udev, pipe,
1186 - (unsigned char *)dr, buf, len,
1187 - usbctrl_async_callback, buf);
1188 + (unsigned char *)dr, databuf, len,
1189 + usbctrl_async_callback, NULL);
1190 rc = usb_submit_urb(urb, GFP_ATOMIC);
1191 - if (rc < 0)
1192 - kfree(buf);
1193 + if (rc < 0) {
1194 + kfree(databuf);
1195 + kfree(dr);
1196 + }
1197 usb_free_urb(urb);
1198 return rc;
1199 }
1200 diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1201 index b8c5193..221f426 100644
1202 --- a/drivers/net/xen-netback/interface.c
1203 +++ b/drivers/net/xen-netback/interface.c
1204 @@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
1205 static void xenvif_down(struct xenvif *vif)
1206 {
1207 disable_irq(vif->irq);
1208 + del_timer_sync(&vif->credit_timeout);
1209 xen_netbk_deschedule_xenvif(vif);
1210 xen_netbk_remove_xenvif(vif);
1211 }
1212 @@ -363,8 +364,6 @@ void xenvif_disconnect(struct xenvif *vif)
1213 atomic_dec(&vif->refcnt);
1214 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
1215
1216 - del_timer_sync(&vif->credit_timeout);
1217 -
1218 if (vif->irq)
1219 unbind_from_irqhandler(vif->irq, vif);
1220
1221 diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1222 index e2793d0..2bdf798 100644
1223 --- a/drivers/net/xen-netback/netback.c
1224 +++ b/drivers/net/xen-netback/netback.c
1225 @@ -883,13 +883,13 @@ static int netbk_count_requests(struct xenvif *vif,
1226 if (frags >= work_to_do) {
1227 netdev_err(vif->dev, "Need more frags\n");
1228 netbk_fatal_tx_err(vif);
1229 - return -frags;
1230 + return -ENODATA;
1231 }
1232
1233 if (unlikely(frags >= MAX_SKB_FRAGS)) {
1234 netdev_err(vif->dev, "Too many frags\n");
1235 netbk_fatal_tx_err(vif);
1236 - return -frags;
1237 + return -E2BIG;
1238 }
1239
1240 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
1241 @@ -897,7 +897,7 @@ static int netbk_count_requests(struct xenvif *vif,
1242 if (txp->size > first->size) {
1243 netdev_err(vif->dev, "Frag is bigger than frame.\n");
1244 netbk_fatal_tx_err(vif);
1245 - return -frags;
1246 + return -EIO;
1247 }
1248
1249 first->size -= txp->size;
1250 @@ -907,7 +907,7 @@ static int netbk_count_requests(struct xenvif *vif,
1251 netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
1252 txp->offset, txp->size);
1253 netbk_fatal_tx_err(vif);
1254 - return -frags;
1255 + return -EINVAL;
1256 }
1257 } while ((txp++)->flags & XEN_NETTXF_more_data);
1258 return frags;
1259 diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
1260 index 86e4a1a..6bb02ab 100644
1261 --- a/drivers/pcmcia/vrc4171_card.c
1262 +++ b/drivers/pcmcia/vrc4171_card.c
1263 @@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
1264 socket = &vrc4171_sockets[slot];
1265 socket->csc_irq = search_nonuse_irq();
1266 socket->io_irq = search_nonuse_irq();
1267 + spin_lock_init(&socket->lock);
1268
1269 return 0;
1270 }
1271 diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
1272 index d74e9ae..f97b2aa 100644
1273 --- a/drivers/s390/kvm/kvm_virtio.c
1274 +++ b/drivers/s390/kvm/kvm_virtio.c
1275 @@ -418,6 +418,26 @@ static void kvm_extint_handler(struct ext_code ext_code,
1276 }
1277
1278 /*
1279 + * For s390-virtio, we expect a page above main storage containing
1280 + * the virtio configuration. Try to actually load from this area
1281 + * in order to figure out if the host provides this page.
1282 + */
1283 +static int __init test_devices_support(unsigned long addr)
1284 +{
1285 + int ret = -EIO;
1286 +
1287 + asm volatile(
1288 + "0: lura 0,%1\n"
1289 + "1: xgr %0,%0\n"
1290 + "2:\n"
1291 + EX_TABLE(0b,2b)
1292 + EX_TABLE(1b,2b)
1293 + : "+d" (ret)
1294 + : "a" (addr)
1295 + : "0", "cc");
1296 + return ret;
1297 +}
1298 +/*
1299 * Init function for virtio
1300 * devices are in a single page above top of "normal" mem
1301 */
1302 @@ -428,21 +448,23 @@ static int __init kvm_devices_init(void)
1303 if (!MACHINE_IS_KVM)
1304 return -ENODEV;
1305
1306 + if (test_devices_support(real_memory_size) < 0)
1307 + return -ENODEV;
1308 +
1309 + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
1310 + if (rc)
1311 + return rc;
1312 +
1313 + kvm_devices = (void *) real_memory_size;
1314 +
1315 kvm_root = root_device_register("kvm_s390");
1316 if (IS_ERR(kvm_root)) {
1317 rc = PTR_ERR(kvm_root);
1318 printk(KERN_ERR "Could not register kvm_s390 root device");
1319 + vmem_remove_mapping(real_memory_size, PAGE_SIZE);
1320 return rc;
1321 }
1322
1323 - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
1324 - if (rc) {
1325 - root_device_unregister(kvm_root);
1326 - return rc;
1327 - }
1328 -
1329 - kvm_devices = (void *) real_memory_size;
1330 -
1331 INIT_WORK(&hotplug_work, hotplug_devices);
1332
1333 service_subclass_irq_register();
1334 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
1335 index b67c107..cf67ce5 100644
1336 --- a/drivers/staging/comedi/comedi_fops.c
1337 +++ b/drivers/staging/comedi/comedi_fops.c
1338 @@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
1339 /* Device config is special, because it must work on
1340 * an unconfigured device. */
1341 if (cmd == COMEDI_DEVCONFIG) {
1342 + if (minor >= COMEDI_NUM_BOARD_MINORS) {
1343 + /* Device config not appropriate on non-board minors. */
1344 + rc = -ENOTTY;
1345 + goto done;
1346 + }
1347 rc = do_devconfig_ioctl(dev,
1348 (struct comedi_devconfig __user *)arg);
1349 if (rc == 0)
1350 diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
1351 index c612ab5..f759352 100644
1352 --- a/drivers/staging/vt6656/usbpipe.c
1353 +++ b/drivers/staging/vt6656/usbpipe.c
1354 @@ -168,6 +168,11 @@ int PIPEnsControlOut(
1355 if (pDevice->Flags & fMP_CONTROL_WRITES)
1356 return STATUS_FAILURE;
1357
1358 + if (pDevice->Flags & fMP_CONTROL_READS)
1359 + return STATUS_FAILURE;
1360 +
1361 + MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
1362 +
1363 pDevice->sUsbCtlRequest.bRequestType = 0x40;
1364 pDevice->sUsbCtlRequest.bRequest = byRequest;
1365 pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
1366 @@ -182,12 +187,13 @@ int PIPEnsControlOut(
1367
1368 ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
1369 if (ntStatus != 0) {
1370 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
1371 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
1372 + "control send request submission failed: %d\n",
1373 + ntStatus);
1374 + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
1375 return STATUS_FAILURE;
1376 }
1377 - else {
1378 - MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
1379 - }
1380 +
1381 spin_unlock_irq(&pDevice->lock);
1382 for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
1383
1384 @@ -227,6 +233,11 @@ int PIPEnsControlIn(
1385 if (pDevice->Flags & fMP_CONTROL_READS)
1386 return STATUS_FAILURE;
1387
1388 + if (pDevice->Flags & fMP_CONTROL_WRITES)
1389 + return STATUS_FAILURE;
1390 +
1391 + MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
1392 +
1393 pDevice->sUsbCtlRequest.bRequestType = 0xC0;
1394 pDevice->sUsbCtlRequest.bRequest = byRequest;
1395 pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
1396 @@ -240,10 +251,11 @@ int PIPEnsControlIn(
1397
1398 ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
1399 if (ntStatus != 0) {
1400 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
1401 - }else {
1402 - MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
1403 - }
1404 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
1405 + "control request submission failed: %d\n", ntStatus);
1406 + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
1407 + return STATUS_FAILURE;
1408 + }
1409
1410 spin_unlock_irq(&pDevice->lock);
1411 for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {
1412 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1413 index 26c62f0..4ecf9d6 100644
1414 --- a/drivers/target/target_core_device.c
1415 +++ b/drivers/target/target_core_device.c
1416 @@ -1230,6 +1230,8 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1417
1418 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1419 {
1420 + int block_size = dev->se_sub_dev->se_dev_attrib.block_size;
1421 +
1422 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1423 pr_err("dev[%p]: Unable to change SE Device"
1424 " fabric_max_sectors while dev_export_obj: %d count exists\n",
1425 @@ -1267,8 +1269,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1426 /*
1427 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1428 */
1429 + if (!block_size) {
1430 + block_size = 512;
1431 + pr_warn("Defaulting to 512 for zero block_size\n");
1432 + }
1433 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1434 - dev->se_sub_dev->se_dev_attrib.block_size);
1435 + block_size);
1436
1437 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
1438 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1439 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
1440 index 90dff82..4a418e4 100644
1441 --- a/drivers/tty/n_gsm.c
1442 +++ b/drivers/tty/n_gsm.c
1443 @@ -1692,6 +1692,8 @@ static inline void dlci_put(struct gsm_dlci *dlci)
1444 kref_put(&dlci->ref, gsm_dlci_free);
1445 }
1446
1447 +static void gsm_destroy_network(struct gsm_dlci *dlci);
1448 +
1449 /**
1450 * gsm_dlci_release - release DLCI
1451 * @dlci: DLCI to destroy
1452 @@ -1705,9 +1707,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
1453 {
1454 struct tty_struct *tty = tty_port_tty_get(&dlci->port);
1455 if (tty) {
1456 + mutex_lock(&dlci->mutex);
1457 + gsm_destroy_network(dlci);
1458 + mutex_unlock(&dlci->mutex);
1459 +
1460 + /* tty_vhangup needs the tty_lock, so unlock and
1461 + relock after doing the hangup. */
1462 + tty_unlock();
1463 tty_vhangup(tty);
1464 + tty_lock();
1465 + tty_port_tty_set(&dlci->port, NULL);
1466 tty_kref_put(tty);
1467 }
1468 + dlci->state = DLCI_CLOSED;
1469 dlci_put(dlci);
1470 }
1471
1472 @@ -2933,6 +2945,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
1473
1474 if (dlci == NULL)
1475 return;
1476 + if (dlci->state == DLCI_CLOSED)
1477 + return;
1478 mutex_lock(&dlci->mutex);
1479 gsm_destroy_network(dlci);
1480 mutex_unlock(&dlci->mutex);
1481 @@ -2951,6 +2965,8 @@ out:
1482 static void gsmtty_hangup(struct tty_struct *tty)
1483 {
1484 struct gsm_dlci *dlci = tty->driver_data;
1485 + if (dlci->state == DLCI_CLOSED)
1486 + return;
1487 tty_port_hangup(&dlci->port);
1488 gsm_dlci_begin_close(dlci);
1489 }
1490 @@ -2958,9 +2974,12 @@ static void gsmtty_hangup(struct tty_struct *tty)
1491 static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
1492 int len)
1493 {
1494 + int sent;
1495 struct gsm_dlci *dlci = tty->driver_data;
1496 + if (dlci->state == DLCI_CLOSED)
1497 + return -EINVAL;
1498 /* Stuff the bytes into the fifo queue */
1499 - int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
1500 + sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
1501 /* Need to kick the channel */
1502 gsm_dlci_data_kick(dlci);
1503 return sent;
1504 @@ -2969,18 +2988,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
1505 static int gsmtty_write_room(struct tty_struct *tty)
1506 {
1507 struct gsm_dlci *dlci = tty->driver_data;
1508 + if (dlci->state == DLCI_CLOSED)
1509 + return -EINVAL;
1510 return TX_SIZE - kfifo_len(dlci->fifo);
1511 }
1512
1513 static int gsmtty_chars_in_buffer(struct tty_struct *tty)
1514 {
1515 struct gsm_dlci *dlci = tty->driver_data;
1516 + if (dlci->state == DLCI_CLOSED)
1517 + return -EINVAL;
1518 return kfifo_len(dlci->fifo);
1519 }
1520
1521 static void gsmtty_flush_buffer(struct tty_struct *tty)
1522 {
1523 struct gsm_dlci *dlci = tty->driver_data;
1524 + if (dlci->state == DLCI_CLOSED)
1525 + return;
1526 /* Caution needed: If we implement reliable transport classes
1527 then the data being transmitted can't simply be junked once
1528 it has first hit the stack. Until then we can just blow it
1529 @@ -2999,6 +3024,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
1530 static int gsmtty_tiocmget(struct tty_struct *tty)
1531 {
1532 struct gsm_dlci *dlci = tty->driver_data;
1533 + if (dlci->state == DLCI_CLOSED)
1534 + return -EINVAL;
1535 return dlci->modem_rx;
1536 }
1537
1538 @@ -3008,6 +3035,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
1539 struct gsm_dlci *dlci = tty->driver_data;
1540 unsigned int modem_tx = dlci->modem_tx;
1541
1542 + if (dlci->state == DLCI_CLOSED)
1543 + return -EINVAL;
1544 modem_tx &= ~clear;
1545 modem_tx |= set;
1546
1547 @@ -3026,6 +3055,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
1548 struct gsm_netconfig nc;
1549 int index;
1550
1551 + if (dlci->state == DLCI_CLOSED)
1552 + return -EINVAL;
1553 switch (cmd) {
1554 case GSMIOC_ENABLE_NET:
1555 if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
1556 @@ -3052,6 +3083,9 @@ static int gsmtty_ioctl(struct tty_struct *tty,
1557
1558 static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
1559 {
1560 + struct gsm_dlci *dlci = tty->driver_data;
1561 + if (dlci->state == DLCI_CLOSED)
1562 + return;
1563 /* For the moment its fixed. In actual fact the speed information
1564 for the virtual channel can be propogated in both directions by
1565 the RPN control message. This however rapidly gets nasty as we
1566 @@ -3063,6 +3097,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
1567 static void gsmtty_throttle(struct tty_struct *tty)
1568 {
1569 struct gsm_dlci *dlci = tty->driver_data;
1570 + if (dlci->state == DLCI_CLOSED)
1571 + return;
1572 if (tty->termios->c_cflag & CRTSCTS)
1573 dlci->modem_tx &= ~TIOCM_DTR;
1574 dlci->throttled = 1;
1575 @@ -3073,6 +3109,8 @@ static void gsmtty_throttle(struct tty_struct *tty)
1576 static void gsmtty_unthrottle(struct tty_struct *tty)
1577 {
1578 struct gsm_dlci *dlci = tty->driver_data;
1579 + if (dlci->state == DLCI_CLOSED)
1580 + return;
1581 if (tty->termios->c_cflag & CRTSCTS)
1582 dlci->modem_tx |= TIOCM_DTR;
1583 dlci->throttled = 0;
1584 @@ -3084,6 +3122,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
1585 {
1586 struct gsm_dlci *dlci = tty->driver_data;
1587 int encode = 0; /* Off */
1588 + if (dlci->state == DLCI_CLOSED)
1589 + return -EINVAL;
1590
1591 if (state == -1) /* "On indefinitely" - we can't encode this
1592 properly */
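The n_gsm hunks above all add the same guard: each tty operation first checks whether the DLCI has already been closed before touching the fifo or the modem state, returning -EINVAL (or simply bailing out in the void callbacks). A minimal sketch of that guard pattern follows; the names are invented for illustration and are not the driver's real structures.

	#include <linux/errno.h>	/* EINVAL, as used by the driver */

	enum fake_dlci_state { FAKE_DLCI_CLOSED, FAKE_DLCI_OPEN };

	struct fake_dlci {
		enum fake_dlci_state state;
		int queued;			/* bytes currently held in the fifo */
	};

	/* Mirrors gsmtty_chars_in_buffer() above: refuse to report state
	 * for a channel that has already been torn down. */
	static int fake_chars_in_buffer(struct fake_dlci *dlci)
	{
		if (dlci->state == FAKE_DLCI_CLOSED)
			return -EINVAL;
		return dlci->queued;
	}

Every entry point gets the same early return, so none of them operate on the fifo or modem state once the mux has marked the DLCI closed.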
1593 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
1594 index a1b9a2f..f8d03da 100644
1595 --- a/drivers/tty/tty_ioctl.c
1596 +++ b/drivers/tty/tty_ioctl.c
1597 @@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
1598 if (opt & TERMIOS_WAIT) {
1599 tty_wait_until_sent(tty, 0);
1600 if (signal_pending(current))
1601 - return -EINTR;
1602 + return -ERESTARTSYS;
1603 }
1604
1605 tty_set_termios(tty, &tmp_termios);
1606 @@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
1607 if (opt & TERMIOS_WAIT) {
1608 tty_wait_until_sent(tty, 0);
1609 if (signal_pending(current))
1610 - return -EINTR;
1611 + return -ERESTARTSYS;
1612 }
1613
1614 mutex_lock(&tty->termios_mutex);
1615 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1616 index 18d06be..268294c 100644
1617 --- a/drivers/tty/vt/vt.c
1618 +++ b/drivers/tty/vt/vt.c
1619 @@ -656,7 +656,7 @@ static inline void save_screen(struct vc_data *vc)
1620 * Redrawing of screen
1621 */
1622
1623 -static void clear_buffer_attributes(struct vc_data *vc)
1624 +void clear_buffer_attributes(struct vc_data *vc)
1625 {
1626 unsigned short *p = (unsigned short *)vc->vc_origin;
1627 int count = vc->vc_screenbuf_size / 2;
1628 @@ -3017,7 +3017,7 @@ int __init vty_init(const struct file_operations *console_fops)
1629
1630 static struct class *vtconsole_class;
1631
1632 -static int bind_con_driver(const struct consw *csw, int first, int last,
1633 +static int do_bind_con_driver(const struct consw *csw, int first, int last,
1634 int deflt)
1635 {
1636 struct module *owner = csw->owner;
1637 @@ -3028,7 +3028,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
1638 if (!try_module_get(owner))
1639 return -ENODEV;
1640
1641 - console_lock();
1642 + WARN_CONSOLE_UNLOCKED();
1643
1644 /* check if driver is registered */
1645 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
1646 @@ -3113,11 +3113,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
1647
1648 retval = 0;
1649 err:
1650 - console_unlock();
1651 module_put(owner);
1652 return retval;
1653 };
1654
1655 +
1656 +static int bind_con_driver(const struct consw *csw, int first, int last,
1657 + int deflt)
1658 +{
1659 + int ret;
1660 +
1661 + console_lock();
1662 + ret = do_bind_con_driver(csw, first, last, deflt);
1663 + console_unlock();
1664 + return ret;
1665 +}
1666 +
1667 #ifdef CONFIG_VT_HW_CONSOLE_BINDING
1668 static int con_is_graphics(const struct consw *csw, int first, int last)
1669 {
1670 @@ -3154,6 +3165,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
1671 */
1672 int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1673 {
1674 + int retval;
1675 +
1676 + console_lock();
1677 + retval = do_unbind_con_driver(csw, first, last, deflt);
1678 + console_unlock();
1679 + return retval;
1680 +}
1681 +EXPORT_SYMBOL(unbind_con_driver);
1682 +
1683 +/* unlocked version of unbind_con_driver() */
1684 +int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1685 +{
1686 struct module *owner = csw->owner;
1687 const struct consw *defcsw = NULL;
1688 struct con_driver *con_driver = NULL, *con_back = NULL;
1689 @@ -3162,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1690 if (!try_module_get(owner))
1691 return -ENODEV;
1692
1693 - console_lock();
1694 + WARN_CONSOLE_UNLOCKED();
1695
1696 /* check if driver is registered and if it is unbindable */
1697 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
1698 @@ -3175,10 +3198,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1699 }
1700 }
1701
1702 - if (retval) {
1703 - console_unlock();
1704 + if (retval)
1705 goto err;
1706 - }
1707
1708 retval = -ENODEV;
1709
1710 @@ -3194,15 +3215,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1711 }
1712 }
1713
1714 - if (retval) {
1715 - console_unlock();
1716 + if (retval)
1717 goto err;
1718 - }
1719
1720 - if (!con_is_bound(csw)) {
1721 - console_unlock();
1722 + if (!con_is_bound(csw))
1723 goto err;
1724 - }
1725
1726 first = max(first, con_driver->first);
1727 last = min(last, con_driver->last);
1728 @@ -3229,15 +3246,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
1729 if (!con_is_bound(csw))
1730 con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
1731
1732 - console_unlock();
1733 /* ignore return value, binding should not fail */
1734 - bind_con_driver(defcsw, first, last, deflt);
1735 + do_bind_con_driver(defcsw, first, last, deflt);
1736 err:
1737 module_put(owner);
1738 return retval;
1739
1740 }
1741 -EXPORT_SYMBOL(unbind_con_driver);
1742 +EXPORT_SYMBOL_GPL(do_unbind_con_driver);
1743
1744 static int vt_bind(struct con_driver *con)
1745 {
1746 @@ -3522,28 +3538,18 @@ int con_debug_leave(void)
1747 }
1748 EXPORT_SYMBOL_GPL(con_debug_leave);
1749
1750 -/**
1751 - * register_con_driver - register console driver to console layer
1752 - * @csw: console driver
1753 - * @first: the first console to take over, minimum value is 0
1754 - * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
1755 - *
1756 - * DESCRIPTION: This function registers a console driver which can later
1757 - * bind to a range of consoles specified by @first and @last. It will
1758 - * also initialize the console driver by calling con_startup().
1759 - */
1760 -int register_con_driver(const struct consw *csw, int first, int last)
1761 +static int do_register_con_driver(const struct consw *csw, int first, int last)
1762 {
1763 struct module *owner = csw->owner;
1764 struct con_driver *con_driver;
1765 const char *desc;
1766 int i, retval = 0;
1767
1768 + WARN_CONSOLE_UNLOCKED();
1769 +
1770 if (!try_module_get(owner))
1771 return -ENODEV;
1772
1773 - console_lock();
1774 -
1775 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
1776 con_driver = &registered_con_driver[i];
1777
1778 @@ -3596,10 +3602,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
1779 }
1780
1781 err:
1782 - console_unlock();
1783 module_put(owner);
1784 return retval;
1785 }
1786 +
1787 +/**
1788 + * register_con_driver - register console driver to console layer
1789 + * @csw: console driver
1790 + * @first: the first console to take over, minimum value is 0
1791 + * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
1792 + *
1793 + * DESCRIPTION: This function registers a console driver which can later
1794 + * bind to a range of consoles specified by @first and @last. It will
1795 + * also initialize the console driver by calling con_startup().
1796 + */
1797 +int register_con_driver(const struct consw *csw, int first, int last)
1798 +{
1799 + int retval;
1800 +
1801 + console_lock();
1802 + retval = do_register_con_driver(csw, first, last);
1803 + console_unlock();
1804 + return retval;
1805 +}
1806 EXPORT_SYMBOL(register_con_driver);
1807
1808 /**
1809 @@ -3615,9 +3640,18 @@ EXPORT_SYMBOL(register_con_driver);
1810 */
1811 int unregister_con_driver(const struct consw *csw)
1812 {
1813 - int i, retval = -ENODEV;
1814 + int retval;
1815
1816 console_lock();
1817 + retval = do_unregister_con_driver(csw);
1818 + console_unlock();
1819 + return retval;
1820 +}
1821 +EXPORT_SYMBOL(unregister_con_driver);
1822 +
1823 +int do_unregister_con_driver(const struct consw *csw)
1824 +{
1825 + int i, retval = -ENODEV;
1826
1827 /* cannot unregister a bound driver */
1828 if (con_is_bound(csw))
1829 @@ -3643,27 +3677,53 @@ int unregister_con_driver(const struct consw *csw)
1830 }
1831 }
1832 err:
1833 - console_unlock();
1834 return retval;
1835 }
1836 -EXPORT_SYMBOL(unregister_con_driver);
1837 +EXPORT_SYMBOL_GPL(do_unregister_con_driver);
1838
1839 /*
1840 * If we support more console drivers, this function is used
1841 * when a driver wants to take over some existing consoles
1842 * and become default driver for newly opened ones.
1843 *
1844 - * take_over_console is basically a register followed by unbind
1845 + * take_over_console is basically a register followed by unbind
1846 + */
1847 +int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
1848 +{
1849 + int err;
1850 +
1851 + err = do_register_con_driver(csw, first, last);
1852 + /*
1853 + * If we get an busy error we still want to bind the console driver
1854 + * and return success, as we may have unbound the console driver
1855 + * but not unregistered it.
1856 + */
1857 + if (err == -EBUSY)
1858 + err = 0;
1859 + if (!err)
1860 + do_bind_con_driver(csw, first, last, deflt);
1861 +
1862 + return err;
1863 +}
1864 +EXPORT_SYMBOL_GPL(do_take_over_console);
1865 +
1866 +/*
1867 + * If we support more console drivers, this function is used
1868 + * when a driver wants to take over some existing consoles
1869 + * and become default driver for newly opened ones.
1870 + *
1871 + * take_over_console is basically a register followed by unbind
1872 */
1873 int take_over_console(const struct consw *csw, int first, int last, int deflt)
1874 {
1875 int err;
1876
1877 err = register_con_driver(csw, first, last);
1878 - /* if we get an busy error we still want to bind the console driver
1879 + /*
1880 + * If we get an busy error we still want to bind the console driver
1881 * and return success, as we may have unbound the console driver
1882 -  * but not unregistered it.
1883 - */
1884 + * but not unregistered it.
1885 + */
1886 if (err == -EBUSY)
1887 err = 0;
1888 if (!err)
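The vt.c changes above follow a single pattern: the body of each console-layer operation moves into an unlocked do_*() worker that only asserts the lock via WARN_CONSOLE_UNLOCKED(), and the original exported name becomes a thin wrapper that takes console_lock() around the worker. Callers that already hold the lock (do_take_over_console() here, and the fbcon paths further down) invoke the workers directly. A hedged sketch of that split, with invented function names and assuming the same console-lock helpers vt.c already uses:

	/* Unlocked worker: all real work happens here and may freely call
	 * other do_*() helpers without re-taking the console lock. */
	static int do_console_op(int arg)
	{
		WARN_CONSOLE_UNLOCKED();	/* caller must hold console_lock */
		/* ... perform the operation ... */
		return 0;
	}

	/* Public entry point: acquires the lock exactly once. */
	int console_op(int arg)
	{
		int ret;

		console_lock();
		ret = do_console_op(arg);
		console_unlock();
		return ret;
	}

Centralising the lock this way removes the nested locking and the scattered console_unlock()-before-goto cleanups that the old bind/unbind code needed.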
1889 diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
1890 index e669c6a..12d3f28 100644
1891 --- a/drivers/usb/host/ehci-omap.c
1892 +++ b/drivers/usb/host/ehci-omap.c
1893 @@ -371,7 +371,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
1894 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
1895 };
1896
1897 -MODULE_ALIAS("platform:omap-ehci");
1898 +MODULE_ALIAS("platform:ehci-omap");
1899 MODULE_AUTHOR("Texas Instruments, Inc.");
1900 MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
1901
1902 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1903 index 7746944..87ef150 100644
1904 --- a/drivers/usb/serial/ftdi_sio.c
1905 +++ b/drivers/usb/serial/ftdi_sio.c
1906 @@ -1919,24 +1919,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1907 {
1908 struct ftdi_private *priv = usb_get_serial_port_data(port);
1909
1910 - mutex_lock(&port->serial->disc_mutex);
1911 - if (!port->serial->disconnected) {
1912 - /* Disable flow control */
1913 - if (!on && usb_control_msg(port->serial->dev,
1914 + /* Disable flow control */
1915 + if (!on) {
1916 + if (usb_control_msg(port->serial->dev,
1917 usb_sndctrlpipe(port->serial->dev, 0),
1918 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
1919 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
1920 0, priv->interface, NULL, 0,
1921 WDR_TIMEOUT) < 0) {
1922 - dev_err(&port->dev, "error from flowcontrol urb\n");
1923 + dev_err(&port->dev, "error from flowcontrol urb\n");
1924 }
1925 - /* drop RTS and DTR */
1926 - if (on)
1927 - set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1928 - else
1929 - clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1930 }
1931 - mutex_unlock(&port->serial->disc_mutex);
1932 + /* drop RTS and DTR */
1933 + if (on)
1934 + set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1935 + else
1936 + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1937 }
1938
1939 /*
1940 diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
1941 index d0bf56d..933dd07 100644
1942 --- a/drivers/usb/serial/mct_u232.c
1943 +++ b/drivers/usb/serial/mct_u232.c
1944 @@ -514,19 +514,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
1945 unsigned int control_state;
1946 struct mct_u232_private *priv = usb_get_serial_port_data(port);
1947
1948 - mutex_lock(&port->serial->disc_mutex);
1949 - if (!port->serial->disconnected) {
1950 - /* drop DTR and RTS */
1951 - spin_lock_irq(&priv->lock);
1952 - if (on)
1953 - priv->control_state |= TIOCM_DTR | TIOCM_RTS;
1954 - else
1955 - priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
1956 - control_state = priv->control_state;
1957 - spin_unlock_irq(&priv->lock);
1958 - mct_u232_set_modem_ctrl(port->serial, control_state);
1959 - }
1960 - mutex_unlock(&port->serial->disc_mutex);
1961 + spin_lock_irq(&priv->lock);
1962 + if (on)
1963 + priv->control_state |= TIOCM_DTR | TIOCM_RTS;
1964 + else
1965 + priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
1966 + control_state = priv->control_state;
1967 + spin_unlock_irq(&priv->lock);
1968 +
1969 + mct_u232_set_modem_ctrl(port->serial, control_state);
1970 }
1971
1972 static void mct_u232_close(struct usb_serial_port *port)
1973 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1974 index 6c077a1..539247b 100644
1975 --- a/drivers/usb/serial/option.c
1976 +++ b/drivers/usb/serial/option.c
1977 @@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
1978
1979 static const struct option_blacklist_info alcatel_x200_blacklist = {
1980 .sendsetup = BIT(0) | BIT(1),
1981 + .reserved = BIT(4),
1982 };
1983
1984 static const struct option_blacklist_info zte_0037_blacklist = {
1985 @@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
1986 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
1987 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
1988 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
1989 + { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
1990 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1991 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
1992 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
1993 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
1994 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
1995 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
1996 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
1997 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
1998 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
1999 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
2000 @@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = {
2001 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
2002 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
2003 },
2004 - { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
2005 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
2006 + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
2007 + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
2008 + .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
2009 + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
2010 + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2011 + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
2012 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
2013 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
2014 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2015 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
2016 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
2017 index b622d69..8ec15c2 100644
2018 --- a/drivers/usb/serial/sierra.c
2019 +++ b/drivers/usb/serial/sierra.c
2020 @@ -890,19 +890,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
2021
2022 static void sierra_dtr_rts(struct usb_serial_port *port, int on)
2023 {
2024 - struct usb_serial *serial = port->serial;
2025 struct sierra_port_private *portdata;
2026
2027 portdata = usb_get_serial_port_data(port);
2028 portdata->rts_state = on;
2029 portdata->dtr_state = on;
2030
2031 - if (serial->dev) {
2032 - mutex_lock(&serial->disc_mutex);
2033 - if (!serial->disconnected)
2034 - sierra_send_setup(port);
2035 - mutex_unlock(&serial->disc_mutex);
2036 - }
2037 + sierra_send_setup(port);
2038 }
2039
2040 static int sierra_startup(struct usb_serial *serial)
2041 diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
2042 index 3cdc8a5..b8db69d 100644
2043 --- a/drivers/usb/serial/ssu100.c
2044 +++ b/drivers/usb/serial/ssu100.c
2045 @@ -532,19 +532,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
2046
2047 dbg("%s\n", __func__);
2048
2049 - mutex_lock(&port->serial->disc_mutex);
2050 - if (!port->serial->disconnected) {
2051 - /* Disable flow control */
2052 - if (!on &&
2053 - ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
2054 + /* Disable flow control */
2055 + if (!on) {
2056 + if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
2057 dev_err(&port->dev, "error from flowcontrol urb\n");
2058 - /* drop RTS and DTR */
2059 - if (on)
2060 - set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
2061 - else
2062 - clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
2063 }
2064 - mutex_unlock(&port->serial->disc_mutex);
2065 + /* drop RTS and DTR */
2066 + if (on)
2067 + set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
2068 + else
2069 + clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
2070 }
2071
2072 static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
2073 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
2074 index c627ba2..e4b199c 100644
2075 --- a/drivers/usb/serial/usb-serial.c
2076 +++ b/drivers/usb/serial/usb-serial.c
2077 @@ -699,10 +699,20 @@ static int serial_carrier_raised(struct tty_port *port)
2078 static void serial_dtr_rts(struct tty_port *port, int on)
2079 {
2080 struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
2081 - struct usb_serial_driver *drv = p->serial->type;
2082 + struct usb_serial *serial = p->serial;
2083 + struct usb_serial_driver *drv = serial->type;
2084
2085 - if (drv->dtr_rts)
2086 + if (!drv->dtr_rts)
2087 + return;
2088 + /*
2089 + * Work-around bug in the tty-layer which can result in dtr_rts
2090 + * being called after a disconnect (and tty_unregister_device
2091 + * has returned). Remove once bug has been squashed.
2092 + */
2093 + mutex_lock(&serial->disc_mutex);
2094 + if (!serial->disconnected)
2095 drv->dtr_rts(p, on);
2096 + mutex_unlock(&serial->disc_mutex);
2097 }
2098
2099 static const struct tty_port_operations serial_port_ops = {
2100 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
2101 index c88657d..820436e 100644
2102 --- a/drivers/usb/serial/usb_wwan.c
2103 +++ b/drivers/usb/serial/usb_wwan.c
2104 @@ -41,7 +41,6 @@ static bool debug;
2105
2106 void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
2107 {
2108 - struct usb_serial *serial = port->serial;
2109 struct usb_wwan_port_private *portdata;
2110
2111 struct usb_wwan_intf_private *intfdata;
2112 @@ -54,12 +53,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
2113 return;
2114
2115 portdata = usb_get_serial_port_data(port);
2116 - mutex_lock(&serial->disc_mutex);
2117 + /* FIXME: locking */
2118 portdata->rts_state = on;
2119 portdata->dtr_state = on;
2120 - if (serial->dev)
2121 - intfdata->send_setup(port);
2122 - mutex_unlock(&serial->disc_mutex);
2123 +
2124 + intfdata->send_setup(port);
2125 }
2126 EXPORT_SYMBOL(usb_wwan_dtr_rts);
2127
2128 diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
2129 index 16b0bf0..7ab9046 100644
2130 --- a/drivers/usb/storage/initializers.c
2131 +++ b/drivers/usb/storage/initializers.c
2132 @@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us)
2133 int idProduct;
2134
2135 idesc = &us->pusb_intf->cur_altsetting->desc;
2136 - idProduct = us->pusb_dev->descriptor.idProduct;
2137 + idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
2138 /* The first port is CDROM,
2139 * means the dongle in the single port mode,
2140 * and a switch command is required to be sent. */
2141 @@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us)
2142 int result = 0;
2143
2144 if (usb_stor_huawei_dongles_pid(us)) {
2145 - if (us->pusb_dev->descriptor.idProduct >= 0x1446)
2146 + if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
2147 result = usb_stor_huawei_scsi_init(us);
2148 else
2149 result = usb_stor_huawei_feature_init(us);
2150 diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
2151 index 2c85530..65a6a75 100644
2152 --- a/drivers/usb/storage/unusual_cypress.h
2153 +++ b/drivers/usb/storage/unusual_cypress.h
2154 @@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
2155 "Cypress ISD-300LP",
2156 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
2157
2158 -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
2159 +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
2160 "Super Top",
2161 "USB 2.0 SATA BRIDGE",
2162 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
2163 diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
2164 index 550dbf0..feda482 100644
2165 --- a/drivers/video/backlight/adp8860_bl.c
2166 +++ b/drivers/video/backlight/adp8860_bl.c
2167 @@ -791,7 +791,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
2168
2169 static int adp8860_i2c_resume(struct i2c_client *client)
2170 {
2171 - adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
2172 + adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
2173
2174 return 0;
2175 }
2176 diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
2177 index 9be58c6..c7a2c35 100644
2178 --- a/drivers/video/backlight/adp8870_bl.c
2179 +++ b/drivers/video/backlight/adp8870_bl.c
2180 @@ -965,7 +965,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
2181
2182 static int adp8870_i2c_resume(struct i2c_client *client)
2183 {
2184 - adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
2185 + adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
2186
2187 return 0;
2188 }
2189 diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
2190 index 88e9204..5bf163e 100644
2191 --- a/drivers/video/console/fbcon.c
2192 +++ b/drivers/video/console/fbcon.c
2193 @@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
2194 return retval;
2195 }
2196
2197 +static int do_fbcon_takeover(int show_logo)
2198 +{
2199 + int err, i;
2200 +
2201 + if (!num_registered_fb)
2202 + return -ENODEV;
2203 +
2204 + if (!show_logo)
2205 + logo_shown = FBCON_LOGO_DONTSHOW;
2206 +
2207 + for (i = first_fb_vc; i <= last_fb_vc; i++)
2208 + con2fb_map[i] = info_idx;
2209 +
2210 + err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
2211 + fbcon_is_default);
2212 +
2213 + if (err) {
2214 + for (i = first_fb_vc; i <= last_fb_vc; i++)
2215 + con2fb_map[i] = -1;
2216 + info_idx = -1;
2217 + } else {
2218 + fbcon_has_console_bind = 1;
2219 + }
2220 +
2221 + return err;
2222 +}
2223 +
2224 static int fbcon_takeover(int show_logo)
2225 {
2226 int err, i;
2227 @@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
2228 }
2229
2230 /* Setup default font */
2231 - if (!p->fontdata) {
2232 + if (!p->fontdata && !vc->vc_font.data) {
2233 if (!fontname[0] || !(font = find_font(fontname)))
2234 font = get_default_font(info->var.xres,
2235 info->var.yres,
2236 @@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
2237 vc->vc_font.height = font->height;
2238 vc->vc_font.data = (void *)(p->fontdata = font->data);
2239 vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
2240 + } else {
2241 + p->fontdata = vc->vc_font.data;
2242 }
2243
2244 cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
2245 @@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
2246 ops->p = &fb_display[fg_console];
2247 }
2248
2249 -static void fbcon_free_font(struct display *p)
2250 +static void fbcon_free_font(struct display *p, bool freefont)
2251 {
2252 - if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
2253 + if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
2254 kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
2255 p->fontdata = NULL;
2256 p->userfont = 0;
2257 @@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
2258 struct fb_info *info;
2259 struct fbcon_ops *ops;
2260 int idx;
2261 + bool free_font = true;
2262
2263 - fbcon_free_font(p);
2264 idx = con2fb_map[vc->vc_num];
2265
2266 if (idx == -1)
2267 @@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
2268 if (!info)
2269 goto finished;
2270
2271 + if (info->flags & FBINFO_MISC_FIRMWARE)
2272 + free_font = false;
2273 ops = info->fbcon_par;
2274
2275 if (!ops)
2276 @@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
2277 ops->flags &= ~FBCON_FLAGS_INIT;
2278 finished:
2279
2280 + fbcon_free_font(p, free_font);
2281 +
2282 if (!con_is_bound(&fb_con))
2283 fbcon_exit();
2284
2285 @@ -2977,7 +3010,7 @@ static int fbcon_unbind(void)
2286 {
2287 int ret;
2288
2289 - ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
2290 + ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
2291 fbcon_is_default);
2292
2293 if (!ret)
2294 @@ -3050,7 +3083,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
2295 primary_device = -1;
2296
2297 if (!num_registered_fb)
2298 - unregister_con_driver(&fb_con);
2299 + do_unregister_con_driver(&fb_con);
2300
2301 return 0;
2302 }
2303 @@ -3115,7 +3148,7 @@ static int fbcon_fb_registered(struct fb_info *info)
2304 }
2305
2306 if (info_idx != -1)
2307 - ret = fbcon_takeover(1);
2308 + ret = do_fbcon_takeover(1);
2309 } else {
2310 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2311 if (con2fb_map_boot[i] == idx)
2312 diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
2313 index d449a74..5855d17 100644
2314 --- a/drivers/video/console/vgacon.c
2315 +++ b/drivers/video/console/vgacon.c
2316 @@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
2317 unsigned short video_port_status = vga_video_port_reg + 6;
2318 int font_select = 0x00, beg, i;
2319 char *charmap;
2320 -
2321 + bool clear_attribs = false;
2322 if (vga_video_type != VIDEO_TYPE_EGAM) {
2323 charmap = (char *) VGA_MAP_MEM(colourmap, 0);
2324 beg = 0x0e;
2325 @@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
2326
2327 /* if 512 char mode is already enabled don't re-enable it. */
2328 if ((set) && (ch512 != vga_512_chars)) {
2329 - /* attribute controller */
2330 - for (i = 0; i < MAX_NR_CONSOLES; i++) {
2331 - struct vc_data *c = vc_cons[i].d;
2332 - if (c && c->vc_sw == &vga_con)
2333 - c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
2334 - }
2335 vga_512_chars = ch512;
2336 /* 256-char: enable intensity bit
2337 512-char: disable intensity bit */
2338 @@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
2339 it means, but it works, and it appears necessary */
2340 inb_p(video_port_status);
2341 vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
2342 + clear_attribs = true;
2343 }
2344 raw_spin_unlock_irq(&vga_lock);
2345 +
2346 + if (clear_attribs) {
2347 + for (i = 0; i < MAX_NR_CONSOLES; i++) {
2348 + struct vc_data *c = vc_cons[i].d;
2349 + if (c && c->vc_sw == &vga_con) {
2350 + /* force hi font mask to 0, so we always clear
2351 + the bit on either transition */
2352 + c->vc_hi_font_mask = 0x00;
2353 + clear_buffer_attributes(c);
2354 + c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
2355 + }
2356 + }
2357 + }
2358 return 0;
2359 }
2360
2361 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
2362 index c6ce416..90f1315 100644
2363 --- a/drivers/video/fbmem.c
2364 +++ b/drivers/video/fbmem.c
2365 @@ -1642,7 +1642,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
2366 event.info = fb_info;
2367 if (!lock_fb_info(fb_info))
2368 return -ENODEV;
2369 + console_lock();
2370 fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
2371 + console_unlock();
2372 unlock_fb_info(fb_info);
2373 return 0;
2374 }
2375 @@ -1658,8 +1660,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2376
2377 if (!lock_fb_info(fb_info))
2378 return -ENODEV;
2379 + console_lock();
2380 event.info = fb_info;
2381 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
2382 + console_unlock();
2383 unlock_fb_info(fb_info);
2384
2385 if (ret)
2386 @@ -1674,7 +1678,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
2387 num_registered_fb--;
2388 fb_cleanup_device(fb_info);
2389 event.info = fb_info;
2390 + console_lock();
2391 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
2392 + console_unlock();
2393
2394 /* this may free fb info */
2395 put_fb_info(fb_info);
2396 @@ -1845,11 +1851,8 @@ int fb_new_modelist(struct fb_info *info)
2397 err = 1;
2398
2399 if (!list_empty(&info->modelist)) {
2400 - if (!lock_fb_info(info))
2401 - return -ENODEV;
2402 event.info = info;
2403 err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
2404 - unlock_fb_info(info);
2405 }
2406
2407 return err;
2408 diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
2409 index 67afa9c..303fb9f 100644
2410 --- a/drivers/video/fbsysfs.c
2411 +++ b/drivers/video/fbsysfs.c
2412 @@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device,
2413 if (i * sizeof(struct fb_videomode) != count)
2414 return -EINVAL;
2415
2416 + if (!lock_fb_info(fb_info))
2417 + return -ENODEV;
2418 console_lock();
2419 list_splice(&fb_info->modelist, &old_list);
2420 fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
2421 @@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device,
2422 fb_destroy_modelist(&old_list);
2423
2424 console_unlock();
2425 + unlock_fb_info(fb_info);
2426
2427 return 0;
2428 }
2429 diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
2430 index 6af3f16..d02a538 100644
2431 --- a/drivers/video/fsl-diu-fb.c
2432 +++ b/drivers/video/fsl-diu-fb.c
2433 @@ -923,7 +923,7 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
2434 #define PF_COMP_0_MASK 0x0000000F
2435 #define PF_COMP_0_SHIFT 0
2436
2437 -#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
2438 +#define MAKE_PF(alpha, red, green, blue, size, c0, c1, c2, c3) \
2439 cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
2440 (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
2441 (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
2442 @@ -933,10 +933,10 @@ static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
2443 switch (bits_per_pixel) {
2444 case 32:
2445 /* 0x88883316 */
2446 - return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
2447 + return MAKE_PF(3, 2, 1, 0, 3, 8, 8, 8, 8);
2448 case 24:
2449 /* 0x88082219 */
2450 - return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
2451 + return MAKE_PF(4, 0, 1, 2, 2, 8, 8, 8, 0);
2452 case 16:
2453 /* 0x65053118 */
2454 return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
2455 diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
2456 index b1f60a0..b2db77e 100644
2457 --- a/drivers/xen/evtchn.c
2458 +++ b/drivers/xen/evtchn.c
2459 @@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
2460 u->name, (void *)(unsigned long)port);
2461 if (rc >= 0)
2462 rc = evtchn_make_refcounted(port);
2463 + else {
2464 + /* bind failed, should close the port now */
2465 + struct evtchn_close close;
2466 + close.port = port;
2467 + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
2468 + BUG();
2469 + set_port_user(port, NULL);
2470 + }
2471
2472 return rc;
2473 }
2474 @@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
2475 {
2476 int irq = irq_from_evtchn(port);
2477
2478 + BUG_ON(irq < 0);
2479 +
2480 unbind_from_irqhandler(irq, (void *)(unsigned long)port);
2481
2482 set_port_user(port, NULL);
2483 diff --git a/fs/block_dev.c b/fs/block_dev.c
2484 index ba11c30..b3be92c 100644
2485 --- a/fs/block_dev.c
2486 +++ b/fs/block_dev.c
2487 @@ -1047,6 +1047,7 @@ int revalidate_disk(struct gendisk *disk)
2488
2489 mutex_lock(&bdev->bd_mutex);
2490 check_disk_size_change(disk, bdev);
2491 + bdev->bd_invalidated = 0;
2492 mutex_unlock(&bdev->bd_mutex);
2493 bdput(bdev);
2494 return ret;
2495 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
2496 index 8392cb8..a3a0987 100644
2497 --- a/fs/lockd/clntproc.c
2498 +++ b/fs/lockd/clntproc.c
2499 @@ -551,6 +551,9 @@ again:
2500 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
2501 if (status < 0)
2502 break;
2503 + /* Resend the blocking lock request after a server reboot */
2504 + if (resp->status == nlm_lck_denied_grace_period)
2505 + continue;
2506 if (resp->status != nlm_lck_blocked)
2507 break;
2508 }
2509 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
2510 index d16dae2..3a9c247 100644
2511 --- a/fs/nfs/blocklayout/blocklayout.c
2512 +++ b/fs/nfs/blocklayout/blocklayout.c
2513 @@ -1155,6 +1155,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = {
2514 static struct pnfs_layoutdriver_type blocklayout_type = {
2515 .id = LAYOUT_BLOCK_VOLUME,
2516 .name = "LAYOUT_BLOCK_VOLUME",
2517 + .owner = THIS_MODULE,
2518 .read_pagelist = bl_read_pagelist,
2519 .write_pagelist = bl_write_pagelist,
2520 .alloc_layout_hdr = bl_alloc_layout_hdr,
2521 diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
2522 index 1afe74c..65538f5 100644
2523 --- a/fs/nfs/objlayout/objio_osd.c
2524 +++ b/fs/nfs/objlayout/objio_osd.c
2525 @@ -589,6 +589,7 @@ static struct pnfs_layoutdriver_type objlayout_type = {
2526 .flags = PNFS_LAYOUTRET_ON_SETATTR |
2527 PNFS_LAYOUTRET_ON_ERROR,
2528
2529 + .owner = THIS_MODULE,
2530 .alloc_layout_hdr = objlayout_alloc_layout_hdr,
2531 .free_layout_hdr = objlayout_free_layout_hdr,
2532
2533 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
2534 index 8445fbc..6f292dd 100644
2535 --- a/fs/notify/inotify/inotify_user.c
2536 +++ b/fs/notify/inotify/inotify_user.c
2537 @@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
2538
2539 /* don't allow invalid bits: we don't want flags set */
2540 mask = inotify_arg_to_mask(arg);
2541 - if (unlikely(!(mask & IN_ALL_EVENTS)))
2542 - return -EINVAL;
2543
2544 fsn_mark = fsnotify_find_inode_mark(group, inode);
2545 if (!fsn_mark)
2546 @@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
2547
2548 /* don't allow invalid bits: we don't want flags set */
2549 mask = inotify_arg_to_mask(arg);
2550 - if (unlikely(!(mask & IN_ALL_EVENTS)))
2551 - return -EINVAL;
2552
2553 tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
2554 if (unlikely(!tmp_i_mark))
2555 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
2556 index 81a4cd2..231eab2 100644
2557 --- a/fs/ocfs2/dlmglue.c
2558 +++ b/fs/ocfs2/dlmglue.c
2559 @@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
2560 * everything is up to the caller :) */
2561 status = ocfs2_should_refresh_lock_res(lockres);
2562 if (status < 0) {
2563 + ocfs2_cluster_unlock(osb, lockres, level);
2564 mlog_errno(status);
2565 goto bail;
2566 }
2567 @@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
2568
2569 ocfs2_complete_lock_res_refresh(lockres, status);
2570
2571 - if (status < 0)
2572 + if (status < 0) {
2573 + ocfs2_cluster_unlock(osb, lockres, level);
2574 mlog_errno(status);
2575 + }
2576 ocfs2_track_lock_refresh(lockres);
2577 }
2578 bail:
2579 diff --git a/include/linux/console.h b/include/linux/console.h
2580 index 7201ce4..f59e942 100644
2581 --- a/include/linux/console.h
2582 +++ b/include/linux/console.h
2583 @@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
2584 int con_is_bound(const struct consw *csw);
2585 int register_con_driver(const struct consw *csw, int first, int last);
2586 int unregister_con_driver(const struct consw *csw);
2587 +int do_unregister_con_driver(const struct consw *csw);
2588 int take_over_console(const struct consw *sw, int first, int last, int deflt);
2589 +int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
2590 void give_up_console(const struct consw *sw);
2591 #ifdef CONFIG_HW_CONSOLE
2592 int con_debug_enter(struct vc_data *vc);
2593 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
2594 index 561e130..9b0c614 100644
2595 --- a/include/linux/if_vlan.h
2596 +++ b/include/linux/if_vlan.h
2597 @@ -327,7 +327,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
2598 struct vlan_hdr *vhdr)
2599 {
2600 __be16 proto;
2601 - unsigned char *rawp;
2602 + unsigned short *rawp;
2603
2604 /*
2605 * Was a VLAN packet, grab the encapsulated protocol, which the layer
2606 @@ -340,8 +340,8 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
2607 return;
2608 }
2609
2610 - rawp = skb->data;
2611 - if (*(unsigned short *) rawp == 0xFFFF)
2612 + rawp = (unsigned short *)(vhdr + 1);
2613 + if (*rawp == 0xFFFF)
2614 /*
2615 * This is a magic hack to spot IPX packets. Older Novell
2616 * breaks the protocol design and runs IPX over 802.3 without
2617 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
2618 index 1d1b1e1..ee2baf0 100644
2619 --- a/include/linux/mmu_notifier.h
2620 +++ b/include/linux/mmu_notifier.h
2621 @@ -4,6 +4,7 @@
2622 #include <linux/list.h>
2623 #include <linux/spinlock.h>
2624 #include <linux/mm_types.h>
2625 +#include <linux/srcu.h>
2626
2627 struct mmu_notifier;
2628 struct mmu_notifier_ops;
2629 diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
2630 index a54b825..6f8b026 100644
2631 --- a/include/linux/usb/audio.h
2632 +++ b/include/linux/usb/audio.h
2633 @@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
2634 int protocol)
2635 {
2636 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
2637 - return desc->baSourceID[desc->bNrInPins + control_size];
2638 + return *(uac_processing_unit_bmControls(desc, protocol)
2639 + + control_size);
2640 }
2641
2642 static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
2643 int protocol)
2644 {
2645 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
2646 - return &desc->baSourceID[desc->bNrInPins + control_size + 1];
2647 + return uac_processing_unit_bmControls(desc, protocol)
2648 + + control_size + 1;
2649 }
2650
2651 /* 4.5.2 Class-Specific AS Interface Descriptor */
2652 diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
2653 index e33d77f..57eeb14 100644
2654 --- a/include/linux/vt_kern.h
2655 +++ b/include/linux/vt_kern.h
2656 @@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
2657 int con_get_cmap(unsigned char __user *cmap);
2658 void scrollback(struct vc_data *vc, int lines);
2659 void scrollfront(struct vc_data *vc, int lines);
2660 +void clear_buffer_attributes(struct vc_data *vc);
2661 void update_region(struct vc_data *vc, unsigned long start, int count);
2662 void redraw_screen(struct vc_data *vc, int is_switch);
2663 #define update_screen(x) redraw_screen(x, 0)
2664 @@ -131,6 +132,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
2665 int vt_waitactive(int n);
2666 void change_console(struct vc_data *new_vc);
2667 void reset_vc(struct vc_data *vc);
2668 +extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
2669 + int deflt);
2670 extern int unbind_con_driver(const struct consw *csw, int first, int last,
2671 int deflt);
2672 int vty_init(const struct file_operations *console_fops);
2673 diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
2674 index 00cbb43..2da45ce 100644
2675 --- a/include/net/inet6_hashtables.h
2676 +++ b/include/net/inet6_hashtables.h
2677 @@ -28,16 +28,16 @@
2678
2679 struct inet_hashinfo;
2680
2681 -/* I have no idea if this is a good hash for v6 or not. -DaveM */
2682 static inline unsigned int inet6_ehashfn(struct net *net,
2683 const struct in6_addr *laddr, const u16 lport,
2684 const struct in6_addr *faddr, const __be16 fport)
2685 {
2686 - u32 ports = (lport ^ (__force u16)fport);
2687 + u32 ports = (((u32)lport) << 16) | (__force u32)fport;
2688
2689 return jhash_3words((__force u32)laddr->s6_addr32[3],
2690 - (__force u32)faddr->s6_addr32[3],
2691 - ports, inet_ehash_secret + net_hash_mix(net));
2692 + ipv6_addr_jhash(faddr),
2693 + ports,
2694 + inet_ehash_secret + net_hash_mix(net));
2695 }
2696
2697 static inline int inet6_sk_ehashfn(const struct sock *sk)
2698 diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
2699 index ae17e13..8cd2e1d 100644
2700 --- a/include/net/inet_sock.h
2701 +++ b/include/net/inet_sock.h
2702 @@ -202,6 +202,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
2703 extern int inet_sk_rebuild_header(struct sock *sk);
2704
2705 extern u32 inet_ehash_secret;
2706 +extern u32 ipv6_hash_secret;
2707 extern void build_ehash_secret(void);
2708
2709 static inline unsigned int inet_ehashfn(struct net *net,
2710 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
2711 index e4170a2..12a1bd2 100644
2712 --- a/include/net/ipv6.h
2713 +++ b/include/net/ipv6.h
2714 @@ -15,6 +15,7 @@
2715
2716 #include <linux/ipv6.h>
2717 #include <linux/hardirq.h>
2718 +#include <linux/jhash.h>
2719 #include <net/if_inet6.h>
2720 #include <net/ndisc.h>
2721 #include <net/flow.h>
2722 @@ -390,6 +391,17 @@ struct ip6_create_arg {
2723 void ip6_frag_init(struct inet_frag_queue *q, void *a);
2724 int ip6_frag_match(struct inet_frag_queue *q, void *a);
2725
2726 +/* more secured version of ipv6_addr_hash() */
2727 +static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
2728 +{
2729 + u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
2730 +
2731 + return jhash_3words(v,
2732 + (__force u32)a->s6_addr32[2],
2733 + (__force u32)a->s6_addr32[3],
2734 + ipv6_hash_secret);
2735 +}
2736 +
2737 static inline int ipv6_addr_any(const struct in6_addr *a)
2738 {
2739 return (a->s6_addr32[0] | a->s6_addr32[1] |
2740 diff --git a/include/net/sock.h b/include/net/sock.h
2741 index 5878118..59a8947 100644
2742 --- a/include/net/sock.h
2743 +++ b/include/net/sock.h
2744 @@ -944,7 +944,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
2745 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
2746 }
2747
2748 -inline void sk_refcnt_debug_release(const struct sock *sk)
2749 +static inline void sk_refcnt_debug_release(const struct sock *sk)
2750 {
2751 if (atomic_read(&sk->sk_refcnt) != 1)
2752 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
2753 diff --git a/kernel/futex.c b/kernel/futex.c
2754 index 19eb089..8879430 100644
2755 --- a/kernel/futex.c
2756 +++ b/kernel/futex.c
2757 @@ -2471,8 +2471,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
2758 if (!futex_cmpxchg_enabled)
2759 return -ENOSYS;
2760
2761 - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
2762 -
2763 rcu_read_lock();
2764
2765 ret = -ESRCH;
2766 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
2767 index 83e368b..a9642d5 100644
2768 --- a/kernel/futex_compat.c
2769 +++ b/kernel/futex_compat.c
2770 @@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
2771 if (!futex_cmpxchg_enabled)
2772 return -ENOSYS;
2773
2774 - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
2775 -
2776 rcu_read_lock();
2777
2778 ret = -ESRCH;
2779 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
2780 index 6db7a5e..cdd5607 100644
2781 --- a/kernel/hrtimer.c
2782 +++ b/kernel/hrtimer.c
2783 @@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
2784 * and expiry check is done in the hrtimer_interrupt or in the softirq.
2785 */
2786 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
2787 - struct hrtimer_clock_base *base,
2788 - int wakeup)
2789 + struct hrtimer_clock_base *base)
2790 {
2791 - if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
2792 - if (wakeup) {
2793 - raw_spin_unlock(&base->cpu_base->lock);
2794 - raise_softirq_irqoff(HRTIMER_SOFTIRQ);
2795 - raw_spin_lock(&base->cpu_base->lock);
2796 - } else
2797 - __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
2798 -
2799 - return 1;
2800 - }
2801 -
2802 - return 0;
2803 + return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
2804 }
2805
2806 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
2807 @@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
2808 static inline void
2809 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
2810 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
2811 - struct hrtimer_clock_base *base,
2812 - int wakeup)
2813 + struct hrtimer_clock_base *base)
2814 {
2815 return 0;
2816 }
2817 @@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
2818 *
2819 * XXX send_remote_softirq() ?
2820 */
2821 - if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
2822 - hrtimer_enqueue_reprogram(timer, new_base, wakeup);
2823 + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
2824 + && hrtimer_enqueue_reprogram(timer, new_base)) {
2825 + if (wakeup) {
2826 + /*
2827 + * We need to drop cpu_base->lock to avoid a
2828 + * lock ordering issue vs. rq->lock.
2829 + */
2830 + raw_spin_unlock(&new_base->cpu_base->lock);
2831 + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
2832 + local_irq_restore(flags);
2833 + return ret;
2834 + } else {
2835 + __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
2836 + }
2837 + }
2838
2839 unlock_hrtimer_base(timer, &flags);
2840
2841 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
2842 index 611cd60..7b5f012 100644
2843 --- a/kernel/irq/spurious.c
2844 +++ b/kernel/irq/spurious.c
2845 @@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
2846
2847 /*
2848 * All handlers must agree on IRQF_SHARED, so we test just the
2849 - * first. Check for action->next as well.
2850 + * first.
2851 */
2852 action = desc->action;
2853 if (!action || !(action->flags & IRQF_SHARED) ||
2854 - (action->flags & __IRQF_TIMER) ||
2855 - (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
2856 - !action->next)
2857 + (action->flags & __IRQF_TIMER))
2858 goto out;
2859
2860 /* Already running on another processor */
2861 @@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
2862 do {
2863 if (handle_irq_event(desc) == IRQ_HANDLED)
2864 ret = IRQ_HANDLED;
2865 + /* Make sure that there is still a valid action */
2866 action = desc->action;
2867 } while ((desc->istate & IRQS_PENDING) && action);
2868 desc->istate &= ~IRQS_POLL_INPROGRESS;
2869 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
2870 index 125cb67..acbb79c 100644
2871 --- a/kernel/posix-cpu-timers.c
2872 +++ b/kernel/posix-cpu-timers.c
2873 @@ -1422,8 +1422,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
2874 while (!signal_pending(current)) {
2875 if (timer.it.cpu.expires.sched == 0) {
2876 /*
2877 - * Our timer fired and was reset.
2878 + * Our timer fired and was reset, below
2879 + * deletion can not fail.
2880 */
2881 + posix_cpu_timer_del(&timer);
2882 spin_unlock_irq(&timer.it_lock);
2883 return 0;
2884 }
2885 @@ -1441,9 +1443,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
2886 * We were interrupted by a signal.
2887 */
2888 sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
2889 - posix_cpu_timer_set(&timer, 0, &zero_it, it);
2890 + error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
2891 + if (!error) {
2892 + /*
2893 + * Timer is now unarmed, deletion can not fail.
2894 + */
2895 + posix_cpu_timer_del(&timer);
2896 + }
2897 spin_unlock_irq(&timer.it_lock);
2898
2899 + while (error == TIMER_RETRY) {
2900 + /*
2901 + * We need to handle case when timer was or is in the
2902 + * middle of firing. In other cases we already freed
2903 + * resources.
2904 + */
2905 + spin_lock_irq(&timer.it_lock);
2906 + error = posix_cpu_timer_del(&timer);
2907 + spin_unlock_irq(&timer.it_lock);
2908 + }
2909 +
2910 if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
2911 /*
2912 * It actually did fire already.
2913 diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
2914 index eb51d76..3f42652 100644
2915 --- a/kernel/timeconst.pl
2916 +++ b/kernel/timeconst.pl
2917 @@ -369,10 +369,8 @@ if ($hz eq '--can') {
2918 die "Usage: $0 HZ\n";
2919 }
2920
2921 - @val = @{$canned_values{$hz}};
2922 - if (!defined(@val)) {
2923 - @val = compute_values($hz);
2924 - }
2925 + $cv = $canned_values{$hz};
2926 + @val = defined($cv) ? @$cv : compute_values($hz);
2927 output($hz, @val);
2928 }
2929 exit 0;
2930 diff --git a/mm/fadvise.c b/mm/fadvise.c
2931 index 469491e0..dcb9872 100644
2932 --- a/mm/fadvise.c
2933 +++ b/mm/fadvise.c
2934 @@ -17,6 +17,7 @@
2935 #include <linux/fadvise.h>
2936 #include <linux/writeback.h>
2937 #include <linux/syscalls.h>
2938 +#include <linux/swap.h>
2939
2940 #include <asm/unistd.h>
2941
2942 @@ -124,9 +125,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
2943 start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
2944 end_index = (endbyte >> PAGE_CACHE_SHIFT);
2945
2946 - if (end_index >= start_index)
2947 - invalidate_mapping_pages(mapping, start_index,
2948 + if (end_index >= start_index) {
2949 + unsigned long count = invalidate_mapping_pages(mapping,
2950 + start_index, end_index);
2951 +
2952 + /*
2953 + * If fewer pages were invalidated than expected then
2954 + * it is possible that some of the pages were on
2955 + * a per-cpu pagevec for a remote CPU. Drain all
2956 + * pagevecs and try again.
2957 + */
2958 + if (count < (end_index - start_index + 1)) {
2959 + lru_add_drain_all();
2960 + invalidate_mapping_pages(mapping, start_index,
2961 end_index);
2962 + }
2963 + }
2964 break;
2965 default:
2966 ret = -EINVAL;
2967 diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
2968 index 862b608..8d1ca2d 100644
2969 --- a/mm/mmu_notifier.c
2970 +++ b/mm/mmu_notifier.c
2971 @@ -14,10 +14,14 @@
2972 #include <linux/export.h>
2973 #include <linux/mm.h>
2974 #include <linux/err.h>
2975 +#include <linux/srcu.h>
2976 #include <linux/rcupdate.h>
2977 #include <linux/sched.h>
2978 #include <linux/slab.h>
2979
2980 +/* global SRCU for all MMs */
2981 +static struct srcu_struct srcu;
2982 +
2983 /*
2984 * This function can't run concurrently against mmu_notifier_register
2985 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
2986 @@ -25,58 +29,61 @@
2987 * in parallel despite there being no task using this mm any more,
2988 * through the vmas outside of the exit_mmap context, such as with
2989 * vmtruncate. This serializes against mmu_notifier_unregister with
2990 - * the mmu_notifier_mm->lock in addition to RCU and it serializes
2991 - * against the other mmu notifiers with RCU. struct mmu_notifier_mm
2992 + * the mmu_notifier_mm->lock in addition to SRCU and it serializes
2993 + * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
2994 * can't go away from under us as exit_mmap holds an mm_count pin
2995 * itself.
2996 */
2997 void __mmu_notifier_release(struct mm_struct *mm)
2998 {
2999 struct mmu_notifier *mn;
3000 - struct hlist_node *n;
3001 + int id;
3002
3003 /*
3004 - * RCU here will block mmu_notifier_unregister until
3005 - * ->release returns.
3006 + * srcu_read_lock() here will block synchronize_srcu() in
3007 + * mmu_notifier_unregister() until all registered
3008 + * ->release() callouts this function makes have
3009 + * returned.
3010 */
3011 - rcu_read_lock();
3012 - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
3013 - /*
3014 - * if ->release runs before mmu_notifier_unregister it
3015 - * must be handled as it's the only way for the driver
3016 - * to flush all existing sptes and stop the driver
3017 - * from establishing any more sptes before all the
3018 - * pages in the mm are freed.
3019 - */
3020 - if (mn->ops->release)
3021 - mn->ops->release(mn, mm);
3022 - rcu_read_unlock();
3023 -
3024 + id = srcu_read_lock(&srcu);
3025 spin_lock(&mm->mmu_notifier_mm->lock);
3026 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
3027 mn = hlist_entry(mm->mmu_notifier_mm->list.first,
3028 struct mmu_notifier,
3029 hlist);
3030 +
3031 /*
3032 - * We arrived before mmu_notifier_unregister so
3033 - * mmu_notifier_unregister will do nothing other than
3034 - * to wait ->release to finish and
3035 - * mmu_notifier_unregister to return.
3036 + * Unlink. This will prevent mmu_notifier_unregister()
3037 + * from also making the ->release() callout.
3038 */
3039 hlist_del_init_rcu(&mn->hlist);
3040 + spin_unlock(&mm->mmu_notifier_mm->lock);
3041 +
3042 + /*
3043 + * Clear sptes. (see 'release' description in mmu_notifier.h)
3044 + */
3045 + if (mn->ops->release)
3046 + mn->ops->release(mn, mm);
3047 +
3048 + spin_lock(&mm->mmu_notifier_mm->lock);
3049 }
3050 spin_unlock(&mm->mmu_notifier_mm->lock);
3051
3052 /*
3053 - * synchronize_rcu here prevents mmu_notifier_release to
3054 - * return to exit_mmap (which would proceed freeing all pages
3055 - * in the mm) until the ->release method returns, if it was
3056 - * invoked by mmu_notifier_unregister.
3057 - *
3058 - * The mmu_notifier_mm can't go away from under us because one
3059 - * mm_count is hold by exit_mmap.
3060 + * All callouts to ->release() which we have done are complete.
3061 + * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
3062 + */
3063 + srcu_read_unlock(&srcu, id);
3064 +
3065 + /*
3066 + * mmu_notifier_unregister() may have unlinked a notifier and may
3067 + * still be calling out to it. Additionally, other notifiers
3068 + * may have been active via vmtruncate() et. al. Block here
3069 + * to ensure that all notifier callouts for this mm have been
3070 + * completed and the sptes are really cleaned up before returning
3071 + * to exit_mmap().
3072 */
3073 - synchronize_rcu();
3074 + synchronize_srcu(&srcu);
3075 }
3076
3077 /*
3078 @@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
3079 {
3080 struct mmu_notifier *mn;
3081 struct hlist_node *n;
3082 - int young = 0;
3083 + int young = 0, id;
3084
3085 - rcu_read_lock();
3086 + id = srcu_read_lock(&srcu);
3087 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3088 if (mn->ops->clear_flush_young)
3089 young |= mn->ops->clear_flush_young(mn, mm, address);
3090 }
3091 - rcu_read_unlock();
3092 + srcu_read_unlock(&srcu, id);
3093
3094 return young;
3095 }
3096 @@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
3097 {
3098 struct mmu_notifier *mn;
3099 struct hlist_node *n;
3100 - int young = 0;
3101 + int young = 0, id;
3102
3103 - rcu_read_lock();
3104 + id = srcu_read_lock(&srcu);
3105 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3106 if (mn->ops->test_young) {
3107 young = mn->ops->test_young(mn, mm, address);
3108 @@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
3109 break;
3110 }
3111 }
3112 - rcu_read_unlock();
3113 + srcu_read_unlock(&srcu, id);
3114
3115 return young;
3116 }
3117 @@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
3118 {
3119 struct mmu_notifier *mn;
3120 struct hlist_node *n;
3121 + int id;
3122
3123 - rcu_read_lock();
3124 + id = srcu_read_lock(&srcu);
3125 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3126 if (mn->ops->change_pte)
3127 mn->ops->change_pte(mn, mm, address, pte);
3128 @@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
3129 else if (mn->ops->invalidate_page)
3130 mn->ops->invalidate_page(mn, mm, address);
3131 }
3132 - rcu_read_unlock();
3133 + srcu_read_unlock(&srcu, id);
3134 }
3135
3136 void __mmu_notifier_invalidate_page(struct mm_struct *mm,
3137 @@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
3138 {
3139 struct mmu_notifier *mn;
3140 struct hlist_node *n;
3141 + int id;
3142
3143 - rcu_read_lock();
3144 + id = srcu_read_lock(&srcu);
3145 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3146 if (mn->ops->invalidate_page)
3147 mn->ops->invalidate_page(mn, mm, address);
3148 }
3149 - rcu_read_unlock();
3150 + srcu_read_unlock(&srcu, id);
3151 }
3152
3153 void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
3154 @@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
3155 {
3156 struct mmu_notifier *mn;
3157 struct hlist_node *n;
3158 + int id;
3159
3160 - rcu_read_lock();
3161 + id = srcu_read_lock(&srcu);
3162 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3163 if (mn->ops->invalidate_range_start)
3164 mn->ops->invalidate_range_start(mn, mm, start, end);
3165 }
3166 - rcu_read_unlock();
3167 + srcu_read_unlock(&srcu, id);
3168 }
3169
3170 void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
3171 @@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
3172 {
3173 struct mmu_notifier *mn;
3174 struct hlist_node *n;
3175 + int id;
3176
3177 - rcu_read_lock();
3178 + id = srcu_read_lock(&srcu);
3179 hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
3180 if (mn->ops->invalidate_range_end)
3181 mn->ops->invalidate_range_end(mn, mm, start, end);
3182 }
3183 - rcu_read_unlock();
3184 + srcu_read_unlock(&srcu, id);
3185 }
3186
3187 static int do_mmu_notifier_register(struct mmu_notifier *mn,
3188 @@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
3189
3190 BUG_ON(atomic_read(&mm->mm_users) <= 0);
3191
3192 + /*
3193 + * Verify that mmu_notifier_init() already run and the global srcu is
3194 + * initialized.
3195 + */
3196 + BUG_ON(!srcu.per_cpu_ref);
3197 +
3198 ret = -ENOMEM;
3199 mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
3200 if (unlikely(!mmu_notifier_mm))
3201 @@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
3202 /*
3203 * This releases the mm_count pin automatically and frees the mm
3204 * structure if it was the last user of it. It serializes against
3205 - * running mmu notifiers with RCU and against mmu_notifier_unregister
3206 - * with the unregister lock + RCU. All sptes must be dropped before
3207 + * running mmu notifiers with SRCU and against mmu_notifier_unregister
3208 + * with the unregister lock + SRCU. All sptes must be dropped before
3209 * calling mmu_notifier_unregister. ->release or any other notifier
3210 * method may be invoked concurrently with mmu_notifier_unregister,
3211 * and only after mmu_notifier_unregister returned we're guaranteed
3212 @@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
3213 {
3214 BUG_ON(atomic_read(&mm->mm_count) <= 0);
3215
3216 + spin_lock(&mm->mmu_notifier_mm->lock);
3217 if (!hlist_unhashed(&mn->hlist)) {
3218 - /*
3219 - * RCU here will force exit_mmap to wait ->release to finish
3220 - * before freeing the pages.
3221 - */
3222 - rcu_read_lock();
3223 + int id;
3224
3225 /*
3226 - * exit_mmap will block in mmu_notifier_release to
3227 - * guarantee ->release is called before freeing the
3228 - * pages.
3229 + * Ensure we synchronize up with __mmu_notifier_release().
3230 */
3231 + id = srcu_read_lock(&srcu);
3232 +
3233 + hlist_del_rcu(&mn->hlist);
3234 + spin_unlock(&mm->mmu_notifier_mm->lock);
3235 +
3236 if (mn->ops->release)
3237 mn->ops->release(mn, mm);
3238 - rcu_read_unlock();
3239
3240 - spin_lock(&mm->mmu_notifier_mm->lock);
3241 - hlist_del_rcu(&mn->hlist);
3242 + /*
3243 + * Allow __mmu_notifier_release() to complete.
3244 + */
3245 + srcu_read_unlock(&srcu, id);
3246 + } else
3247 spin_unlock(&mm->mmu_notifier_mm->lock);
3248 - }
3249
3250 /*
3251 - * Wait any running method to finish, of course including
3252 - * ->release if it was run by mmu_notifier_relase instead of us.
3253 + * Wait for any running method to finish, including ->release() if it
3254 + * was run by __mmu_notifier_release() instead of us.
3255 */
3256 - synchronize_rcu();
3257 + synchronize_srcu(&srcu);
3258
3259 BUG_ON(atomic_read(&mm->mm_count) <= 0);
3260
3261 mmdrop(mm);
3262 }
3263 EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
3264 +
3265 +static int __init mmu_notifier_init(void)
3266 +{
3267 + return init_srcu_struct(&srcu);
3268 +}
3269 +
3270 +module_init(mmu_notifier_init);
3271 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3272 index 691b8ec..533ea80 100644
3273 --- a/mm/page_alloc.c
3274 +++ b/mm/page_alloc.c
3275 @@ -4216,10 +4216,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3276 * round what is now in bits to nearest long in bits, then return it in
3277 * bytes.
3278 */
3279 -static unsigned long __init usemap_size(unsigned long zonesize)
3280 +static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
3281 {
3282 unsigned long usemapsize;
3283
3284 + zonesize += zone_start_pfn & (pageblock_nr_pages-1);
3285 usemapsize = roundup(zonesize, pageblock_nr_pages);
3286 usemapsize = usemapsize >> pageblock_order;
3287 usemapsize *= NR_PAGEBLOCK_BITS;
3288 @@ -4229,17 +4230,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
3289 }
3290
3291 static void __init setup_usemap(struct pglist_data *pgdat,
3292 - struct zone *zone, unsigned long zonesize)
3293 + struct zone *zone,
3294 + unsigned long zone_start_pfn,
3295 + unsigned long zonesize)
3296 {
3297 - unsigned long usemapsize = usemap_size(zonesize);
3298 + unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
3299 zone->pageblock_flags = NULL;
3300 if (usemapsize)
3301 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
3302 usemapsize);
3303 }
3304 #else
3305 -static inline void setup_usemap(struct pglist_data *pgdat,
3306 - struct zone *zone, unsigned long zonesize) {}
3307 +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
3308 + unsigned long zone_start_pfn, unsigned long zonesize) {}
3309 #endif /* CONFIG_SPARSEMEM */
3310
3311 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3312 @@ -4367,7 +4370,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3313 continue;
3314
3315 set_pageblock_order(pageblock_default_order());
3316 - setup_usemap(pgdat, zone, size);
3317 + setup_usemap(pgdat, zone, zone_start_pfn, size);
3318 ret = init_currently_empty_zone(zone, zone_start_pfn,
3319 size, MEMMAP_EARLY);
3320 BUG_ON(ret);
3321 diff --git a/mm/shmem.c b/mm/shmem.c
3322 index a409bd8..58c4a47 100644
3323 --- a/mm/shmem.c
3324 +++ b/mm/shmem.c
3325 @@ -2177,6 +2177,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3326 unsigned long inodes;
3327 int error = -EINVAL;
3328
3329 + config.mpol = NULL;
3330 if (shmem_parse_options(data, &config, true))
3331 return error;
3332
3333 @@ -2201,8 +2202,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3334 sbinfo->max_inodes = config.max_inodes;
3335 sbinfo->free_inodes = config.max_inodes - inodes;
3336
3337 - mpol_put(sbinfo->mpol);
3338 - sbinfo->mpol = config.mpol; /* transfers initial ref */
3339 + /*
3340 + * Preserve previous mempolicy unless mpol remount option was specified.
3341 + */
3342 + if (config.mpol) {
3343 + mpol_put(sbinfo->mpol);
3344 + sbinfo->mpol = config.mpol; /* transfers initial ref */
3345 + }
3346 out:
3347 spin_unlock(&sbinfo->stat_lock);
3348 return error;
3349 diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
3350 index e16aade..718cbe8 100644
3351 --- a/net/bridge/br_stp_bpdu.c
3352 +++ b/net/bridge/br_stp_bpdu.c
3353 @@ -16,6 +16,7 @@
3354 #include <linux/etherdevice.h>
3355 #include <linux/llc.h>
3356 #include <linux/slab.h>
3357 +#include <linux/pkt_sched.h>
3358 #include <net/net_namespace.h>
3359 #include <net/llc.h>
3360 #include <net/llc_pdu.h>
3361 @@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
3362
3363 skb->dev = p->dev;
3364 skb->protocol = htons(ETH_P_802_2);
3365 + skb->priority = TC_PRIO_CONTROL;
3366
3367 skb_reserve(skb, LLC_RESERVE);
3368 memcpy(__skb_put(skb, length), data, length);
3369 diff --git a/net/core/datagram.c b/net/core/datagram.c
3370 index e4fbfd6..da7e0c8 100644
3371 --- a/net/core/datagram.c
3372 +++ b/net/core/datagram.c
3373 @@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3374 skb_queue_walk(queue, skb) {
3375 *peeked = skb->peeked;
3376 if (flags & MSG_PEEK) {
3377 - if (*off >= skb->len) {
3378 + if (*off >= skb->len && skb->len) {
3379 *off -= skb->len;
3380 continue;
3381 }
3382 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
3383 index b9868e1..aa74be4 100644
3384 --- a/net/core/sock_diag.c
3385 +++ b/net/core/sock_diag.c
3386 @@ -126,6 +126,9 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3387 if (nlmsg_len(nlh) < sizeof(*req))
3388 return -EINVAL;
3389
3390 + if (req->sdiag_family >= AF_MAX)
3391 + return -EINVAL;
3392 +
3393 hndl = sock_diag_lock_handler(req->sdiag_family);
3394 if (hndl == NULL)
3395 err = -ENOENT;
3396 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
3397 index 10e3751..78ec298 100644
3398 --- a/net/ipv4/af_inet.c
3399 +++ b/net/ipv4/af_inet.c
3400 @@ -227,8 +227,12 @@ EXPORT_SYMBOL(inet_listen);
3401 u32 inet_ehash_secret __read_mostly;
3402 EXPORT_SYMBOL(inet_ehash_secret);
3403
3404 +u32 ipv6_hash_secret __read_mostly;
3405 +EXPORT_SYMBOL(ipv6_hash_secret);
3406 +
3407 /*
3408 - * inet_ehash_secret must be set exactly once
3409 + * inet_ehash_secret must be set exactly once, and to a non nul value
3410 + * ipv6_hash_secret must be set exactly once.
3411 */
3412 void build_ehash_secret(void)
3413 {
3414 @@ -238,7 +242,8 @@ void build_ehash_secret(void)
3415 get_random_bytes(&rnd, sizeof(rnd));
3416 } while (rnd == 0);
3417
3418 - cmpxchg(&inet_ehash_secret, 0, rnd);
3419 + if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
3420 + get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
3421 }
3422 EXPORT_SYMBOL(build_ehash_secret);
3423
3424 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
3425 index 50009c7..c234bda 100644
3426 --- a/net/ipv4/ping.c
3427 +++ b/net/ipv4/ping.c
3428 @@ -321,8 +321,8 @@ void ping_err(struct sk_buff *skb, u32 info)
3429 struct iphdr *iph = (struct iphdr *)skb->data;
3430 struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
3431 struct inet_sock *inet_sock;
3432 - int type = icmph->type;
3433 - int code = icmph->code;
3434 + int type = icmp_hdr(skb)->type;
3435 + int code = icmp_hdr(skb)->code;
3436 struct net *net = dev_net(skb->dev);
3437 struct sock *sk;
3438 int harderr;
3439 diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
3440 index bdd6164..131fd1f 100644
3441 --- a/sound/pci/ali5451/ali5451.c
3442 +++ b/sound/pci/ali5451/ali5451.c
3443 @@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
3444
3445 spin_lock(&codec->reg_lock);
3446 if (!pvoice->running) {
3447 - spin_unlock_irq(&codec->reg_lock);
3448 + spin_unlock(&codec->reg_lock);
3449 return 0;
3450 }
3451 outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
3452 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3453 index d1b805a..02a6e3f 100644
3454 --- a/sound/pci/hda/patch_hdmi.c
3455 +++ b/sound/pci/hda/patch_hdmi.c
3456 @@ -924,8 +924,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
3457 if (!static_hdmi_pcm && eld->eld_valid) {
3458 snd_hdmi_eld_update_pcm_info(eld, hinfo);
3459 if (hinfo->channels_min > hinfo->channels_max ||
3460 - !hinfo->rates || !hinfo->formats)
3461 + !hinfo->rates || !hinfo->formats) {
3462 + per_cvt->assigned = 0;
3463 + hinfo->nid = 0;
3464 + snd_hda_spdif_ctls_unassign(codec, pin_idx);
3465 return -ENODEV;
3466 + }
3467 }
3468
3469 /* Store the updated parameters */
3470 @@ -989,6 +993,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
3471 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
3472 codec->addr, pin_nid, eld->monitor_present, eld_valid);
3473
3474 + eld->eld_valid = false;
3475 if (eld_valid) {
3476 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
3477 snd_hdmi_show_eld(eld);
3478 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3479 index f7f8776..adb97d6 100644
3480 --- a/sound/pci/hda/patch_realtek.c
3481 +++ b/sound/pci/hda/patch_realtek.c
3482 @@ -5440,6 +5440,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3483 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
3484 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
3485 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
3486 + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
3487
3488 /* All Apple entries are in codec SSIDs */
3489 SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
3490 diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
3491 index b4819d5..64da910 100644
3492 --- a/sound/pci/rme32.c
3493 +++ b/sound/pci/rme32.c
3494 @@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
3495 spin_lock_irq(&rme32->lock);
3496 rme32->capture_substream = NULL;
3497 rme32->capture_periodsize = 0;
3498 - spin_unlock(&rme32->lock);
3499 + spin_unlock_irq(&rme32->lock);
3500 return 0;
3501 }
3502
3503 diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
3504 index 8e0cf14..9932aac 100644
3505 --- a/sound/soc/codecs/wm2200.c
3506 +++ b/sound/soc/codecs/wm2200.c
3507 @@ -990,9 +990,9 @@ SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,
3508
3509 SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
3510 WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
3511 -SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
3512 +SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_2L,
3513 WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
3514 -SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
3515 +SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_3L,
3516 WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),
3517
3518 SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
3519 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
3520 index 63128cd..fa4c2f7 100644
3521 --- a/sound/usb/quirks-table.h
3522 +++ b/sound/usb/quirks-table.h
3523 @@ -1658,7 +1658,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
3524 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
3525 /* .vendor_name = "Roland", */
3526 /* .product_name = "A-PRO", */
3527 - .ifnum = 1,
3528 + .ifnum = 0,
3529 .type = QUIRK_MIDI_FIXED_ENDPOINT,
3530 .data = & (const struct snd_usb_midi_endpoint_info) {
3531 .out_cables = 0x0003,