Magellan Linux

Annotation of /trunk/kernel26-alx/patches-2.6.23-r1/0114-2.6.23.15-all-fixes.patch

Parent Directory | Revision Log


Revision 658
Mon Jun 23 21:39:39 2008 UTC (16 years ago) by niro
File size: 263041 byte(s)
2.6.23-alx-r1: new default, as we fix the VIA EPIA clocksource=tsc quirks (boot-parameter illustration below the component list)
-linux-2.6.23.17
-fbcondecor-0.9.4
-squashfs-3.3
-unionfs-2.3.3
-ipw3945-1.2.2
-mptbase-vmware fix
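
As a rough illustration only (not part of this patch set), the clocksource quirk noted above concerns the kernel's clocksource= boot parameter; on an affected VIA EPIA board, forcing the TSC clocksource means appending clocksource=tsc to the kernel command line. The image path and root device below are placeholders:

    kernel /boot/vmlinuz root=/dev/sda1 clocksource=tsc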

1 niro 658 diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
2     index f02a8ac..14dc111 100644
3     --- a/arch/i386/kernel/apm.c
4     +++ b/arch/i386/kernel/apm.c
5     @@ -2256,14 +2256,12 @@ static int __init apm_init(void)
6     apm_info.disabled = 1;
7     return -ENODEV;
8     }
9     - if (PM_IS_ACTIVE()) {
10     + if (pm_flags & PM_ACPI) {
11     printk(KERN_NOTICE "apm: overridden by ACPI.\n");
12     apm_info.disabled = 1;
13     return -ENODEV;
14     }
15     -#ifdef CONFIG_PM_LEGACY
16     - pm_active = 1;
17     -#endif
18     + pm_flags |= PM_APM;
19    
20     /*
21     * Set up a segment that references the real mode segment 0x40
22     @@ -2366,9 +2364,7 @@ static void __exit apm_exit(void)
23     kthread_stop(kapmd_task);
24     kapmd_task = NULL;
25     }
26     -#ifdef CONFIG_PM_LEGACY
27     - pm_active = 0;
28     -#endif
29     + pm_flags &= ~PM_APM;
30     }
31    
32     module_init(apm_init);
33     diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
34     index fe6aa5a..ecf401b 100644
35     --- a/arch/ia64/kernel/unaligned.c
36     +++ b/arch/ia64/kernel/unaligned.c
37     @@ -1487,16 +1487,19 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
38     case LDFA_OP:
39     case LDFCCLR_OP:
40     case LDFCNC_OP:
41     - case LDF_IMM_OP:
42     - case LDFA_IMM_OP:
43     - case LDFCCLR_IMM_OP:
44     - case LDFCNC_IMM_OP:
45     if (u.insn.x)
46     ret = emulate_load_floatpair(ifa, u.insn, regs);
47     else
48     ret = emulate_load_float(ifa, u.insn, regs);
49     break;
50    
51     + case LDF_IMM_OP:
52     + case LDFA_IMM_OP:
53     + case LDFCCLR_IMM_OP:
54     + case LDFCNC_IMM_OP:
55     + ret = emulate_load_float(ifa, u.insn, regs);
56     + break;
57     +
58     case STF_OP:
59     case STF_IMM_OP:
60     ret = emulate_store_float(ifa, u.insn, regs);
61     diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
62     index 777d345..6d4f02e 100644
63     --- a/arch/sparc64/kernel/chmc.c
64     +++ b/arch/sparc64/kernel/chmc.c
65     @@ -1,7 +1,6 @@
66     -/* $Id: 0114-2.6.23.15-all-fixes.patch,v 1.1 2008-06-23 21:39:39 niro Exp $
67     - * memctrlr.c: Driver for UltraSPARC-III memory controller.
68     +/* memctrlr.c: Driver for UltraSPARC-III memory controller.
69     *
70     - * Copyright (C) 2001 David S. Miller (davem@redhat.com)
71     + * Copyright (C) 2001, 2007 David S. Miller (davem@davemloft.net)
72     */
73    
74     #include <linux/module.h>
75     @@ -16,6 +15,7 @@
76     #include <linux/init.h>
77     #include <asm/spitfire.h>
78     #include <asm/chmctrl.h>
79     +#include <asm/cpudata.h>
80     #include <asm/oplib.h>
81     #include <asm/prom.h>
82     #include <asm/io.h>
83     @@ -242,8 +242,11 @@ int chmc_getunumber(int syndrome_code,
84     */
85     static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
86     {
87     - unsigned long ret;
88     - int this_cpu = get_cpu();
89     + unsigned long ret, this_cpu;
90     +
91     + preempt_disable();
92     +
93     + this_cpu = real_hard_smp_processor_id();
94    
95     if (mp->portid == this_cpu) {
96     __asm__ __volatile__("ldxa [%1] %2, %0"
97     @@ -255,7 +258,8 @@ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
98     : "r" (mp->regs + offset),
99     "i" (ASI_PHYS_BYPASS_EC_E));
100     }
101     - put_cpu();
102     +
103     + preempt_enable();
104    
105     return ret;
106     }
107     diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
108     index 8059531..193791c 100644
109     --- a/arch/sparc64/kernel/entry.S
110     +++ b/arch/sparc64/kernel/entry.S
111     @@ -2593,3 +2593,15 @@ sun4v_mmustat_info:
112     retl
113     nop
114     .size sun4v_mmustat_info, .-sun4v_mmustat_info
115     +
116     + .globl sun4v_mmu_demap_all
117     + .type sun4v_mmu_demap_all,#function
118     +sun4v_mmu_demap_all:
119     + clr %o0
120     + clr %o1
121     + mov HV_MMU_ALL, %o2
122     + mov HV_FAST_MMU_DEMAP_ALL, %o5
123     + ta HV_FAST_TRAP
124     + retl
125     + nop
126     + .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
127     diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
128     index e8dac81..9bc05cf 100644
129     --- a/arch/sparc64/kernel/pci.c
130     +++ b/arch/sparc64/kernel/pci.c
131     @@ -1276,4 +1276,20 @@ int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
132     return (device_mask & dma_addr_mask) == dma_addr_mask;
133     }
134    
135     +void pci_resource_to_user(const struct pci_dev *pdev, int bar,
136     + const struct resource *rp, resource_size_t *start,
137     + resource_size_t *end)
138     +{
139     + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
140     + unsigned long offset;
141     +
142     + if (rp->flags & IORESOURCE_IO)
143     + offset = pbm->io_space.start;
144     + else
145     + offset = pbm->mem_space.start;
146     +
147     + *start = rp->start - offset;
148     + *end = rp->end - offset;
149     +}
150     +
151     #endif /* !(CONFIG_PCI) */
152     diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
153     index c73b7a4..34e8a01 100644
154     --- a/arch/sparc64/kernel/smp.c
155     +++ b/arch/sparc64/kernel/smp.c
156     @@ -476,7 +476,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
157     */
158     static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
159     {
160     - u64 pstate, ver;
161     + u64 pstate, ver, busy_mask;
162     int nack_busy_id, is_jbus, need_more;
163    
164     if (cpus_empty(mask))
165     @@ -508,14 +508,20 @@ retry:
166     "i" (ASI_INTR_W));
167    
168     nack_busy_id = 0;
169     + busy_mask = 0;
170     {
171     int i;
172    
173     for_each_cpu_mask(i, mask) {
174     u64 target = (i << 14) | 0x70;
175    
176     - if (!is_jbus)
177     + if (is_jbus) {
178     + busy_mask |= (0x1UL << (i * 2));
179     + } else {
180     target |= (nack_busy_id << 24);
181     + busy_mask |= (0x1UL <<
182     + (nack_busy_id * 2));
183     + }
184     __asm__ __volatile__(
185     "stxa %%g0, [%0] %1\n\t"
186     "membar #Sync\n\t"
187     @@ -531,15 +537,16 @@ retry:
188    
189     /* Now, poll for completion. */
190     {
191     - u64 dispatch_stat;
192     + u64 dispatch_stat, nack_mask;
193     long stuck;
194    
195     stuck = 100000 * nack_busy_id;
196     + nack_mask = busy_mask << 1;
197     do {
198     __asm__ __volatile__("ldxa [%%g0] %1, %0"
199     : "=r" (dispatch_stat)
200     : "i" (ASI_INTR_DISPATCH_STAT));
201     - if (dispatch_stat == 0UL) {
202     + if (!(dispatch_stat & (busy_mask | nack_mask))) {
203     __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
204     : : "r" (pstate));
205     if (unlikely(need_more)) {
206     @@ -556,12 +563,12 @@ retry:
207     }
208     if (!--stuck)
209     break;
210     - } while (dispatch_stat & 0x5555555555555555UL);
211     + } while (dispatch_stat & busy_mask);
212    
213     __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
214     : : "r" (pstate));
215    
216     - if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
217     + if (dispatch_stat & busy_mask) {
218     /* Busy bits will not clear, continue instead
219     * of freezing up on this cpu.
220     */
221     diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
222     index 3010227..ed2484d 100644
223     --- a/arch/sparc64/mm/init.c
224     +++ b/arch/sparc64/mm/init.c
225     @@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
226     }
227     }
228    
229     -static void __init kernel_physical_mapping_init(void)
230     +static void __init init_kpte_bitmap(void)
231     {
232     unsigned long i;
233     -#ifdef CONFIG_DEBUG_PAGEALLOC
234     - unsigned long mem_alloced = 0UL;
235     -#endif
236     -
237     - read_obp_memory("reg", &pall[0], &pall_ents);
238    
239     for (i = 0; i < pall_ents; i++) {
240     unsigned long phys_start, phys_end;
241     @@ -1151,14 +1146,24 @@ static void __init kernel_physical_mapping_init(void)
242     phys_end = phys_start + pall[i].reg_size;
243    
244     mark_kpte_bitmap(phys_start, phys_end);
245     + }
246     +}
247    
248     +static void __init kernel_physical_mapping_init(void)
249     +{
250     #ifdef CONFIG_DEBUG_PAGEALLOC
251     + unsigned long i, mem_alloced = 0UL;
252     +
253     + for (i = 0; i < pall_ents; i++) {
254     + unsigned long phys_start, phys_end;
255     +
256     + phys_start = pall[i].phys_addr;
257     + phys_end = phys_start + pall[i].reg_size;
258     +
259     mem_alloced += kernel_map_range(phys_start, phys_end,
260     PAGE_KERNEL);
261     -#endif
262     }
263    
264     -#ifdef CONFIG_DEBUG_PAGEALLOC
265     printk("Allocated %ld bytes for kernel page tables.\n",
266     mem_alloced);
267    
268     @@ -1400,6 +1405,10 @@ void __init paging_init(void)
269    
270     inherit_prom_mappings();
271    
272     + read_obp_memory("reg", &pall[0], &pall_ents);
273     +
274     + init_kpte_bitmap();
275     +
276     /* Ok, we can use our TLB miss and window trap handlers safely. */
277     setup_tba();
278    
279     @@ -1854,7 +1863,9 @@ void __flush_tlb_all(void)
280     "wrpr %0, %1, %%pstate"
281     : "=r" (pstate)
282     : "i" (PSTATE_IE));
283     - if (tlb_type == spitfire) {
284     + if (tlb_type == hypervisor) {
285     + sun4v_mmu_demap_all();
286     + } else if (tlb_type == spitfire) {
287     for (i = 0; i < 64; i++) {
288     /* Spitfire Errata #32 workaround */
289     /* NOTE: Always runs on spitfire, so no
290     diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
291     index 3ec110c..d2e5298 100644
292     --- a/drivers/acpi/blacklist.c
293     +++ b/drivers/acpi/blacklist.c
294     @@ -3,6 +3,7 @@
295     *
296     * Check to see if the given machine has a known bad ACPI BIOS
297     * or if the BIOS is too old.
298     + * Check given machine against acpi_osi_dmi_table[].
299     *
300     * Copyright (C) 2004 Len Brown <len.brown@intel.com>
301     * Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
302     @@ -50,6 +51,8 @@ struct acpi_blacklist_item {
303     u32 is_critical_error;
304     };
305    
306     +static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
307     +
308     /*
309     * POLICY: If *anything* doesn't work, put it on the blacklist.
310     * If they are critical errors, mark it critical, and abort driver load.
311     @@ -67,8 +70,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
312     /* IBM 600E - _ADR should return 7, but it returns 1 */
313     {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
314     "Incorrect _ADR", 1},
315     - {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
316     - "Bogus PCI routing", 1},
317    
318     {""}
319     };
320     @@ -165,5 +166,388 @@ int __init acpi_blacklisted(void)
321    
322     blacklisted += blacklist_by_year();
323    
324     + dmi_check_system(acpi_osi_dmi_table);
325     +
326     return blacklisted;
327     }
328     +#ifdef CONFIG_DMI
329     +static int __init dmi_enable_osi_linux(struct dmi_system_id *d)
330     +{
331     + acpi_dmi_osi_linux(1, d); /* enable */
332     + return 0;
333     +}
334     +static int __init dmi_disable_osi_linux(struct dmi_system_id *d)
335     +{
336     + acpi_dmi_osi_linux(0, d); /* disable */
337     + return 0;
338     +}
339     +static int __init dmi_unknown_osi_linux(struct dmi_system_id *d)
340     +{
341     + acpi_dmi_osi_linux(-1, d); /* unknown */
342     + return 0;
343     +}
344     +
345     +/*
346     + * Most BIOS that invoke OSI(Linux) do nothing with it.
347     + * But some cause Linux to break.
348     + * Only a couple use it to make Linux run better.
349     + *
350     + * Thus, Linux should continue to disable OSI(Linux) by default,
351     + * should continue to discourage BIOS writers from using it, and
352     + * should whitelist the few existing systems that require it.
353     + *
354     + * If it appears clear a vendor isn't using OSI(Linux)
355     + * for anything constructive, blacklist them by name to disable
356     + * unnecessary dmesg warnings on all of their products.
357     + */
358     +
359     +static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
360     + /*
361     + * Disable OSI(Linux) warnings on all "Acer, inc."
362     + *
363     + * _OSI(Linux) disables the latest Windows BIOS code:
364     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
365     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5050"),
366     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
367     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5580"),
368     + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 3010"),
369     + * _OSI(Linux) effect unknown:
370     + * DMI_MATCH(DMI_PRODUCT_NAME, "Ferrari 5000"),
371     + */
372     + /*
373     + * note that dmi_check_system() uses strstr()
374     + * to match sub-strings rather than !strcmp(),
375     + * so "Acer" below matches "Acer, inc." above.
376     + */
377     + /*
378     + * Disable OSI(Linux) warnings on all "Acer"
379     + *
380     + * _OSI(Linux) effect unknown:
381     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
382     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
383     + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720Z"),
384     + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
385     + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
386     + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
387     + * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
388     + */
389     + {
390     + .callback = dmi_unknown_osi_linux,
391     + .ident = "Acer",
392     + .matches = {
393     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
394     + },
395     + },
396     + /*
397     + * Disable OSI(Linux) warnings on all "Apple Computer, Inc."
398     + *
399     + * _OSI(Linux) confirmed to be a NOP:
400     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
401     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
402     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
403     + * _OSI(Linux) effect unknown:
404     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacPro2,1"),
405     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
406     + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
407     + */
408     + {
409     + .callback = dmi_disable_osi_linux,
410     + .ident = "Apple",
411     + .matches = {
412     + DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
413     + },
414     + },
415     + /*
416     + * Disable OSI(Linux) warnings on all "BenQ"
417     + *
418     + * _OSI(Linux) confirmed to be a NOP:
419     + * DMI_MATCH(DMI_PRODUCT_NAME, "Joybook S31"),
420     + */
421     + {
422     + .callback = dmi_disable_osi_linux,
423     + .ident = "BenQ",
424     + .matches = {
425     + DMI_MATCH(DMI_SYS_VENDOR, "BenQ"),
426     + },
427     + },
428     + /*
429     + * Disable OSI(Linux) warnings on all "Clevo Co."
430     + *
431     + * _OSI(Linux) confirmed to be a NOP:
432     + * DMI_MATCH(DMI_PRODUCT_NAME, "M570RU"),
433     + */
434     + {
435     + .callback = dmi_disable_osi_linux,
436     + .ident = "Clevo",
437     + .matches = {
438     + DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
439     + },
440     + },
441     + /*
442     + * Disable OSI(Linux) warnings on all "COMPAL"
443     + *
444     + * _OSI(Linux) confirmed to be a NOP:
445     + * DMI_MATCH(DMI_BOARD_NAME, "HEL8X"),
446     + * _OSI(Linux) unknown effect:
447     + * DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
448     + */
449     + {
450     + .callback = dmi_unknown_osi_linux,
451     + .ident = "Compal",
452     + .matches = {
453     + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
454     + },
455     + },
456     + { /* OSI(Linux) touches USB, unknown side-effect */
457     + .callback = dmi_disable_osi_linux,
458     + .ident = "Dell Dimension 5150",
459     + .matches = {
460     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
461     + DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM051"),
462     + },
463     + },
464     + { /* OSI(Linux) is a NOP */
465     + .callback = dmi_disable_osi_linux,
466     + .ident = "Dell",
467     + .matches = {
468     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
469     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1501"),
470     + },
471     + },
472     + { /* OSI(Linux) effect unknown */
473     + .callback = dmi_unknown_osi_linux,
474     + .ident = "Dell",
475     + .matches = {
476     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
477     + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D830"),
478     + },
479     + },
480     + { /* OSI(Linux) effect unknown */
481     + .callback = dmi_unknown_osi_linux,
482     + .ident = "Dell",
483     + .matches = {
484     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
485     + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
486     + },
487     + },
488     + { /* OSI(Linux) effect unknown */
489     + .callback = dmi_unknown_osi_linux,
490     + .ident = "Dell",
491     + .matches = {
492     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
493     + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1900"),
494     + },
495     + },
496     + { /* OSI(Linux) touches USB */
497     + .callback = dmi_disable_osi_linux,
498     + .ident = "Dell",
499     + .matches = {
500     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
501     + DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
502     + },
503     + },
504     + { /* OSI(Linux) is a NOP */
505     + .callback = dmi_disable_osi_linux,
506     + .ident = "Dell Vostro 1000",
507     + .matches = {
508     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
509     + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1000"),
510     + },
511     + },
512     + { /* OSI(Linux) effect unknown */
513     + .callback = dmi_unknown_osi_linux,
514     + .ident = "Dell",
515     + .matches = {
516     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
517     + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge SC440"),
518     + },
519     + },
520     + { /* OSI(Linux) effect unknown */
521     + .callback = dmi_unknown_osi_linux,
522     + .ident = "Dialogue Flybook V5",
523     + .matches = {
524     + DMI_MATCH(DMI_SYS_VENDOR, "Dialogue Technology Corporation"),
525     + DMI_MATCH(DMI_PRODUCT_NAME, "Flybook V5"),
526     + },
527     + },
528     + /*
529     + * Disable OSI(Linux) warnings on all "FUJITSU SIEMENS"
530     + *
531     + * _OSI(Linux) disables latest Windows BIOS code:
532     + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2510"),
533     + * _OSI(Linux) confirmed to be a NOP:
534     + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1536"),
535     + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1556"),
536     + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 1546"),
537     + * _OSI(Linux) unknown effect:
538     + * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo M1425"),
539     + * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo Si 1520"),
540     + * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
541     + */
542     + {
543     + .callback = dmi_disable_osi_linux,
544     + .ident = "Fujitsu Siemens",
545     + .matches = {
546     + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
547     + },
548     + },
549     + /*
550     + * Disable OSI(Linux) warnings on all "Hewlett-Packard"
551     + *
552     + * _OSI(Linux) confirmed to be a NOP:
553     + * .ident = "HP Pavilion tx 1000"
554     + * DMI_MATCH(DMI_BOARD_NAME, "30BF"),
555     + * .ident = "HP Pavilion dv2000"
556     + * DMI_MATCH(DMI_BOARD_NAME, "30B5"),
557     + * .ident = "HP Pavilion dv5000",
558     + * DMI_MATCH(DMI_BOARD_NAME, "30A7"),
559     + * .ident = "HP Pavilion dv6300 30BC",
560     + * DMI_MATCH(DMI_BOARD_NAME, "30BC"),
561     + * .ident = "HP Pavilion dv6000",
562     + * DMI_MATCH(DMI_BOARD_NAME, "30B7"),
563     + * DMI_MATCH(DMI_BOARD_NAME, "30B8"),
564     + * .ident = "HP Pavilion dv9000",
565     + * DMI_MATCH(DMI_BOARD_NAME, "30B9"),
566     + * .ident = "HP Pavilion dv9500",
567     + * DMI_MATCH(DMI_BOARD_NAME, "30CB"),
568     + * .ident = "HP/Compaq Presario C500",
569     + * DMI_MATCH(DMI_BOARD_NAME, "30C6"),
570     + * .ident = "HP/Compaq Presario F500",
571     + * DMI_MATCH(DMI_BOARD_NAME, "30D3"),
572     + * _OSI(Linux) unknown effect:
573     + * .ident = "HP Pavilion dv6500",
574     + * DMI_MATCH(DMI_BOARD_NAME, "30D0"),
575     + */
576     + {
577     + .callback = dmi_disable_osi_linux,
578     + .ident = "Hewlett-Packard",
579     + .matches = {
580     + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
581     + },
582     + },
583     + /*
584     + * Lenovo has a mix of systems OSI(Linux) situations
585     + * and thus we can not wildcard the vendor.
586     + *
587     + * _OSI(Linux) helps sound
588     + * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
589     + * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
590     + * _OSI(Linux) is a NOP:
591     + * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
592     + */
593     + {
594     + .callback = dmi_enable_osi_linux,
595     + .ident = "Lenovo ThinkPad R61",
596     + .matches = {
597     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
598     + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
599     + },
600     + },
601     + {
602     + .callback = dmi_enable_osi_linux,
603     + .ident = "Lenovo ThinkPad T61",
604     + .matches = {
605     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
606     + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
607     + },
608     + },
609     + {
610     + .callback = dmi_unknown_osi_linux,
611     + .ident = "Lenovo 3000 V100",
612     + .matches = {
613     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
614     + DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
615     + },
616     + },
617     + {
618     + .callback = dmi_disable_osi_linux,
619     + .ident = "Lenovo 3000 N100",
620     + .matches = {
621     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
622     + DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
623     + },
624     + },
625     + /*
626     + * Disable OSI(Linux) warnings on all "LG Electronics"
627     + *
628     + * _OSI(Linux) confirmed to be a NOP:
629     + * DMI_MATCH(DMI_PRODUCT_NAME, "P1-J150B"),
630     + * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
631     + *
632     + * unknown:
633     + * DMI_MATCH(DMI_PRODUCT_NAME, "S1-MDGDG"),
634     + * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
635     + */
636     + {
637     + .callback = dmi_disable_osi_linux,
638     + .ident = "LG",
639     + .matches = {
640     + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
641     + },
642     + },
643     + /* NEC - OSI(Linux) effect unknown */
644     + {
645     + .callback = dmi_unknown_osi_linux,
646     + .ident = "NEC VERSA M360",
647     + .matches = {
648     + DMI_MATCH(DMI_SYS_VENDOR, "NEC Computers SAS"),
649     + DMI_MATCH(DMI_PRODUCT_NAME, "NEC VERSA M360"),
650     + },
651     + },
652     + /*
653     + * Disable OSI(Linux) warnings on all "Samsung Electronics"
654     + *
655     + * OSI(Linux) disables PNP0C32 and other BIOS code for Windows:
656     + * DMI_MATCH(DMI_PRODUCT_NAME, "R40P/R41P"),
657     + * DMI_MATCH(DMI_PRODUCT_NAME, "R59P/R60P/R61P"),
658     + */
659     + {
660     + .callback = dmi_disable_osi_linux,
661     + .ident = "Samsung",
662     + .matches = {
663     + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
664     + },
665     + },
666     + /*
667     + * Disable OSI(Linux) warnings on all "Sony Corporation"
668     + *
669     + * _OSI(Linux) is a NOP:
670     + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
671     + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ38GP_C"),
672     + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-TZ21MN_N"),
673     + * _OSI(Linux) unknown effect:
674     + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
675     + */
676     + {
677     + .callback = dmi_unknown_osi_linux,
678     + .ident = "Sony",
679     + .matches = {
680     + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
681     + },
682     + },
683     + /*
684     + * Disable OSI(Linux) warnings on all "TOSHIBA"
685     + *
686     + * _OSI(Linux) breaks sound (bugzilla 7787):
687     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P100"),
688     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P105"),
689     + * _OSI(Linux) is a NOP:
690     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A100"),
691     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A210"),
692     + * _OSI(Linux) unknown effect:
693     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A135"),
694     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A200"),
695     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P205"),
696     + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U305"),
697     + */
698     + {
699     + .callback = dmi_disable_osi_linux,
700     + .ident = "Toshiba",
701     + .matches = {
702     + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
703     + },
704     + },
705     + {}
706     +};
707     +
708     +#endif /* CONFIG_DMI */
709     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
710     index 9ba778a..222fcec 100644
711     --- a/drivers/acpi/bus.c
712     +++ b/drivers/acpi/bus.c
713     @@ -29,7 +29,6 @@
714     #include <linux/list.h>
715     #include <linux/sched.h>
716     #include <linux/pm.h>
717     -#include <linux/pm_legacy.h>
718     #include <linux/device.h>
719     #include <linux/proc_fs.h>
720     #ifdef CONFIG_X86
721     @@ -757,16 +756,14 @@ static int __init acpi_init(void)
722     result = acpi_bus_init();
723    
724     if (!result) {
725     -#ifdef CONFIG_PM_LEGACY
726     - if (!PM_IS_ACTIVE())
727     - pm_active = 1;
728     + if (!(pm_flags & PM_APM))
729     + pm_flags |= PM_ACPI;
730     else {
731     printk(KERN_INFO PREFIX
732     "APM is already active, exiting\n");
733     disable_acpi();
734     result = -ENODEV;
735     }
736     -#endif
737     } else
738     disable_acpi();
739    
740     diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
741     index a474ca2..954ac8c 100644
742     --- a/drivers/acpi/dispatcher/dsobject.c
743     +++ b/drivers/acpi/dispatcher/dsobject.c
744     @@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
745     return_ACPI_STATUS(status);
746     }
747     }
748     +
749     + /* Special object resolution for elements of a package */
750     +
751     + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
752     + (op->common.parent->common.aml_opcode ==
753     + AML_VAR_PACKAGE_OP)) {
754     + /*
755     + * Attempt to resolve the node to a value before we insert it into
756     + * the package. If this is a reference to a common data type,
757     + * resolve it immediately. According to the ACPI spec, package
758     + * elements can only be "data objects" or method references.
759     + * Attempt to resolve to an Integer, Buffer, String or Package.
760     + * If cannot, return the named reference (for things like Devices,
761     + * Methods, etc.) Buffer Fields and Fields will resolve to simple
762     + * objects (int/buf/str/pkg).
763     + *
764     + * NOTE: References to things like Devices, Methods, Mutexes, etc.
765     + * will remain as named references. This behavior is not described
766     + * in the ACPI spec, but it appears to be an oversight.
767     + */
768     + obj_desc = (union acpi_operand_object *)op->common.node;
769     +
770     + status =
771     + acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
772     + (struct
773     + acpi_namespace_node,
774     + &obj_desc),
775     + walk_state);
776     + if (ACPI_FAILURE(status)) {
777     + return_ACPI_STATUS(status);
778     + }
779     +
780     + switch (op->common.node->type) {
781     + /*
782     + * For these types, we need the actual node, not the subobject.
783     + * However, the subobject got an extra reference count above.
784     + */
785     + case ACPI_TYPE_MUTEX:
786     + case ACPI_TYPE_METHOD:
787     + case ACPI_TYPE_POWER:
788     + case ACPI_TYPE_PROCESSOR:
789     + case ACPI_TYPE_EVENT:
790     + case ACPI_TYPE_REGION:
791     + case ACPI_TYPE_DEVICE:
792     + case ACPI_TYPE_THERMAL:
793     +
794     + obj_desc =
795     + (union acpi_operand_object *)op->common.
796     + node;
797     + break;
798     +
799     + default:
800     + break;
801     + }
802     +
803     + /*
804     + * If above resolved to an operand object, we are done. Otherwise,
805     + * we have a NS node, we must create the package entry as a named
806     + * reference.
807     + */
808     + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
809     + ACPI_DESC_TYPE_NAMED) {
810     + goto exit;
811     + }
812     + }
813     }
814    
815     /* Create and init a new internal ACPI object */
816     @@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
817     return_ACPI_STATUS(status);
818     }
819    
820     + exit:
821     *obj_desc_ptr = obj_desc;
822     return_ACPI_STATUS(AE_OK);
823     }
824     @@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
825     arg = arg->common.next;
826     for (i = 0; arg && (i < element_count); i++) {
827     if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
828     -
829     - /* This package element is already built, just get it */
830     -
831     - obj_desc->package.elements[i] =
832     - ACPI_CAST_PTR(union acpi_operand_object,
833     - arg->common.node);
834     + if (arg->common.node->type == ACPI_TYPE_METHOD) {
835     + /*
836     + * A method reference "looks" to the parser to be a method
837     + * invocation, so we special case it here
838     + */
839     + arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
840     + status =
841     + acpi_ds_build_internal_object(walk_state,
842     + arg,
843     + &obj_desc->
844     + package.
845     + elements[i]);
846     + } else {
847     + /* This package element is already built, just get it */
848     +
849     + obj_desc->package.elements[i] =
850     + ACPI_CAST_PTR(union acpi_operand_object,
851     + arg->common.node);
852     + }
853     } else {
854     status = acpi_ds_build_internal_object(walk_state, arg,
855     &obj_desc->
856     diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
857     index e99f0c4..58ad097 100644
858     --- a/drivers/acpi/events/evregion.c
859     +++ b/drivers/acpi/events/evregion.c
860     @@ -344,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
861     * setup will potentially execute control methods
862     * (e.g., _REG method for this region)
863     */
864     - acpi_ex_relinquish_interpreter();
865     + acpi_ex_exit_interpreter();
866    
867     status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
868     handler_desc->address_space.context,
869     @@ -352,7 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
870    
871     /* Re-enter the interpreter */
872    
873     - acpi_ex_reacquire_interpreter();
874     + acpi_ex_enter_interpreter();
875    
876     /* Check for failure of the Region Setup */
877    
878     @@ -405,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
879     * exit the interpreter because the handler *might* block -- we don't
880     * know what it will do, so we can't hold the lock on the intepreter.
881     */
882     - acpi_ex_relinquish_interpreter();
883     + acpi_ex_exit_interpreter();
884     }
885    
886     /* Call the handler */
887     @@ -426,7 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
888     * We just returned from a non-default handler, we must re-enter the
889     * interpreter
890     */
891     - acpi_ex_reacquire_interpreter();
892     + acpi_ex_enter_interpreter();
893     }
894    
895     return_ACPI_STATUS(status);
896     diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
897     index 12c09fa..cd573e4 100644
898     --- a/drivers/acpi/osl.c
899     +++ b/drivers/acpi/osl.c
900     @@ -77,11 +77,55 @@ static struct workqueue_struct *kacpi_notify_wq;
901     #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
902     static char osi_additional_string[OSI_STRING_LENGTH_MAX];
903    
904     -static int osi_linux; /* disable _OSI(Linux) by default */
905     +/*
906     + * "Ode to _OSI(Linux)"
907     + *
908     + * osi_linux -- Control response to BIOS _OSI(Linux) query.
909     + *
910     + * As Linux evolves, the features that it supports change.
911     + * So an OSI string such as "Linux" is not specific enough
912     + * to be useful across multiple versions of Linux. It
913     + * doesn't identify any particular feature, interface,
914     + * or even any particular version of Linux...
915     + *
916     + * Unfortunately, Linux-2.6.22 and earlier responded "yes"
917     + * to a BIOS _OSI(Linux) query. When
918     + * a reference mobile BIOS started using it, its use
919     + * started to spread to many vendor platforms.
920     + * As it is not supportable, we need to halt that spread.
921     + *
922     + * Today, most BIOS references to _OSI(Linux) are noise --
923     + * they have no functional effect and are just dead code
924     + * carried over from the reference BIOS.
925     + *
926     + * The next most common case is that _OSI(Linux) harms Linux,
927     + * usually by causing the BIOS to follow paths that are
928     + * not tested during Windows validation.
929     + *
930     + * Finally, there is a short list of platforms
931     + * where OSI(Linux) benefits Linux.
932     + *
933     + * In Linux-2.6.23, OSI(Linux) is first disabled by default.
934     + * DMI is used to disable the dmesg warning about OSI(Linux)
935     + * on platforms where it is known to have no effect.
936     + * But a dmesg warning remains for systems where
937     + * we do not know if OSI(Linux) is good or bad for the system.
938     + * DMI is also used to enable OSI(Linux) for the machines
939     + * that are known to need it.
940     + *
941     + * BIOS writers should NOT query _OSI(Linux) on future systems.
942     + * It will be ignored by default, and to get Linux to
943     + * not ignore it will require a kernel source update to
944     + * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
945     + */
946     +#define OSI_LINUX_ENABLE 0
947    
948     -#ifdef CONFIG_DMI
949     -static struct __initdata dmi_system_id acpi_osl_dmi_table[];
950     -#endif
951     +static struct osi_linux {
952     + unsigned int enable:1;
953     + unsigned int dmi:1;
954     + unsigned int cmdline:1;
955     + unsigned int known:1;
956     +} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
957    
958     static void __init acpi_request_region (struct acpi_generic_address *addr,
959     unsigned int length, char *desc)
960     @@ -133,7 +177,6 @@ device_initcall(acpi_reserve_resources);
961    
962     acpi_status __init acpi_os_initialize(void)
963     {
964     - dmi_check_system(acpi_osl_dmi_table);
965     return AE_OK;
966     }
967    
968     @@ -971,13 +1014,37 @@ static int __init acpi_os_name_setup(char *str)
969    
970     __setup("acpi_os_name=", acpi_os_name_setup);
971    
972     -static void enable_osi_linux(int enable) {
973     +static void __init set_osi_linux(unsigned int enable)
974     +{
975     + if (osi_linux.enable != enable) {
976     + osi_linux.enable = enable;
977     + printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
978     + enable ? "Add": "Delet");
979     + }
980     + return;
981     +}
982    
983     - if (osi_linux != enable)
984     - printk(KERN_INFO PREFIX "%sabled _OSI(Linux)\n",
985     - enable ? "En": "Dis");
986     +static void __init acpi_cmdline_osi_linux(unsigned int enable)
987     +{
988     + osi_linux.cmdline = 1; /* cmdline set the default */
989     + set_osi_linux(enable);
990     +
991     + return;
992     +}
993     +
994     +void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
995     +{
996     + osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
997     +
998     + printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
999     +
1000     + if (enable == -1)
1001     + return;
1002     +
1003     + osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */
1004     +
1005     + set_osi_linux(enable);
1006    
1007     - osi_linux = enable;
1008     return;
1009     }
1010    
1011     @@ -994,12 +1061,12 @@ static int __init acpi_osi_setup(char *str)
1012     printk(KERN_INFO PREFIX "_OSI method disabled\n");
1013     acpi_gbl_create_osi_method = FALSE;
1014     } else if (!strcmp("!Linux", str)) {
1015     - enable_osi_linux(0);
1016     + acpi_cmdline_osi_linux(0); /* !enable */
1017     } else if (*str == '!') {
1018     if (acpi_osi_invalidate(++str) == AE_OK)
1019     printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1020     } else if (!strcmp("Linux", str)) {
1021     - enable_osi_linux(1);
1022     + acpi_cmdline_osi_linux(1); /* enable */
1023     } else if (*osi_additional_string == '\0') {
1024     strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1025     printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1026     @@ -1156,6 +1223,34 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1027     return (AE_OK);
1028     }
1029    
1030     +/**
1031     + * acpi_dmi_dump - dump DMI slots needed for blacklist entry
1032     + *
1033     + * Returns 0 on success
1034     + */
1035     +static int acpi_dmi_dump(void)
1036     +{
1037     +
1038     + if (!dmi_available)
1039     + return -1;
1040     +
1041     + printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1042     + dmi_get_system_info(DMI_SYS_VENDOR));
1043     + printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1044     + dmi_get_system_info(DMI_PRODUCT_NAME));
1045     + printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1046     + dmi_get_system_info(DMI_PRODUCT_VERSION));
1047     + printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1048     + dmi_get_system_info(DMI_BOARD_NAME));
1049     + printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1050     + dmi_get_system_info(DMI_BIOS_VENDOR));
1051     + printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1052     + dmi_get_system_info(DMI_BIOS_DATE));
1053     +
1054     + return 0;
1055     +}
1056     +
1057     +
1058     /******************************************************************************
1059     *
1060     * FUNCTION: acpi_os_validate_interface
1061     @@ -1175,13 +1270,29 @@ acpi_os_validate_interface (char *interface)
1062     if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1063     return AE_OK;
1064     if (!strcmp("Linux", interface)) {
1065     - printk(KERN_WARNING PREFIX
1066     - "System BIOS is requesting _OSI(Linux)\n");
1067     - printk(KERN_WARNING PREFIX
1068     - "If \"acpi_osi=Linux\" works better,\n"
1069     - "Please send dmidecode "
1070     - "to linux-acpi@vger.kernel.org\n");
1071     - if(osi_linux)
1072     +
1073     + printk(KERN_NOTICE PREFIX
1074     + "BIOS _OSI(Linux) query %s%s\n",
1075     + osi_linux.enable ? "honored" : "ignored",
1076     + osi_linux.cmdline ? " via cmdline" :
1077     + osi_linux.dmi ? " via DMI" : "");
1078     +
1079     + if (!osi_linux.dmi) {
1080     + if (acpi_dmi_dump())
1081     + printk(KERN_NOTICE PREFIX
1082     + "[please extract dmidecode output]\n");
1083     + printk(KERN_NOTICE PREFIX
1084     + "Please send DMI info above to "
1085     + "linux-acpi@vger.kernel.org\n");
1086     + }
1087     + if (!osi_linux.known && !osi_linux.cmdline) {
1088     + printk(KERN_NOTICE PREFIX
1089     + "If \"acpi_osi=%sLinux\" works better, "
1090     + "please notify linux-acpi@vger.kernel.org\n",
1091     + osi_linux.enable ? "!" : "");
1092     + }
1093     +
1094     + if (osi_linux.enable)
1095     return AE_OK;
1096     }
1097     return AE_SUPPORT;
1098     @@ -1213,28 +1324,4 @@ acpi_os_validate_address (
1099     return AE_OK;
1100     }
1101    
1102     -#ifdef CONFIG_DMI
1103     -static int dmi_osi_linux(struct dmi_system_id *d)
1104     -{
1105     - printk(KERN_NOTICE "%s detected: enabling _OSI(Linux)\n", d->ident);
1106     - enable_osi_linux(1);
1107     - return 0;
1108     -}
1109     -
1110     -static struct dmi_system_id acpi_osl_dmi_table[] __initdata = {
1111     - /*
1112     - * Boxes that need _OSI(Linux)
1113     - */
1114     - {
1115     - .callback = dmi_osi_linux,
1116     - .ident = "Intel Napa CRB",
1117     - .matches = {
1118     - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
1119     - DMI_MATCH(DMI_BOARD_NAME, "MPAD-MSAE Customer Reference Boards"),
1120     - },
1121     - },
1122     - {}
1123     -};
1124     -#endif /* CONFIG_DMI */
1125     -
1126     #endif
1127     diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
1128     index dd3186a..62010c2 100644
1129     --- a/drivers/acpi/pci_irq.c
1130     +++ b/drivers/acpi/pci_irq.c
1131     @@ -429,6 +429,15 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
1132     &polarity, &link,
1133     acpi_pci_allocate_irq);
1134    
1135     + if (irq < 0) {
1136     + /*
1137     + * IDE legacy mode controller IRQs are magic. Why do compat
1138     + * extensions always make such a nasty mess.
1139     + */
1140     + if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
1141     + (dev->class & 0x05) == 0)
1142     + return 0;
1143     + }
1144     /*
1145     * No IRQ known to the ACPI subsystem - maybe the BIOS /
1146     * driver reported one, then use it. Exit in any case.
1147     diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
1148     index dad84c0..9d71f25 100644
1149     --- a/drivers/acpi/video.c
1150     +++ b/drivers/acpi/video.c
1151     @@ -573,7 +573,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1152     struct acpi_video_device_brightness *br = NULL;
1153    
1154    
1155     - memset(&device->cap, 0, 4);
1156     + memset(&device->cap, 0, sizeof(device->cap));
1157    
1158     if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
1159     device->cap._ADR = 1;
1160     @@ -693,7 +693,7 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
1161     {
1162     acpi_handle h_dummy1;
1163    
1164     - memset(&video->cap, 0, 4);
1165     + memset(&video->cap, 0, sizeof(video->cap));
1166     if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
1167     video->cap._DOS = 1;
1168     }
1169     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1170     index 98e33f9..4895a42 100644
1171     --- a/drivers/ata/libata-core.c
1172     +++ b/drivers/ata/libata-core.c
1173     @@ -6121,19 +6121,6 @@ static void ata_host_release(struct device *gendev, void *res)
1174     if (!ap)
1175     continue;
1176    
1177     - if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
1178     - ap->ops->port_stop(ap);
1179     - }
1180     -
1181     - if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
1182     - host->ops->host_stop(host);
1183     -
1184     - for (i = 0; i < host->n_ports; i++) {
1185     - struct ata_port *ap = host->ports[i];
1186     -
1187     - if (!ap)
1188     - continue;
1189     -
1190     if (ap->scsi_host)
1191     scsi_host_put(ap->scsi_host);
1192    
1193     @@ -6258,6 +6245,24 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
1194     return host;
1195     }
1196    
1197     +static void ata_host_stop(struct device *gendev, void *res)
1198     +{
1199     + struct ata_host *host = dev_get_drvdata(gendev);
1200     + int i;
1201     +
1202     + WARN_ON(!(host->flags & ATA_HOST_STARTED));
1203     +
1204     + for (i = 0; i < host->n_ports; i++) {
1205     + struct ata_port *ap = host->ports[i];
1206     +
1207     + if (ap->ops->port_stop)
1208     + ap->ops->port_stop(ap);
1209     + }
1210     +
1211     + if (host->ops->host_stop)
1212     + host->ops->host_stop(host);
1213     +}
1214     +
1215     /**
1216     * ata_host_start - start and freeze ports of an ATA host
1217     * @host: ATA host to start ports for
1218     @@ -6276,6 +6281,8 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
1219     */
1220     int ata_host_start(struct ata_host *host)
1221     {
1222     + int have_stop = 0;
1223     + void *start_dr = NULL;
1224     int i, rc;
1225    
1226     if (host->flags & ATA_HOST_STARTED)
1227     @@ -6287,6 +6294,22 @@ int ata_host_start(struct ata_host *host)
1228     if (!host->ops && !ata_port_is_dummy(ap))
1229     host->ops = ap->ops;
1230    
1231     + if (ap->ops->port_stop)
1232     + have_stop = 1;
1233     + }
1234     +
1235     + if (host->ops->host_stop)
1236     + have_stop = 1;
1237     +
1238     + if (have_stop) {
1239     + start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
1240     + if (!start_dr)
1241     + return -ENOMEM;
1242     + }
1243     +
1244     + for (i = 0; i < host->n_ports; i++) {
1245     + struct ata_port *ap = host->ports[i];
1246     +
1247     if (ap->ops->port_start) {
1248     rc = ap->ops->port_start(ap);
1249     if (rc) {
1250     @@ -6299,6 +6322,8 @@ int ata_host_start(struct ata_host *host)
1251     ata_eh_freeze_port(ap);
1252     }
1253    
1254     + if (start_dr)
1255     + devres_add(host->dev, start_dr);
1256     host->flags |= ATA_HOST_STARTED;
1257     return 0;
1258    
1259     @@ -6309,6 +6334,7 @@ int ata_host_start(struct ata_host *host)
1260     if (ap->ops->port_stop)
1261     ap->ops->port_stop(ap);
1262     }
1263     + devres_free(start_dr);
1264     return rc;
1265     }
1266    
1267     diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
1268     index 25698cf..bab694a 100644
1269     --- a/drivers/ata/sata_promise.c
1270     +++ b/drivers/ata/sata_promise.c
1271     @@ -50,6 +50,7 @@
1272     enum {
1273     PDC_MAX_PORTS = 4,
1274     PDC_MMIO_BAR = 3,
1275     + PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
1276    
1277     /* register offsets */
1278     PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
1279     @@ -155,7 +156,7 @@ static struct scsi_host_template pdc_ata_sht = {
1280     .queuecommand = ata_scsi_queuecmd,
1281     .can_queue = ATA_DEF_QUEUE,
1282     .this_id = ATA_SHT_THIS_ID,
1283     - .sg_tablesize = LIBATA_MAX_PRD,
1284     + .sg_tablesize = PDC_MAX_PRD,
1285     .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
1286     .emulated = ATA_SHT_EMULATED,
1287     .use_clustering = ATA_SHT_USE_CLUSTERING,
1288     @@ -527,6 +528,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
1289     memcpy(buf+31, cdb, cdb_len);
1290     }
1291    
1292     +/**
1293     + * pdc_fill_sg - Fill PCI IDE PRD table
1294     + * @qc: Metadata associated with taskfile to be transferred
1295     + *
1296     + * Fill PCI IDE PRD (scatter-gather) table with segments
1297     + * associated with the current disk command.
1298     + * Make sure hardware does not choke on it.
1299     + *
1300     + * LOCKING:
1301     + * spin_lock_irqsave(host lock)
1302     + *
1303     + */
1304     +static void pdc_fill_sg(struct ata_queued_cmd *qc)
1305     +{
1306     + struct ata_port *ap = qc->ap;
1307     + struct scatterlist *sg;
1308     + unsigned int idx;
1309     + const u32 SG_COUNT_ASIC_BUG = 41*4;
1310     +
1311     + if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1312     + return;
1313     +
1314     + WARN_ON(qc->__sg == NULL);
1315     + WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1316     +
1317     + idx = 0;
1318     + ata_for_each_sg(sg, qc) {
1319     + u32 addr, offset;
1320     + u32 sg_len, len;
1321     +
1322     + /* determine if physical DMA addr spans 64K boundary.
1323     + * Note h/w doesn't support 64-bit, so we unconditionally
1324     + * truncate dma_addr_t to u32.
1325     + */
1326     + addr = (u32) sg_dma_address(sg);
1327     + sg_len = sg_dma_len(sg);
1328     +
1329     + while (sg_len) {
1330     + offset = addr & 0xffff;
1331     + len = sg_len;
1332     + if ((offset + sg_len) > 0x10000)
1333     + len = 0x10000 - offset;
1334     +
1335     + ap->prd[idx].addr = cpu_to_le32(addr);
1336     + ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1337     + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
1338     +
1339     + idx++;
1340     + sg_len -= len;
1341     + addr += len;
1342     + }
1343     + }
1344     +
1345     + if (idx) {
1346     + u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
1347     +
1348     + if (len > SG_COUNT_ASIC_BUG) {
1349     + u32 addr;
1350     +
1351     + VPRINTK("Splitting last PRD.\n");
1352     +
1353     + addr = le32_to_cpu(ap->prd[idx - 1].addr);
1354     + ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
1355     + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
1356     +
1357     + addr = addr + len - SG_COUNT_ASIC_BUG;
1358     + len = SG_COUNT_ASIC_BUG;
1359     + ap->prd[idx].addr = cpu_to_le32(addr);
1360     + ap->prd[idx].flags_len = cpu_to_le32(len);
1361     + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
1362     +
1363     + idx++;
1364     + }
1365     +
1366     + ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1367     + }
1368     +}
1369     +
1370     static void pdc_qc_prep(struct ata_queued_cmd *qc)
1371     {
1372     struct pdc_port_priv *pp = qc->ap->private_data;
1373     @@ -536,7 +615,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
1374    
1375     switch (qc->tf.protocol) {
1376     case ATA_PROT_DMA:
1377     - ata_qc_prep(qc);
1378     + pdc_fill_sg(qc);
1379     /* fall through */
1380    
1381     case ATA_PROT_NODATA:
1382     @@ -552,11 +631,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
1383     break;
1384    
1385     case ATA_PROT_ATAPI:
1386     - ata_qc_prep(qc);
1387     + pdc_fill_sg(qc);
1388     break;
1389    
1390     case ATA_PROT_ATAPI_DMA:
1391     - ata_qc_prep(qc);
1392     + pdc_fill_sg(qc);
1393     /*FALLTHROUGH*/
1394     case ATA_PROT_ATAPI_NODATA:
1395     pdc_atapi_pkt(qc);
1396     diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
1397     index 14ced85..0c205b0 100644
1398     --- a/drivers/atm/nicstar.c
1399     +++ b/drivers/atm/nicstar.c
1400     @@ -625,14 +625,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
1401     if (mac[i] == NULL)
1402     nicstar_init_eprom(card->membase);
1403    
1404     - if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
1405     - {
1406     - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
1407     - error = 9;
1408     - ns_init_card_error(card, error);
1409     - return error;
1410     - }
1411     -
1412     /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
1413     writel(0x00000000, card->membase + VPM);
1414    
1415     @@ -858,8 +850,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
1416     card->iovpool.count++;
1417     }
1418    
1419     - card->intcnt = 0;
1420     -
1421     /* Configure NICStAR */
1422     if (card->rct_size == 4096)
1423     ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
1424     @@ -868,6 +858,15 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
1425    
1426     card->efbie = 1;
1427    
1428     + card->intcnt = 0;
1429     + if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
1430     + {
1431     + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
1432     + error = 9;
1433     + ns_init_card_error(card, error);
1434     + return error;
1435     + }
1436     +
1437     /* Register device */
1438     card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
1439     if (card->atmdev == NULL)
1440     diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
1441     index ec116df..72183bd 100644
1442     --- a/drivers/char/apm-emulation.c
1443     +++ b/drivers/char/apm-emulation.c
1444     @@ -295,7 +295,6 @@ static int
1445     apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
1446     {
1447     struct apm_user *as = filp->private_data;
1448     - unsigned long flags;
1449     int err = -EINVAL;
1450    
1451     if (!as->suser || !as->writer)
1452     @@ -331,10 +330,16 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
1453     * Wait for the suspend/resume to complete. If there
1454     * are pending acknowledges, we wait here for them.
1455     */
1456     - flags = current->flags;
1457     + freezer_do_not_count();
1458    
1459     wait_event(apm_suspend_waitqueue,
1460     as->suspend_state == SUSPEND_DONE);
1461     +
1462     + /*
1463     + * Since we are waiting until the suspend is done, the
1464     + * try_to_freeze() in freezer_count() will not trigger
1465     + */
1466     + freezer_count();
1467     } else {
1468     as->suspend_state = SUSPEND_WAIT;
1469     mutex_unlock(&state_lock);
1470     @@ -362,14 +367,10 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
1471     * Wait for the suspend/resume to complete. If there
1472     * are pending acknowledges, we wait here for them.
1473     */
1474     - flags = current->flags;
1475     -
1476     - wait_event_interruptible(apm_suspend_waitqueue,
1477     + wait_event_freezable(apm_suspend_waitqueue,
1478     as->suspend_state == SUSPEND_DONE);
1479     }
1480    
1481     - current->flags = flags;
1482     -
1483     mutex_lock(&state_lock);
1484     err = as->suspend_result;
1485     as->suspend_state = SUSPEND_NONE;
1486     diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
1487     index 68e36e5..caa0bce 100644
1488     --- a/drivers/char/drm/drm_vm.c
1489     +++ b/drivers/char/drm/drm_vm.c
1490     @@ -506,6 +506,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
1491     vma->vm_ops = &drm_vm_dma_ops;
1492    
1493     vma->vm_flags |= VM_RESERVED; /* Don't swap */
1494     + vma->vm_flags |= VM_DONTEXPAND;
1495    
1496     vma->vm_file = filp; /* Needed for drm_vm_open() */
1497     drm_vm_open_locked(vma);
1498     @@ -655,6 +656,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
1499     return -EINVAL; /* This should never happen. */
1500     }
1501     vma->vm_flags |= VM_RESERVED; /* Don't swap */
1502     + vma->vm_flags |= VM_DONTEXPAND;
1503    
1504     vma->vm_file = filp; /* Needed for drm_vm_open() */
1505     drm_vm_open_locked(vma);
1506     diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
1507     index 04ac155..ada142a 100644
1508     --- a/drivers/char/mspec.c
1509     +++ b/drivers/char/mspec.c
1510     @@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
1511     vdata->refcnt = ATOMIC_INIT(1);
1512     vma->vm_private_data = vdata;
1513    
1514     - vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
1515     + vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
1516     if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
1517     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1518     vma->vm_ops = &mspec_vm_ops;
1519     diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
1520     index 3ee73cf..d08c301 100644
1521     --- a/drivers/char/tty_ioctl.c
1522     +++ b/drivers/char/tty_ioctl.c
1523     @@ -62,7 +62,7 @@ void tty_wait_until_sent(struct tty_struct * tty, long timeout)
1524     if (!timeout)
1525     timeout = MAX_SCHEDULE_TIMEOUT;
1526     if (wait_event_interruptible_timeout(tty->write_wait,
1527     - !tty->driver->chars_in_buffer(tty), timeout))
1528     + !tty->driver->chars_in_buffer(tty), timeout) < 0)
1529     return;
1530     if (tty->driver->wait_until_sent)
1531     tty->driver->wait_until_sent(tty, timeout);
1532     diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
1533     index 296f510..12ceed5 100644
1534     --- a/drivers/connector/cn_queue.c
1535     +++ b/drivers/connector/cn_queue.c
1536     @@ -99,8 +99,8 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id
1537     spin_unlock_bh(&dev->queue_lock);
1538    
1539     if (found) {
1540     - atomic_dec(&dev->refcnt);
1541     cn_queue_free_callback(cbq);
1542     + atomic_dec(&dev->refcnt);
1543     return -EINVAL;
1544     }
1545    
1546     diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
1547     index d4501dc..e9a23a4 100644
1548     --- a/drivers/crypto/padlock-aes.c
1549     +++ b/drivers/crypto/padlock-aes.c
1550     @@ -419,13 +419,58 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
1551     /* ====== Encryption/decryption routines ====== */
1552    
1553     /* These are the real call to PadLock. */
1554     +static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
1555     + void *control_word)
1556     +{
1557     + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
1558     + : "+S"(input), "+D"(output)
1559     + : "d"(control_word), "b"(key), "c"(1));
1560     +}
1561     +
1562     +static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
1563     +{
1564     + u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
1565     + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
1566     +
1567     + memcpy(tmp, in, AES_BLOCK_SIZE);
1568     + padlock_xcrypt(tmp, out, key, cword);
1569     +}
1570     +
1571     +static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
1572     + struct cword *cword)
1573     +{
1574     + asm volatile ("pushfl; popfl");
1575     +
1576     + /* padlock_xcrypt requires at least two blocks of data. */
1577     + if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
1578     + (PAGE_SIZE - 1)))) {
1579     + aes_crypt_copy(in, out, key, cword);
1580     + return;
1581     + }
1582     +
1583     + padlock_xcrypt(in, out, key, cword);
1584     +}
1585     +
1586     static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
1587     void *control_word, u32 count)
1588     {
1589     + if (count == 1) {
1590     + aes_crypt(input, output, key, control_word);
1591     + return;
1592     + }
1593     +
1594     asm volatile ("pushfl; popfl"); /* enforce key reload. */
1595     - asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
1596     + asm volatile ("test $1, %%cl;"
1597     + "je 1f;"
1598     + "lea -1(%%ecx), %%eax;"
1599     + "mov $1, %%ecx;"
1600     + ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
1601     + "mov %%eax, %%ecx;"
1602     + "1:"
1603     + ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
1604     : "+S"(input), "+D"(output)
1605     - : "d"(control_word), "b"(key), "c"(count));
1606     + : "d"(control_word), "b"(key), "c"(count)
1607     + : "ax");
1608     }
1609    
1610     static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
1611     @@ -443,13 +488,13 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
1612     static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1613     {
1614     struct aes_ctx *ctx = aes_ctx(tfm);
1615     - padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
1616     + aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
1617     }
1618    
1619     static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1620     {
1621     struct aes_ctx *ctx = aes_ctx(tfm);
1622     - padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
1623     + aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
1624     }
1625    
1626     static struct crypto_alg aes_alg = {
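
The new single-block path in padlock-aes.c guards against the PadLock engine reading ahead of the supplied buffer (the in-line comment notes that xcrypt wants at least two blocks): when the only input block is the last 16 bytes of a page, that read-ahead could cross into an unmapped page and fault, so aes_crypt_copy() bounces the block through a stack buffer first. The page-offset test is written as an XOR trick; an equivalent, more explicit spelling (illustrative only, not part of the patch):

	unsigned long offset = (unsigned long)in & (PAGE_SIZE - 1);

	/* Same condition as the XOR form in aes_crypt() above. */
	if (offset == PAGE_SIZE - AES_BLOCK_SIZE)
		aes_crypt_copy(in, out, key, cword);	/* last block of a page */
	else
		padlock_xcrypt(in, out, key, cword);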
1627     diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
1628     index 59c3b5a..ed0d030 100644
1629     --- a/drivers/firmware/dmi-id.c
1630     +++ b/drivers/firmware/dmi-id.c
1631     @@ -159,8 +159,6 @@ static struct device *dmi_dev;
1632     if (dmi_get_system_info(_field)) \
1633     sys_dmi_attributes[i++] = & sys_dmi_##_name##_attr.attr;
1634    
1635     -extern int dmi_available;
1636     -
1637     static int __init dmi_id_init(void)
1638     {
1639     int ret, i;
1640     diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
1641     index f1c3d6c..27026f7 100644
1642     --- a/drivers/input/evdev.c
1643     +++ b/drivers/input/evdev.c
1644     @@ -30,6 +30,8 @@ struct evdev {
1645     wait_queue_head_t wait;
1646     struct evdev_client *grab;
1647     struct list_head client_list;
1648     + spinlock_t client_lock; /* protects client_list */
1649     + struct mutex mutex;
1650     struct device dev;
1651     };
1652    
1653     @@ -37,39 +39,53 @@ struct evdev_client {
1654     struct input_event buffer[EVDEV_BUFFER_SIZE];
1655     int head;
1656     int tail;
1657     + spinlock_t buffer_lock; /* protects access to buffer, head and tail */
1658     struct fasync_struct *fasync;
1659     struct evdev *evdev;
1660     struct list_head node;
1661     };
1662    
1663     static struct evdev *evdev_table[EVDEV_MINORS];
1664     +static DEFINE_MUTEX(evdev_table_mutex);
1665    
1666     -static void evdev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
1667     +static void evdev_pass_event(struct evdev_client *client,
1668     + struct input_event *event)
1669     +{
1670     + /*
1671     + * Interrupts are disabled, just acquire the lock
1672     + */
1673     + spin_lock(&client->buffer_lock);
1674     + client->buffer[client->head++] = *event;
1675     + client->head &= EVDEV_BUFFER_SIZE - 1;
1676     + spin_unlock(&client->buffer_lock);
1677     +
1678     + kill_fasync(&client->fasync, SIGIO, POLL_IN);
1679     +}
1680     +
1681     +/*
1682     + * Pass incoming event to all connected clients. Note that we are
1683     + * called under a spinlock with interrupts off so we don't need
1684     + * to use rcu_read_lock() here. Writers will be using synchronize_sched()
1685     + * instead of synchronize_rcu().
1686     + */
1687     +static void evdev_event(struct input_handle *handle,
1688     + unsigned int type, unsigned int code, int value)
1689     {
1690     struct evdev *evdev = handle->private;
1691     struct evdev_client *client;
1692     + struct input_event event;
1693    
1694     - if (evdev->grab) {
1695     - client = evdev->grab;
1696     -
1697     - do_gettimeofday(&client->buffer[client->head].time);
1698     - client->buffer[client->head].type = type;
1699     - client->buffer[client->head].code = code;
1700     - client->buffer[client->head].value = value;
1701     - client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1);
1702     -
1703     - kill_fasync(&client->fasync, SIGIO, POLL_IN);
1704     - } else
1705     - list_for_each_entry(client, &evdev->client_list, node) {
1706     -
1707     - do_gettimeofday(&client->buffer[client->head].time);
1708     - client->buffer[client->head].type = type;
1709     - client->buffer[client->head].code = code;
1710     - client->buffer[client->head].value = value;
1711     - client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1);
1712     + do_gettimeofday(&event.time);
1713     + event.type = type;
1714     + event.code = code;
1715     + event.value = value;
1716    
1717     - kill_fasync(&client->fasync, SIGIO, POLL_IN);
1718     - }
1719     + client = rcu_dereference(evdev->grab);
1720     + if (client)
1721     + evdev_pass_event(client, &event);
1722     + else
1723     + list_for_each_entry_rcu(client, &evdev->client_list, node)
1724     + evdev_pass_event(client, &event);
1725    
1726     wake_up_interruptible(&evdev->wait);
1727     }
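
The comment above captures the locking scheme used by the rest of this evdev rework: the event path already runs with interrupts disabled, so readers do not take rcu_read_lock(), and writers therefore wait with synchronize_sched() rather than synchronize_rcu(). Condensed into one place, the publish-and-wait pattern looks roughly like this (a sketch assembled from the hunks below, not additional patch content):

	/* Writer, process context with evdev->mutex held: */
	rcu_assign_pointer(evdev->grab, client);	/* publish the new owner */
	synchronize_sched();				/* wait out IRQs-off readers */

	/* Reader, entered with interrupts off by the input core: */
	client = rcu_dereference(evdev->grab);
	if (client)
		evdev_pass_event(client, &event);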
1728     @@ -88,38 +104,145 @@ static int evdev_flush(struct file *file, fl_owner_t id)
1729     {
1730     struct evdev_client *client = file->private_data;
1731     struct evdev *evdev = client->evdev;
1732     + int retval;
1733     +
1734     + retval = mutex_lock_interruptible(&evdev->mutex);
1735     + if (retval)
1736     + return retval;
1737    
1738     if (!evdev->exist)
1739     - return -ENODEV;
1740     + retval = -ENODEV;
1741     + else
1742     + retval = input_flush_device(&evdev->handle, file);
1743    
1744     - return input_flush_device(&evdev->handle, file);
1745     + mutex_unlock(&evdev->mutex);
1746     + return retval;
1747     }
1748    
1749     static void evdev_free(struct device *dev)
1750     {
1751     struct evdev *evdev = container_of(dev, struct evdev, dev);
1752    
1753     - evdev_table[evdev->minor] = NULL;
1754     kfree(evdev);
1755     }
1756    
1757     +/*
1758     + * Grabs an event device (along with underlying input device).
1759     + * This function is called with evdev->mutex taken.
1760     + */
1761     +static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
1762     +{
1763     + int error;
1764     +
1765     + if (evdev->grab)
1766     + return -EBUSY;
1767     +
1768     + error = input_grab_device(&evdev->handle);
1769     + if (error)
1770     + return error;
1771     +
1772     + rcu_assign_pointer(evdev->grab, client);
1773     + /*
1774     + * We don't use synchronize_rcu() here because read-side
1775     + * critical section is protected by a spinlock instead
1776     + * of rcu_read_lock().
1777     + */
1778     + synchronize_sched();
1779     +
1780     + return 0;
1781     +}
1782     +
1783     +static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
1784     +{
1785     + if (evdev->grab != client)
1786     + return -EINVAL;
1787     +
1788     + rcu_assign_pointer(evdev->grab, NULL);
1789     + synchronize_sched();
1790     + input_release_device(&evdev->handle);
1791     +
1792     + return 0;
1793     +}
1794     +
1795     +static void evdev_attach_client(struct evdev *evdev,
1796     + struct evdev_client *client)
1797     +{
1798     + spin_lock(&evdev->client_lock);
1799     + list_add_tail_rcu(&client->node, &evdev->client_list);
1800     + spin_unlock(&evdev->client_lock);
1801     + synchronize_sched();
1802     +}
1803     +
1804     +static void evdev_detach_client(struct evdev *evdev,
1805     + struct evdev_client *client)
1806     +{
1807     + spin_lock(&evdev->client_lock);
1808     + list_del_rcu(&client->node);
1809     + spin_unlock(&evdev->client_lock);
1810     + synchronize_sched();
1811     +}
1812     +
1813     +static int evdev_open_device(struct evdev *evdev)
1814     +{
1815     + int retval;
1816     +
1817     + retval = mutex_lock_interruptible(&evdev->mutex);
1818     + if (retval)
1819     + return retval;
1820     +
1821     + if (!evdev->exist)
1822     + retval = -ENODEV;
1823     + else if (!evdev->open++) {
1824     + retval = input_open_device(&evdev->handle);
1825     + if (retval)
1826     + evdev->open--;
1827     + }
1828     +
1829     + mutex_unlock(&evdev->mutex);
1830     + return retval;
1831     +}
1832     +
1833     +static void evdev_close_device(struct evdev *evdev)
1834     +{
1835     + mutex_lock(&evdev->mutex);
1836     +
1837     + if (evdev->exist && !--evdev->open)
1838     + input_close_device(&evdev->handle);
1839     +
1840     + mutex_unlock(&evdev->mutex);
1841     +}
1842     +
1843     +/*
1844     + * Wake up users waiting for IO so they can disconnect from
1845     + * the dead device.
1846     + */
1847     +static void evdev_hangup(struct evdev *evdev)
1848     +{
1849     + struct evdev_client *client;
1850     +
1851     + spin_lock(&evdev->client_lock);
1852     + list_for_each_entry(client, &evdev->client_list, node)
1853     + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
1854     + spin_unlock(&evdev->client_lock);
1855     +
1856     + wake_up_interruptible(&evdev->wait);
1857     +}
1858     +
1859     static int evdev_release(struct inode *inode, struct file *file)
1860     {
1861     struct evdev_client *client = file->private_data;
1862     struct evdev *evdev = client->evdev;
1863    
1864     - if (evdev->grab == client) {
1865     - input_release_device(&evdev->handle);
1866     - evdev->grab = NULL;
1867     - }
1868     + mutex_lock(&evdev->mutex);
1869     + if (evdev->grab == client)
1870     + evdev_ungrab(evdev, client);
1871     + mutex_unlock(&evdev->mutex);
1872    
1873     evdev_fasync(-1, file, 0);
1874     - list_del(&client->node);
1875     + evdev_detach_client(evdev, client);
1876     kfree(client);
1877    
1878     - if (!--evdev->open && evdev->exist)
1879     - input_close_device(&evdev->handle);
1880     -
1881     + evdev_close_device(evdev);
1882     put_device(&evdev->dev);
1883    
1884     return 0;
1885     @@ -127,41 +250,44 @@ static int evdev_release(struct inode *inode, struct file *file)
1886    
1887     static int evdev_open(struct inode *inode, struct file *file)
1888     {
1889     - struct evdev_client *client;
1890     struct evdev *evdev;
1891     + struct evdev_client *client;
1892     int i = iminor(inode) - EVDEV_MINOR_BASE;
1893     int error;
1894    
1895     if (i >= EVDEV_MINORS)
1896     return -ENODEV;
1897    
1898     + error = mutex_lock_interruptible(&evdev_table_mutex);
1899     + if (error)
1900     + return error;
1901     evdev = evdev_table[i];
1902     + if (evdev)
1903     + get_device(&evdev->dev);
1904     + mutex_unlock(&evdev_table_mutex);
1905    
1906     - if (!evdev || !evdev->exist)
1907     + if (!evdev)
1908     return -ENODEV;
1909    
1910     - get_device(&evdev->dev);
1911     -
1912     client = kzalloc(sizeof(struct evdev_client), GFP_KERNEL);
1913     if (!client) {
1914     error = -ENOMEM;
1915     goto err_put_evdev;
1916     }
1917    
1918     + spin_lock_init(&client->buffer_lock);
1919     client->evdev = evdev;
1920     - list_add_tail(&client->node, &evdev->client_list);
1921     + evdev_attach_client(evdev, client);
1922    
1923     - if (!evdev->open++ && evdev->exist) {
1924     - error = input_open_device(&evdev->handle);
1925     - if (error)
1926     - goto err_free_client;
1927     - }
1928     + error = evdev_open_device(evdev);
1929     + if (error)
1930     + goto err_free_client;
1931    
1932     file->private_data = client;
1933     return 0;
1934    
1935     err_free_client:
1936     - list_del(&client->node);
1937     + evdev_detach_client(evdev, client);
1938     kfree(client);
1939     err_put_evdev:
1940     put_device(&evdev->dev);
1941     @@ -197,12 +323,14 @@ static inline size_t evdev_event_size(void)
1942     sizeof(struct input_event_compat) : sizeof(struct input_event);
1943     }
1944    
1945     -static int evdev_event_from_user(const char __user *buffer, struct input_event *event)
1946     +static int evdev_event_from_user(const char __user *buffer,
1947     + struct input_event *event)
1948     {
1949     if (COMPAT_TEST) {
1950     struct input_event_compat compat_event;
1951    
1952     - if (copy_from_user(&compat_event, buffer, sizeof(struct input_event_compat)))
1953     + if (copy_from_user(&compat_event, buffer,
1954     + sizeof(struct input_event_compat)))
1955     return -EFAULT;
1956    
1957     event->time.tv_sec = compat_event.time.tv_sec;
1958     @@ -219,7 +347,8 @@ static int evdev_event_from_user(const char __user *buffer, struct input_event *
1959     return 0;
1960     }
1961    
1962     -static int evdev_event_to_user(char __user *buffer, const struct input_event *event)
1963     +static int evdev_event_to_user(char __user *buffer,
1964     + const struct input_event *event)
1965     {
1966     if (COMPAT_TEST) {
1967     struct input_event_compat compat_event;
1968     @@ -230,7 +359,8 @@ static int evdev_event_to_user(char __user *buffer, const struct input_event *ev
1969     compat_event.code = event->code;
1970     compat_event.value = event->value;
1971    
1972     - if (copy_to_user(buffer, &compat_event, sizeof(struct input_event_compat)))
1973     + if (copy_to_user(buffer, &compat_event,
1974     + sizeof(struct input_event_compat)))
1975     return -EFAULT;
1976    
1977     } else {
1978     @@ -248,7 +378,8 @@ static inline size_t evdev_event_size(void)
1979     return sizeof(struct input_event);
1980     }
1981    
1982     -static int evdev_event_from_user(const char __user *buffer, struct input_event *event)
1983     +static int evdev_event_from_user(const char __user *buffer,
1984     + struct input_event *event)
1985     {
1986     if (copy_from_user(event, buffer, sizeof(struct input_event)))
1987     return -EFAULT;
1988     @@ -256,7 +387,8 @@ static int evdev_event_from_user(const char __user *buffer, struct input_event *
1989     return 0;
1990     }
1991    
1992     -static int evdev_event_to_user(char __user *buffer, const struct input_event *event)
1993     +static int evdev_event_to_user(char __user *buffer,
1994     + const struct input_event *event)
1995     {
1996     if (copy_to_user(buffer, event, sizeof(struct input_event)))
1997     return -EFAULT;
1998     @@ -266,37 +398,71 @@ static int evdev_event_to_user(char __user *buffer, const struct input_event *ev
1999    
2000     #endif /* CONFIG_COMPAT */
2001    
2002     -static ssize_t evdev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
2003     +static ssize_t evdev_write(struct file *file, const char __user *buffer,
2004     + size_t count, loff_t *ppos)
2005     {
2006     struct evdev_client *client = file->private_data;
2007     struct evdev *evdev = client->evdev;
2008     struct input_event event;
2009     - int retval = 0;
2010     + int retval;
2011    
2012     - if (!evdev->exist)
2013     - return -ENODEV;
2014     + retval = mutex_lock_interruptible(&evdev->mutex);
2015     + if (retval)
2016     + return retval;
2017     +
2018     + if (!evdev->exist) {
2019     + retval = -ENODEV;
2020     + goto out;
2021     + }
2022    
2023     while (retval < count) {
2024    
2025     - if (evdev_event_from_user(buffer + retval, &event))
2026     - return -EFAULT;
2027     - input_inject_event(&evdev->handle, event.type, event.code, event.value);
2028     + if (evdev_event_from_user(buffer + retval, &event)) {
2029     + retval = -EFAULT;
2030     + goto out;
2031     + }
2032     +
2033     + input_inject_event(&evdev->handle,
2034     + event.type, event.code, event.value);
2035     retval += evdev_event_size();
2036     }
2037    
2038     + out:
2039     + mutex_unlock(&evdev->mutex);
2040     return retval;
2041     }
2042    
2043     -static ssize_t evdev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
2044     +static int evdev_fetch_next_event(struct evdev_client *client,
2045     + struct input_event *event)
2046     +{
2047     + int have_event;
2048     +
2049     + spin_lock_irq(&client->buffer_lock);
2050     +
2051     + have_event = client->head != client->tail;
2052     + if (have_event) {
2053     + *event = client->buffer[client->tail++];
2054     + client->tail &= EVDEV_BUFFER_SIZE - 1;
2055     + }
2056     +
2057     + spin_unlock_irq(&client->buffer_lock);
2058     +
2059     + return have_event;
2060     +}
2061     +
2062     +static ssize_t evdev_read(struct file *file, char __user *buffer,
2063     + size_t count, loff_t *ppos)
2064     {
2065     struct evdev_client *client = file->private_data;
2066     struct evdev *evdev = client->evdev;
2067     + struct input_event event;
2068     int retval;
2069    
2070     if (count < evdev_event_size())
2071     return -EINVAL;
2072    
2073     - if (client->head == client->tail && evdev->exist && (file->f_flags & O_NONBLOCK))
2074     + if (client->head == client->tail && evdev->exist &&
2075     + (file->f_flags & O_NONBLOCK))
2076     return -EAGAIN;
2077    
2078     retval = wait_event_interruptible(evdev->wait,
2079     @@ -307,14 +473,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer, size_t count,
2080     if (!evdev->exist)
2081     return -ENODEV;
2082    
2083     - while (client->head != client->tail && retval + evdev_event_size() <= count) {
2084     + while (retval + evdev_event_size() <= count &&
2085     + evdev_fetch_next_event(client, &event)) {
2086    
2087     - struct input_event *event = (struct input_event *) client->buffer + client->tail;
2088     -
2089     - if (evdev_event_to_user(buffer + retval, event))
2090     + if (evdev_event_to_user(buffer + retval, &event))
2091     return -EFAULT;
2092    
2093     - client->tail = (client->tail + 1) & (EVDEV_BUFFER_SIZE - 1);
2094     retval += evdev_event_size();
2095     }
2096    
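
evdev_pass_event() and evdev_fetch_next_event() form a small producer/consumer ring: EVDEV_BUFFER_SIZE is a power of two, so wrap-around is a single AND with (size - 1), and an empty buffer is simply head == tail. A self-contained user-space sketch of the same arithmetic (names are hypothetical):

	#define RING_SIZE 64			/* must be a power of two */

	struct ring {
		int buf[RING_SIZE];
		unsigned int head;		/* next slot to write */
		unsigned int tail;		/* next slot to read  */
	};

	static void ring_put(struct ring *r, int v)
	{
		r->buf[r->head++] = v;
		r->head &= RING_SIZE - 1;	/* wrap, like client->head in evdev */
	}

	static int ring_get(struct ring *r, int *v)
	{
		if (r->head == r->tail)		/* empty, like head == tail above */
			return 0;
		*v = r->buf[r->tail++];
		r->tail &= RING_SIZE - 1;
		return 1;
	}

As in evdev, an overrun simply overwrites the oldest entries; the kernel side additionally serializes the two ends with client->buffer_lock.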
2097     @@ -409,8 +573,8 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
2098     return copy_to_user(p, str, len) ? -EFAULT : len;
2099     }
2100    
2101     -static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
2102     - void __user *p, int compat_mode)
2103     +static long evdev_do_ioctl(struct file *file, unsigned int cmd,
2104     + void __user *p, int compat_mode)
2105     {
2106     struct evdev_client *client = file->private_data;
2107     struct evdev *evdev = client->evdev;
2108     @@ -421,215 +585,289 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
2109     int i, t, u, v;
2110     int error;
2111    
2112     - if (!evdev->exist)
2113     - return -ENODEV;
2114     -
2115     switch (cmd) {
2116    
2117     - case EVIOCGVERSION:
2118     - return put_user(EV_VERSION, ip);
2119     + case EVIOCGVERSION:
2120     + return put_user(EV_VERSION, ip);
2121    
2122     - case EVIOCGID:
2123     - if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
2124     - return -EFAULT;
2125     - return 0;
2126     + case EVIOCGID:
2127     + if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
2128     + return -EFAULT;
2129     + return 0;
2130    
2131     - case EVIOCGREP:
2132     - if (!test_bit(EV_REP, dev->evbit))
2133     - return -ENOSYS;
2134     - if (put_user(dev->rep[REP_DELAY], ip))
2135     - return -EFAULT;
2136     - if (put_user(dev->rep[REP_PERIOD], ip + 1))
2137     - return -EFAULT;
2138     - return 0;
2139     + case EVIOCGREP:
2140     + if (!test_bit(EV_REP, dev->evbit))
2141     + return -ENOSYS;
2142     + if (put_user(dev->rep[REP_DELAY], ip))
2143     + return -EFAULT;
2144     + if (put_user(dev->rep[REP_PERIOD], ip + 1))
2145     + return -EFAULT;
2146     + return 0;
2147    
2148     - case EVIOCSREP:
2149     - if (!test_bit(EV_REP, dev->evbit))
2150     - return -ENOSYS;
2151     - if (get_user(u, ip))
2152     - return -EFAULT;
2153     - if (get_user(v, ip + 1))
2154     - return -EFAULT;
2155     + case EVIOCSREP:
2156     + if (!test_bit(EV_REP, dev->evbit))
2157     + return -ENOSYS;
2158     + if (get_user(u, ip))
2159     + return -EFAULT;
2160     + if (get_user(v, ip + 1))
2161     + return -EFAULT;
2162    
2163     - input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
2164     - input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);
2165     + input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
2166     + input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);
2167    
2168     - return 0;
2169     + return 0;
2170    
2171     - case EVIOCGKEYCODE:
2172     - if (get_user(t, ip))
2173     - return -EFAULT;
2174     + case EVIOCGKEYCODE:
2175     + if (get_user(t, ip))
2176     + return -EFAULT;
2177    
2178     - error = dev->getkeycode(dev, t, &v);
2179     - if (error)
2180     - return error;
2181     + error = dev->getkeycode(dev, t, &v);
2182     + if (error)
2183     + return error;
2184    
2185     - if (put_user(v, ip + 1))
2186     - return -EFAULT;
2187     + if (put_user(v, ip + 1))
2188     + return -EFAULT;
2189    
2190     - return 0;
2191     + return 0;
2192    
2193     - case EVIOCSKEYCODE:
2194     - if (get_user(t, ip) || get_user(v, ip + 1))
2195     - return -EFAULT;
2196     + case EVIOCSKEYCODE:
2197     + if (get_user(t, ip) || get_user(v, ip + 1))
2198     + return -EFAULT;
2199    
2200     - return dev->setkeycode(dev, t, v);
2201     + return dev->setkeycode(dev, t, v);
2202    
2203     - case EVIOCSFF:
2204     - if (copy_from_user(&effect, p, sizeof(effect)))
2205     - return -EFAULT;
2206     + case EVIOCSFF:
2207     + if (copy_from_user(&effect, p, sizeof(effect)))
2208     + return -EFAULT;
2209    
2210     - error = input_ff_upload(dev, &effect, file);
2211     + error = input_ff_upload(dev, &effect, file);
2212    
2213     - if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
2214     - return -EFAULT;
2215     + if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
2216     + return -EFAULT;
2217    
2218     - return error;
2219     + return error;
2220    
2221     - case EVIOCRMFF:
2222     - return input_ff_erase(dev, (int)(unsigned long) p, file);
2223     + case EVIOCRMFF:
2224     + return input_ff_erase(dev, (int)(unsigned long) p, file);
2225    
2226     - case EVIOCGEFFECTS:
2227     - i = test_bit(EV_FF, dev->evbit) ? dev->ff->max_effects : 0;
2228     - if (put_user(i, ip))
2229     - return -EFAULT;
2230     - return 0;
2231     -
2232     - case EVIOCGRAB:
2233     - if (p) {
2234     - if (evdev->grab)
2235     - return -EBUSY;
2236     - if (input_grab_device(&evdev->handle))
2237     - return -EBUSY;
2238     - evdev->grab = client;
2239     - return 0;
2240     - } else {
2241     - if (evdev->grab != client)
2242     - return -EINVAL;
2243     - input_release_device(&evdev->handle);
2244     - evdev->grab = NULL;
2245     - return 0;
2246     - }
2247     + case EVIOCGEFFECTS:
2248     + i = test_bit(EV_FF, dev->evbit) ?
2249     + dev->ff->max_effects : 0;
2250     + if (put_user(i, ip))
2251     + return -EFAULT;
2252     + return 0;
2253    
2254     - default:
2255     + case EVIOCGRAB:
2256     + if (p)
2257     + return evdev_grab(evdev, client);
2258     + else
2259     + return evdev_ungrab(evdev, client);
2260    
2261     - if (_IOC_TYPE(cmd) != 'E')
2262     - return -EINVAL;
2263     + default:
2264    
2265     - if (_IOC_DIR(cmd) == _IOC_READ) {
2266     + if (_IOC_TYPE(cmd) != 'E')
2267     + return -EINVAL;
2268    
2269     - if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) {
2270     + if (_IOC_DIR(cmd) == _IOC_READ) {
2271    
2272     - unsigned long *bits;
2273     - int len;
2274     + if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) {
2275    
2276     - switch (_IOC_NR(cmd) & EV_MAX) {
2277     - case 0: bits = dev->evbit; len = EV_MAX; break;
2278     - case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
2279     - case EV_REL: bits = dev->relbit; len = REL_MAX; break;
2280     - case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
2281     - case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
2282     - case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
2283     - case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
2284     - case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
2285     - case EV_SW: bits = dev->swbit; len = SW_MAX; break;
2286     - default: return -EINVAL;
2287     - }
2288     - return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
2289     - }
2290     + unsigned long *bits;
2291     + int len;
2292    
2293     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
2294     - return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
2295     - p, compat_mode);
2296     + switch (_IOC_NR(cmd) & EV_MAX) {
2297    
2298     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0)))
2299     - return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd),
2300     - p, compat_mode);
2301     + case 0: bits = dev->evbit; len = EV_MAX; break;
2302     + case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
2303     + case EV_REL: bits = dev->relbit; len = REL_MAX; break;
2304     + case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
2305     + case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
2306     + case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
2307     + case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
2308     + case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
2309     + case EV_SW: bits = dev->swbit; len = SW_MAX; break;
2310     + default: return -EINVAL;
2311     + }
2312     + return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
2313     + }
2314    
2315     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0)))
2316     - return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd),
2317     - p, compat_mode);
2318     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
2319     + return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
2320     + p, compat_mode);
2321    
2322     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0)))
2323     - return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd),
2324     - p, compat_mode);
2325     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0)))
2326     + return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd),
2327     + p, compat_mode);
2328    
2329     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
2330     - return str_to_user(dev->name, _IOC_SIZE(cmd), p);
2331     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0)))
2332     + return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd),
2333     + p, compat_mode);
2334    
2335     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
2336     - return str_to_user(dev->phys, _IOC_SIZE(cmd), p);
2337     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0)))
2338     + return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd),
2339     + p, compat_mode);
2340    
2341     - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0)))
2342     - return str_to_user(dev->uniq, _IOC_SIZE(cmd), p);
2343     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0)))
2344     + return str_to_user(dev->name, _IOC_SIZE(cmd), p);
2345    
2346     - if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
2347     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0)))
2348     + return str_to_user(dev->phys, _IOC_SIZE(cmd), p);
2349    
2350     - t = _IOC_NR(cmd) & ABS_MAX;
2351     + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0)))
2352     + return str_to_user(dev->uniq, _IOC_SIZE(cmd), p);
2353    
2354     - abs.value = dev->abs[t];
2355     - abs.minimum = dev->absmin[t];
2356     - abs.maximum = dev->absmax[t];
2357     - abs.fuzz = dev->absfuzz[t];
2358     - abs.flat = dev->absflat[t];
2359     + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {
2360    
2361     - if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
2362     - return -EFAULT;
2363     + t = _IOC_NR(cmd) & ABS_MAX;
2364    
2365     - return 0;
2366     - }
2367     + abs.value = dev->abs[t];
2368     + abs.minimum = dev->absmin[t];
2369     + abs.maximum = dev->absmax[t];
2370     + abs.fuzz = dev->absfuzz[t];
2371     + abs.flat = dev->absflat[t];
2372    
2373     + if (copy_to_user(p, &abs, sizeof(struct input_absinfo)))
2374     + return -EFAULT;
2375     +
2376     + return 0;
2377     }
2378    
2379     - if (_IOC_DIR(cmd) == _IOC_WRITE) {
2380     + }
2381    
2382     - if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
2383     + if (_IOC_DIR(cmd) == _IOC_WRITE) {
2384    
2385     - t = _IOC_NR(cmd) & ABS_MAX;
2386     + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {
2387    
2388     - if (copy_from_user(&abs, p, sizeof(struct input_absinfo)))
2389     - return -EFAULT;
2390     + t = _IOC_NR(cmd) & ABS_MAX;
2391    
2392     - dev->abs[t] = abs.value;
2393     - dev->absmin[t] = abs.minimum;
2394     - dev->absmax[t] = abs.maximum;
2395     - dev->absfuzz[t] = abs.fuzz;
2396     - dev->absflat[t] = abs.flat;
2397     + if (copy_from_user(&abs, p,
2398     + sizeof(struct input_absinfo)))
2399     + return -EFAULT;
2400    
2401     - return 0;
2402     - }
2403     + /*
2404     + * Take the event lock to ensure that we are not
2405     + * changing device parameters in the middle
2406     + * of an event.
2407     + */
2408     + spin_lock_irq(&dev->event_lock);
2409     +
2410     + dev->abs[t] = abs.value;
2411     + dev->absmin[t] = abs.minimum;
2412     + dev->absmax[t] = abs.maximum;
2413     + dev->absfuzz[t] = abs.fuzz;
2414     + dev->absflat[t] = abs.flat;
2415     +
2416     + spin_unlock_irq(&dev->event_lock);
2417     +
2418     + return 0;
2419     }
2420     + }
2421     }
2422     return -EINVAL;
2423     }
2424    
2425     +static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
2426     + void __user *p, int compat_mode)
2427     +{
2428     + struct evdev_client *client = file->private_data;
2429     + struct evdev *evdev = client->evdev;
2430     + int retval;
2431     +
2432     + retval = mutex_lock_interruptible(&evdev->mutex);
2433     + if (retval)
2434     + return retval;
2435     +
2436     + if (!evdev->exist) {
2437     + retval = -ENODEV;
2438     + goto out;
2439     + }
2440     +
2441     + retval = evdev_do_ioctl(file, cmd, p, compat_mode);
2442     +
2443     + out:
2444     + mutex_unlock(&evdev->mutex);
2445     + return retval;
2446     +}
2447     +
2448     static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2449     {
2450     return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
2451     }
2452    
2453     #ifdef CONFIG_COMPAT
2454     -static long evdev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
2455     +static long evdev_ioctl_compat(struct file *file,
2456     + unsigned int cmd, unsigned long arg)
2457     {
2458     return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
2459     }
2460     #endif
2461    
2462     static const struct file_operations evdev_fops = {
2463     - .owner = THIS_MODULE,
2464     - .read = evdev_read,
2465     - .write = evdev_write,
2466     - .poll = evdev_poll,
2467     - .open = evdev_open,
2468     - .release = evdev_release,
2469     - .unlocked_ioctl = evdev_ioctl,
2470     + .owner = THIS_MODULE,
2471     + .read = evdev_read,
2472     + .write = evdev_write,
2473     + .poll = evdev_poll,
2474     + .open = evdev_open,
2475     + .release = evdev_release,
2476     + .unlocked_ioctl = evdev_ioctl,
2477     #ifdef CONFIG_COMPAT
2478     - .compat_ioctl = evdev_ioctl_compat,
2479     + .compat_ioctl = evdev_ioctl_compat,
2480     #endif
2481     - .fasync = evdev_fasync,
2482     - .flush = evdev_flush
2483     + .fasync = evdev_fasync,
2484     + .flush = evdev_flush
2485     };
2486    
2487     +static int evdev_install_chrdev(struct evdev *evdev)
2488     +{
2489     + /*
2490     + * No need to do any locking here as calls to connect and
2491     + * disconnect are serialized by the input core
2492     + */
2493     + evdev_table[evdev->minor] = evdev;
2494     + return 0;
2495     +}
2496     +
2497     +static void evdev_remove_chrdev(struct evdev *evdev)
2498     +{
2499     + /*
2500     + * Lock evdev table to prevent race with evdev_open()
2501     + */
2502     + mutex_lock(&evdev_table_mutex);
2503     + evdev_table[evdev->minor] = NULL;
2504     + mutex_unlock(&evdev_table_mutex);
2505     +}
2506     +
2507     +/*
2508     + * Mark device non-existent. This disables writes, ioctls and
2509     + * prevents new users from opening the device. Already posted
2510     + * blocking reads will stay; new ones, however, will fail.
2511     + */
2512     +static void evdev_mark_dead(struct evdev *evdev)
2513     +{
2514     + mutex_lock(&evdev->mutex);
2515     + evdev->exist = 0;
2516     + mutex_unlock(&evdev->mutex);
2517     +}
2518     +
2519     +static void evdev_cleanup(struct evdev *evdev)
2520     +{
2521     + struct input_handle *handle = &evdev->handle;
2522     +
2523     + evdev_mark_dead(evdev);
2524     + evdev_hangup(evdev);
2525     + evdev_remove_chrdev(evdev);
2526     +
2527     + /* evdev is marked dead so no one else accesses evdev->open */
2528     + if (evdev->open) {
2529     + input_flush_device(handle, NULL);
2530     + input_close_device(handle);
2531     + }
2532     +}
2533     +
2534     +/*
2535     + * Create a new evdev device. Note that the input core serializes calls
2536     + * to connect and disconnect so we don't need to lock evdev_table here.
2537     + */
2538     static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
2539     const struct input_device_id *id)
2540     {
2541     @@ -637,7 +875,10 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
2542     int minor;
2543     int error;
2544    
2545     - for (minor = 0; minor < EVDEV_MINORS && evdev_table[minor]; minor++);
2546     + for (minor = 0; minor < EVDEV_MINORS; minor++)
2547     + if (!evdev_table[minor])
2548     + break;
2549     +
2550     if (minor == EVDEV_MINORS) {
2551     printk(KERN_ERR "evdev: no more free evdev devices\n");
2552     return -ENFILE;
2553     @@ -648,38 +889,44 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
2554     return -ENOMEM;
2555    
2556     INIT_LIST_HEAD(&evdev->client_list);
2557     + spin_lock_init(&evdev->client_lock);
2558     + mutex_init(&evdev->mutex);
2559     init_waitqueue_head(&evdev->wait);
2560    
2561     + snprintf(evdev->name, sizeof(evdev->name), "event%d", minor);
2562     evdev->exist = 1;
2563     evdev->minor = minor;
2564     +
2565     evdev->handle.dev = dev;
2566     evdev->handle.name = evdev->name;
2567     evdev->handle.handler = handler;
2568     evdev->handle.private = evdev;
2569     - snprintf(evdev->name, sizeof(evdev->name), "event%d", minor);
2570    
2571     - snprintf(evdev->dev.bus_id, sizeof(evdev->dev.bus_id),
2572     - "event%d", minor);
2573     + strlcpy(evdev->dev.bus_id, evdev->name, sizeof(evdev->dev.bus_id));
2574     + evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor);
2575     evdev->dev.class = &input_class;
2576     evdev->dev.parent = &dev->dev;
2577     - evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor);
2578     evdev->dev.release = evdev_free;
2579     device_initialize(&evdev->dev);
2580    
2581     - evdev_table[minor] = evdev;
2582     -
2583     - error = device_add(&evdev->dev);
2584     + error = input_register_handle(&evdev->handle);
2585     if (error)
2586     goto err_free_evdev;
2587    
2588     - error = input_register_handle(&evdev->handle);
2589     + error = evdev_install_chrdev(evdev);
2590     + if (error)
2591     + goto err_unregister_handle;
2592     +
2593     + error = device_add(&evdev->dev);
2594     if (error)
2595     - goto err_delete_evdev;
2596     + goto err_cleanup_evdev;
2597    
2598     return 0;
2599    
2600     - err_delete_evdev:
2601     - device_del(&evdev->dev);
2602     + err_cleanup_evdev:
2603     + evdev_cleanup(evdev);
2604     + err_unregister_handle:
2605     + input_unregister_handle(&evdev->handle);
2606     err_free_evdev:
2607     put_device(&evdev->dev);
2608     return error;
2609     @@ -688,21 +935,10 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
2610     static void evdev_disconnect(struct input_handle *handle)
2611     {
2612     struct evdev *evdev = handle->private;
2613     - struct evdev_client *client;
2614    
2615     - input_unregister_handle(handle);
2616     device_del(&evdev->dev);
2617     -
2618     - evdev->exist = 0;
2619     -
2620     - if (evdev->open) {
2621     - input_flush_device(handle, NULL);
2622     - input_close_device(handle);
2623     - list_for_each_entry(client, &evdev->client_list, node)
2624     - kill_fasync(&client->fasync, SIGIO, POLL_HUP);
2625     - wake_up_interruptible(&evdev->wait);
2626     - }
2627     -
2628     + evdev_cleanup(evdev);
2629     + input_unregister_handle(handle);
2630     put_device(&evdev->dev);
2631     }
2632    
2633     @@ -714,13 +950,13 @@ static const struct input_device_id evdev_ids[] = {
2634     MODULE_DEVICE_TABLE(input, evdev_ids);
2635    
2636     static struct input_handler evdev_handler = {
2637     - .event = evdev_event,
2638     - .connect = evdev_connect,
2639     - .disconnect = evdev_disconnect,
2640     - .fops = &evdev_fops,
2641     - .minor = EVDEV_MINOR_BASE,
2642     - .name = "evdev",
2643     - .id_table = evdev_ids,
2644     + .event = evdev_event,
2645     + .connect = evdev_connect,
2646     + .disconnect = evdev_disconnect,
2647     + .fops = &evdev_fops,
2648     + .minor = EVDEV_MINOR_BASE,
2649     + .name = "evdev",
2650     + .id_table = evdev_ids,
2651     };
2652    
2653     static int __init evdev_init(void)
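
evdev_connect() above acquires its resources in a fixed order (register the handle, install the character device, add the device) and unwinds them in exactly the reverse order on failure, mirroring evdev_cleanup() on the disconnect side. A minimal stand-alone sketch of that unwind shape; step_a()/step_b()/step_c() and undo_a()/undo_b() are placeholders, not real kernel calls:

	static int step_a(void) { return 0; }	/* e.g. input_register_handle() */
	static int step_b(void) { return 0; }	/* e.g. evdev_install_chrdev()  */
	static int step_c(void) { return 0; }	/* e.g. device_add()            */
	static void undo_b(void) { }
	static void undo_a(void) { }

	static int example_connect(void)
	{
		int error;

		error = step_a();
		if (error)
			return error;

		error = step_b();
		if (error)
			goto err_undo_a;

		error = step_c();
		if (error)
			goto err_undo_b;

		return 0;

	 err_undo_b:
		undo_b();
	 err_undo_a:
		undo_a();
		return error;
	}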
2654     diff --git a/drivers/input/input.c b/drivers/input/input.c
2655     index 5fe7555..c59544f 100644
2656     --- a/drivers/input/input.c
2657     +++ b/drivers/input/input.c
2658     @@ -17,10 +17,10 @@
2659     #include <linux/major.h>
2660     #include <linux/proc_fs.h>
2661     #include <linux/seq_file.h>
2662     -#include <linux/interrupt.h>
2663     #include <linux/poll.h>
2664     #include <linux/device.h>
2665     #include <linux/mutex.h>
2666     +#include <linux/rcupdate.h>
2667    
2668     MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
2669     MODULE_DESCRIPTION("Input core");
2670     @@ -31,167 +31,244 @@ MODULE_LICENSE("GPL");
2671     static LIST_HEAD(input_dev_list);
2672     static LIST_HEAD(input_handler_list);
2673    
2674     +/*
2675     + * input_mutex protects access to both input_dev_list and input_handler_list.
2676     + * This also causes input_[un]register_device and input_[un]register_handler
2677     + * be mutually exclusive which simplifies locking in drivers implementing
2678     + * to be mutually exclusive, which simplifies locking in drivers implementing
2679     + */
2680     +static DEFINE_MUTEX(input_mutex);
2681     +
2682     static struct input_handler *input_table[8];
2683    
2684     -/**
2685     - * input_event() - report new input event
2686     - * @dev: device that generated the event
2687     - * @type: type of the event
2688     - * @code: event code
2689     - * @value: value of the event
2690     - *
2691     - * This function should be used by drivers implementing various input devices
2692     - * See also input_inject_event()
2693     - */
2694     -void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
2695     +static inline int is_event_supported(unsigned int code,
2696     + unsigned long *bm, unsigned int max)
2697     {
2698     - struct input_handle *handle;
2699     + return code <= max && test_bit(code, bm);
2700     +}
2701    
2702     - if (type > EV_MAX || !test_bit(type, dev->evbit))
2703     - return;
2704     +static int input_defuzz_abs_event(int value, int old_val, int fuzz)
2705     +{
2706     + if (fuzz) {
2707     + if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
2708     + return old_val;
2709    
2710     - add_input_randomness(type, code, value);
2711     + if (value > old_val - fuzz && value < old_val + fuzz)
2712     + return (old_val * 3 + value) / 4;
2713    
2714     - switch (type) {
2715     + if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
2716     + return (old_val + value) / 2;
2717     + }
2718    
2719     - case EV_SYN:
2720     - switch (code) {
2721     - case SYN_CONFIG:
2722     - if (dev->event)
2723     - dev->event(dev, type, code, value);
2724     - break;
2725     -
2726     - case SYN_REPORT:
2727     - if (dev->sync)
2728     - return;
2729     - dev->sync = 1;
2730     - break;
2731     - }
2732     - break;
2733     + return value;
2734     +}
2735    
2736     - case EV_KEY:
2737     +/*
2738     + * Pass event through all open handles. This function is called with
2739     + * dev->event_lock held and interrupts disabled. Because of that we
2740     + * do not need to use rcu_read_lock() here although we are using RCU
2741     + * to access the handle list. Note that because of that the write side uses
2742     + * synchronize_sched() instead of synchronize_rcu().
2743     + */
2744     +static void input_pass_event(struct input_dev *dev,
2745     + unsigned int type, unsigned int code, int value)
2746     +{
2747     + struct input_handle *handle = rcu_dereference(dev->grab);
2748    
2749     - if (code > KEY_MAX || !test_bit(code, dev->keybit) || !!test_bit(code, dev->key) == value)
2750     - return;
2751     + if (handle)
2752     + handle->handler->event(handle, type, code, value);
2753     + else
2754     + list_for_each_entry_rcu(handle, &dev->h_list, d_node)
2755     + if (handle->open)
2756     + handle->handler->event(handle,
2757     + type, code, value);
2758     +}
2759    
2760     - if (value == 2)
2761     - break;
2762     +/*
2763     + * Generate software autorepeat event. Note that we take
2764     + * dev->event_lock here to avoid racing with input_event
2765     + * which may cause keys get "stuck".
2766     + * which may cause keys to get "stuck".
2767     +static void input_repeat_key(unsigned long data)
2768     +{
2769     + struct input_dev *dev = (void *) data;
2770     + unsigned long flags;
2771    
2772     - change_bit(code, dev->key);
2773     + spin_lock_irqsave(&dev->event_lock, flags);
2774    
2775     - if (test_bit(EV_REP, dev->evbit) && dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && dev->timer.data && value) {
2776     - dev->repeat_key = code;
2777     - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
2778     - }
2779     + if (test_bit(dev->repeat_key, dev->key) &&
2780     + is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
2781    
2782     - break;
2783     + input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
2784    
2785     - case EV_SW:
2786     + if (dev->sync) {
2787     + /*
2788     + * Only send SYN_REPORT if we are not in a middle
2789     + * Only send SYN_REPORT if we are not in the middle
2790     + * of the driver parsing a new hardware packet.
2791     + * SYN_REPORT once it's done.
2792     + */
2793     + input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
2794     + }
2795    
2796     - if (code > SW_MAX || !test_bit(code, dev->swbit) || !!test_bit(code, dev->sw) == value)
2797     - return;
2798     + if (dev->rep[REP_PERIOD])
2799     + mod_timer(&dev->timer, jiffies +
2800     + msecs_to_jiffies(dev->rep[REP_PERIOD]));
2801     + }
2802    
2803     - change_bit(code, dev->sw);
2804     + spin_unlock_irqrestore(&dev->event_lock, flags);
2805     +}
2806    
2807     - break;
2808     +static void input_start_autorepeat(struct input_dev *dev, int code)
2809     +{
2810     + if (test_bit(EV_REP, dev->evbit) &&
2811     + dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
2812     + dev->timer.data) {
2813     + dev->repeat_key = code;
2814     + mod_timer(&dev->timer,
2815     + jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
2816     + }
2817     +}
2818    
2819     - case EV_ABS:
2820     +#define INPUT_IGNORE_EVENT 0
2821     +#define INPUT_PASS_TO_HANDLERS 1
2822     +#define INPUT_PASS_TO_DEVICE 2
2823     +#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
2824    
2825     - if (code > ABS_MAX || !test_bit(code, dev->absbit))
2826     - return;
2827     +static void input_handle_event(struct input_dev *dev,
2828     + unsigned int type, unsigned int code, int value)
2829     +{
2830     + int disposition = INPUT_IGNORE_EVENT;
2831    
2832     - if (dev->absfuzz[code]) {
2833     - if ((value > dev->abs[code] - (dev->absfuzz[code] >> 1)) &&
2834     - (value < dev->abs[code] + (dev->absfuzz[code] >> 1)))
2835     - return;
2836     + switch (type) {
2837    
2838     - if ((value > dev->abs[code] - dev->absfuzz[code]) &&
2839     - (value < dev->abs[code] + dev->absfuzz[code]))
2840     - value = (dev->abs[code] * 3 + value) >> 2;
2841     + case EV_SYN:
2842     + switch (code) {
2843     + case SYN_CONFIG:
2844     + disposition = INPUT_PASS_TO_ALL;
2845     + break;
2846    
2847     - if ((value > dev->abs[code] - (dev->absfuzz[code] << 1)) &&
2848     - (value < dev->abs[code] + (dev->absfuzz[code] << 1)))
2849     - value = (dev->abs[code] + value) >> 1;
2850     + case SYN_REPORT:
2851     + if (!dev->sync) {
2852     + dev->sync = 1;
2853     + disposition = INPUT_PASS_TO_HANDLERS;
2854     }
2855     -
2856     - if (dev->abs[code] == value)
2857     - return;
2858     -
2859     - dev->abs[code] = value;
2860     break;
2861     + }
2862     + break;
2863    
2864     - case EV_REL:
2865     + case EV_KEY:
2866     + if (is_event_supported(code, dev->keybit, KEY_MAX) &&
2867     + !!test_bit(code, dev->key) != value) {
2868    
2869     - if (code > REL_MAX || !test_bit(code, dev->relbit) || (value == 0))
2870     - return;
2871     + if (value != 2) {
2872     + __change_bit(code, dev->key);
2873     + if (value)
2874     + input_start_autorepeat(dev, code);
2875     + }
2876    
2877     - break;
2878     + disposition = INPUT_PASS_TO_HANDLERS;
2879     + }
2880     + break;
2881    
2882     - case EV_MSC:
2883     + case EV_SW:
2884     + if (is_event_supported(code, dev->swbit, SW_MAX) &&
2885     + !!test_bit(code, dev->sw) != value) {
2886    
2887     - if (code > MSC_MAX || !test_bit(code, dev->mscbit))
2888     - return;
2889     + __change_bit(code, dev->sw);
2890     + disposition = INPUT_PASS_TO_HANDLERS;
2891     + }
2892     + break;
2893    
2894     - if (dev->event)
2895     - dev->event(dev, type, code, value);
2896     + case EV_ABS:
2897     + if (is_event_supported(code, dev->absbit, ABS_MAX)) {
2898    
2899     - break;
2900     + value = input_defuzz_abs_event(value,
2901     + dev->abs[code], dev->absfuzz[code]);
2902    
2903     - case EV_LED:
2904     + if (dev->abs[code] != value) {
2905     + dev->abs[code] = value;
2906     + disposition = INPUT_PASS_TO_HANDLERS;
2907     + }
2908     + }
2909     + break;
2910    
2911     - if (code > LED_MAX || !test_bit(code, dev->ledbit) || !!test_bit(code, dev->led) == value)
2912     - return;
2913     + case EV_REL:
2914     + if (is_event_supported(code, dev->relbit, REL_MAX) && value)
2915     + disposition = INPUT_PASS_TO_HANDLERS;
2916    
2917     - change_bit(code, dev->led);
2918     + break;
2919    
2920     - if (dev->event)
2921     - dev->event(dev, type, code, value);
2922     + case EV_MSC:
2923     + if (is_event_supported(code, dev->mscbit, MSC_MAX))
2924     + disposition = INPUT_PASS_TO_ALL;
2925    
2926     - break;
2927     + break;
2928     +
2929     + case EV_LED:
2930     + if (is_event_supported(code, dev->ledbit, LED_MAX) &&
2931     + !!test_bit(code, dev->led) != value) {
2932    
2933     - case EV_SND:
2934     + __change_bit(code, dev->led);
2935     + disposition = INPUT_PASS_TO_ALL;
2936     + }
2937     + break;
2938    
2939     - if (code > SND_MAX || !test_bit(code, dev->sndbit))
2940     - return;
2941     + case EV_SND:
2942     + if (is_event_supported(code, dev->sndbit, SND_MAX)) {
2943    
2944     if (!!test_bit(code, dev->snd) != !!value)
2945     - change_bit(code, dev->snd);
2946     + __change_bit(code, dev->snd);
2947     + disposition = INPUT_PASS_TO_ALL;
2948     + }
2949     + break;
2950    
2951     - if (dev->event)
2952     - dev->event(dev, type, code, value);
2953     + case EV_REP:
2954     + if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
2955     + dev->rep[code] = value;
2956     + disposition = INPUT_PASS_TO_ALL;
2957     + }
2958     + break;
2959    
2960     - break;
2961     + case EV_FF:
2962     + if (value >= 0)
2963     + disposition = INPUT_PASS_TO_ALL;
2964     + break;
2965     + }
2966    
2967     - case EV_REP:
2968     + if (type != EV_SYN)
2969     + dev->sync = 0;
2970    
2971     - if (code > REP_MAX || value < 0 || dev->rep[code] == value)
2972     - return;
2973     + if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
2974     + dev->event(dev, type, code, value);
2975    
2976     - dev->rep[code] = value;
2977     - if (dev->event)
2978     - dev->event(dev, type, code, value);
2979     + if (disposition & INPUT_PASS_TO_HANDLERS)
2980     + input_pass_event(dev, type, code, value);
2981     +}
2982    
2983     - break;
2984     +/**
2985     + * input_event() - report new input event
2986     + * @dev: device that generated the event
2987     + * @type: type of the event
2988     + * @code: event code
2989     + * @value: value of the event
2990     + *
2991     + * This function should be used by drivers implementing various input
2992     + * devices. See also input_inject_event().
2993     + */
2994    
2995     - case EV_FF:
2996     +void input_event(struct input_dev *dev,
2997     + unsigned int type, unsigned int code, int value)
2998     +{
2999     + unsigned long flags;
3000    
3001     - if (value < 0)
3002     - return;
3003     + if (is_event_supported(type, dev->evbit, EV_MAX)) {
3004    
3005     - if (dev->event)
3006     - dev->event(dev, type, code, value);
3007     - break;
3008     + spin_lock_irqsave(&dev->event_lock, flags);
3009     + add_input_randomness(type, code, value);
3010     + input_handle_event(dev, type, code, value);
3011     + spin_unlock_irqrestore(&dev->event_lock, flags);
3012     }
3013     -
3014     - if (type != EV_SYN)
3015     - dev->sync = 0;
3016     -
3017     - if (dev->grab)
3018     - dev->grab->handler->event(dev->grab, type, code, value);
3019     - else
3020     - list_for_each_entry(handle, &dev->h_list, d_node)
3021     - if (handle->open)
3022     - handle->handler->event(handle, type, code, value);
3023     }
3024     EXPORT_SYMBOL(input_event);
3025    
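
input_defuzz_abs_event() above smooths absolute-axis jitter in three bands around the previous value. For example, with fuzz = 8 and old_val = 100: a new reading of 103 lies within fuzz/2 of the old value and is discarded, so 100 is reported again; a reading of 106 lies within fuzz and is blended as (3*100 + 106) / 4 = 101; a reading of 112 lies within 2*fuzz and becomes (100 + 112) / 2 = 106; anything further away (say 120) is passed through unchanged.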
3026     @@ -202,102 +279,230 @@ EXPORT_SYMBOL(input_event);
3027     * @code: event code
3028     * @value: value of the event
3029     *
3030     - * Similar to input_event() but will ignore event if device is "grabbed" and handle
3031     - * injecting event is not the one that owns the device.
3032     + * Similar to input_event() but will ignore event if device is
3033     + * "grabbed" and handle injecting event is not the one that owns
3034     + * the device.
3035     */
3036     -void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
3037     +void input_inject_event(struct input_handle *handle,
3038     + unsigned int type, unsigned int code, int value)
3039     {
3040     - if (!handle->dev->grab || handle->dev->grab == handle)
3041     - input_event(handle->dev, type, code, value);
3042     -}
3043     -EXPORT_SYMBOL(input_inject_event);
3044     -
3045     -static void input_repeat_key(unsigned long data)
3046     -{
3047     - struct input_dev *dev = (void *) data;
3048     + struct input_dev *dev = handle->dev;
3049     + struct input_handle *grab;
3050     + unsigned long flags;
3051    
3052     - if (!test_bit(dev->repeat_key, dev->key))
3053     - return;
3054     + if (is_event_supported(type, dev->evbit, EV_MAX)) {
3055     + spin_lock_irqsave(&dev->event_lock, flags);
3056    
3057     - input_event(dev, EV_KEY, dev->repeat_key, 2);
3058     - input_sync(dev);
3059     + grab = rcu_dereference(dev->grab);
3060     + if (!grab || grab == handle)
3061     + input_handle_event(dev, type, code, value);
3062    
3063     - if (dev->rep[REP_PERIOD])
3064     - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_PERIOD]));
3065     + spin_unlock_irqrestore(&dev->event_lock, flags);
3066     + }
3067     }
3068     +EXPORT_SYMBOL(input_inject_event);
3069    
3070     +/**
3071     + * input_grab_device - grabs device for exclusive use
3072     + * @handle: input handle that wants to own the device
3073     + *
3074     + * When a device is grabbed by an input handle all events generated by
3075     + * the device are delivered only to this handle. Also events injected
3076     + * by other input handles are ignored while the device is grabbed.
3077     + */
3078     int input_grab_device(struct input_handle *handle)
3079     {
3080     - if (handle->dev->grab)
3081     - return -EBUSY;
3082     + struct input_dev *dev = handle->dev;
3083     + int retval;
3084    
3085     - handle->dev->grab = handle;
3086     - return 0;
3087     + retval = mutex_lock_interruptible(&dev->mutex);
3088     + if (retval)
3089     + return retval;
3090     +
3091     + if (dev->grab) {
3092     + retval = -EBUSY;
3093     + goto out;
3094     + }
3095     +
3096     + rcu_assign_pointer(dev->grab, handle);
3097     + /*
3098     + * Not using synchronize_rcu() because read-side is protected
3099     + * by a spinlock with interrupts off instead of rcu_read_lock().
3100     + */
3101     + synchronize_sched();
3102     +
3103     + out:
3104     + mutex_unlock(&dev->mutex);
3105     + return retval;
3106     }
3107     EXPORT_SYMBOL(input_grab_device);
3108    
3109     -void input_release_device(struct input_handle *handle)
3110     +static void __input_release_device(struct input_handle *handle)
3111     {
3112     struct input_dev *dev = handle->dev;
3113    
3114     if (dev->grab == handle) {
3115     - dev->grab = NULL;
3116     + rcu_assign_pointer(dev->grab, NULL);
3117     + /* Make sure input_pass_event() notices that grab is gone */
3118     + synchronize_sched();
3119    
3120     list_for_each_entry(handle, &dev->h_list, d_node)
3121     - if (handle->handler->start)
3122     + if (handle->open && handle->handler->start)
3123     handle->handler->start(handle);
3124     }
3125     }
3126     +
3127     +/**
3128     + * input_release_device - release previously grabbed device
3129     + * @handle: input handle that owns the device
3130     + *
3131     + * Releases a previously grabbed device so that other input handles can
3132     + * start receiving input events. Upon release all handlers attached
3133     + * to the device have their start() method called so they have a chance
3134     + * to synchronize device state with the rest of the system.
3135     + */
3136     +void input_release_device(struct input_handle *handle)
3137     +{
3138     + struct input_dev *dev = handle->dev;
3139     +
3140     + mutex_lock(&dev->mutex);
3141     + __input_release_device(handle);
3142     + mutex_unlock(&dev->mutex);
3143     +}
3144     EXPORT_SYMBOL(input_release_device);
3145    
3146     +/**
3147     + * input_open_device - open input device
3148     + * @handle: handle through which device is being accessed
3149     + *
3150     + * This function should be called by input handlers when they
3151     + * want to start receiving events from the given input device.
3152     + */
3153     int input_open_device(struct input_handle *handle)
3154     {
3155     struct input_dev *dev = handle->dev;
3156     - int err;
3157     + int retval;
3158    
3159     - err = mutex_lock_interruptible(&dev->mutex);
3160     - if (err)
3161     - return err;
3162     + retval = mutex_lock_interruptible(&dev->mutex);
3163     + if (retval)
3164     + return retval;
3165     +
3166     + if (dev->going_away) {
3167     + retval = -ENODEV;
3168     + goto out;
3169     + }
3170    
3171     handle->open++;
3172    
3173     if (!dev->users++ && dev->open)
3174     - err = dev->open(dev);
3175     -
3176     - if (err)
3177     - handle->open--;
3178     + retval = dev->open(dev);
3179     +
3180     + if (retval) {
3181     + dev->users--;
3182     + if (!--handle->open) {
3183     + /*
3184     + * Make sure we are not delivering any more events
3185     + * through this handle
3186     + */
3187     + synchronize_sched();
3188     + }
3189     + }
3190    
3191     + out:
3192     mutex_unlock(&dev->mutex);
3193     -
3194     - return err;
3195     + return retval;
3196     }
3197     EXPORT_SYMBOL(input_open_device);
3198    
3199     -int input_flush_device(struct input_handle* handle, struct file* file)
3200     +int input_flush_device(struct input_handle *handle, struct file *file)
3201     {
3202     - if (handle->dev->flush)
3203     - return handle->dev->flush(handle->dev, file);
3204     + struct input_dev *dev = handle->dev;
3205     + int retval;
3206    
3207     - return 0;
3208     + retval = mutex_lock_interruptible(&dev->mutex);
3209     + if (retval)
3210     + return retval;
3211     +
3212     + if (dev->flush)
3213     + retval = dev->flush(dev, file);
3214     +
3215     + mutex_unlock(&dev->mutex);
3216     + return retval;
3217     }
3218     EXPORT_SYMBOL(input_flush_device);
3219    
3220     +/**
3221     + * input_close_device - close input device
3222     + * @handle: handle through which device is being accessed
3223     + *
3224     + * This function should be called by input handlers when they
3225     + * want to stop receiving events from the given input device.
3226     + */
3227     void input_close_device(struct input_handle *handle)
3228     {
3229     struct input_dev *dev = handle->dev;
3230    
3231     - input_release_device(handle);
3232     -
3233     mutex_lock(&dev->mutex);
3234    
3235     + __input_release_device(handle);
3236     +
3237     if (!--dev->users && dev->close)
3238     dev->close(dev);
3239     - handle->open--;
3240     +
3241     + if (!--handle->open) {
3242     + /*
3243     + * synchronize_sched() makes sure that input_pass_event()
3244     + * completed and that no more input events are delivered
3245     + * through this handle
3246     + */
3247     + synchronize_sched();
3248     + }
3249    
3250     mutex_unlock(&dev->mutex);
3251     }
3252     EXPORT_SYMBOL(input_close_device);
3253    
3254     +/*
3255     + * Prepare device for unregistering
3256     + */
3257     +static void input_disconnect_device(struct input_dev *dev)
3258     +{
3259     + struct input_handle *handle;
3260     + int code;
3261     +
3262     + /*
3263     + * Mark device as going away. Note that we take dev->mutex here
3264     + * not to protect access to dev->going_away but rather to ensure
3265     + * that there are no threads in the middle of input_open_device()
3266     + */
3267     + mutex_lock(&dev->mutex);
3268     + dev->going_away = 1;
3269     + mutex_unlock(&dev->mutex);
3270     +
3271     + spin_lock_irq(&dev->event_lock);
3272     +
3273     + /*
3274     + * Simulate keyup events for all pressed keys so that handlers
3275     + * are not left with "stuck" keys. The driver may continue to
3276     + * generate events even after we are done here but they will not
3277     + * reach any handlers.
3278     + */
3279     + if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
3280     + for (code = 0; code <= KEY_MAX; code++) {
3281     + if (is_event_supported(code, dev->keybit, KEY_MAX) &&
3282     + test_bit(code, dev->key)) {
3283     + input_pass_event(dev, EV_KEY, code, 0);
3284     + }
3285     + }
3286     + input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
3287     + }
3288     +
3289     + list_for_each_entry(handle, &dev->h_list, d_node)
3290     + handle->open = 0;
3291     +
3292     + spin_unlock_irq(&dev->event_lock);
3293     +}
3294     +
3295     static int input_fetch_keycode(struct input_dev *dev, int scancode)
3296     {
3297     switch (dev->keycodesize) {
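
The hunks above funnel all delivery through input_pass_event()/input_inject_event(): an event reaches the handlers only when the device is not grabbed, or when the grabbing handle itself injected it, with dev->event_lock held and dev->grab published via rcu_assign_pointer()/synchronize_sched(). The standalone C below is a minimal userspace sketch of just that grab check; the names (pass_event, struct dev, struct handle) are simplified stand-ins rather than the kernel types, and all locking is omitted.

/* Minimal userspace model of the grab filter in input_pass_event();
 * simplified types, no locking -- illustration only. */
#include <stdio.h>

struct handle { const char *name; };

struct dev { struct handle *grab; };    /* exclusive owner, or NULL */

/* source is the injecting handle, or NULL for a hardware-generated event */
static void pass_event(struct dev *dev, struct handle *source, int code)
{
    struct handle *grab = dev->grab;

    if (!grab || grab == source)
        printf("deliver %d (from %s)\n", code,
               source ? source->name : "hardware");
    else
        printf("drop %d, device grabbed by %s\n", code, grab->name);
}

int main(void)
{
    struct dev d = { NULL };
    struct handle evdev = { "evdev" }, joydev = { "joydev" };

    pass_event(&d, NULL, 30);      /* no grab: delivered to handlers */
    d.grab = &evdev;               /* evdev grabs the device */
    pass_event(&d, &joydev, 30);   /* injected by another handle: dropped */
    pass_event(&d, &evdev, 30);    /* injected by the grabber: delivered */
    return 0;
}
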
3298     @@ -473,7 +678,8 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
3299    
3300     static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
3301     {
3302     - /* acquire lock here ... Yes, we do need locking, I knowi, I know... */
3303     + if (mutex_lock_interruptible(&input_mutex))
3304     + return NULL;
3305    
3306     return seq_list_start(&input_dev_list, *pos);
3307     }
3308     @@ -485,7 +691,7 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3309    
3310     static void input_devices_seq_stop(struct seq_file *seq, void *v)
3311     {
3312     - /* release lock here */
3313     + mutex_unlock(&input_mutex);
3314     }
3315    
3316     static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
3317     @@ -569,7 +775,9 @@ static const struct file_operations input_devices_fileops = {
3318    
3319     static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
3320     {
3321     - /* acquire lock here ... Yes, we do need locking, I knowi, I know... */
3322     + if (mutex_lock_interruptible(&input_mutex))
3323     + return NULL;
3324     +
3325     seq->private = (void *)(unsigned long)*pos;
3326     return seq_list_start(&input_handler_list, *pos);
3327     }
3328     @@ -582,7 +790,7 @@ static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3329    
3330     static void input_handlers_seq_stop(struct seq_file *seq, void *v)
3331     {
3332     - /* release lock here */
3333     + mutex_unlock(&input_mutex);
3334     }
3335    
3336     static int input_handlers_seq_show(struct seq_file *seq, void *v)
3337     @@ -1005,6 +1213,7 @@ struct input_dev *input_allocate_device(void)
3338     dev->dev.class = &input_class;
3339     device_initialize(&dev->dev);
3340     mutex_init(&dev->mutex);
3341     + spin_lock_init(&dev->event_lock);
3342     INIT_LIST_HEAD(&dev->h_list);
3343     INIT_LIST_HEAD(&dev->node);
3344    
3345     @@ -1022,7 +1231,7 @@ EXPORT_SYMBOL(input_allocate_device);
3346     * This function should only be used if input_register_device()
3347     * was not called yet or if it failed. Once device was registered
3348     * use input_unregister_device() and memory will be freed once last
3349     - * refrence to the device is dropped.
3350     + * reference to the device is dropped.
3351     *
3352     * Device should be allocated by input_allocate_device().
3353     *
3354     @@ -1092,6 +1301,18 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
3355     }
3356     EXPORT_SYMBOL(input_set_capability);
3357    
3358     +/**
3359     + * input_register_device - register device with input core
3360     + * @dev: device to be registered
3361     + *
3362     + * This function registers device with input core. The device must be
3363     + * allocated with input_allocate_device() and all its capabilities
3364     + * set up before registering.
3365     + * If the function fails, the device must be freed with input_free_device().
3366     + * Once device has been successfully registered it can be unregistered
3367     + * with input_unregister_device(); input_free_device() should not be
3368     + * called in this case.
3369     + */
3370     int input_register_device(struct input_dev *dev)
3371     {
3372     static atomic_t input_no = ATOMIC_INIT(0);
3373     @@ -1099,7 +1320,7 @@ int input_register_device(struct input_dev *dev)
3374     const char *path;
3375     int error;
3376    
3377     - set_bit(EV_SYN, dev->evbit);
3378     + __set_bit(EV_SYN, dev->evbit);
3379    
3380     /*
3381     * If delay and period are pre-set by the driver, then autorepeating
3382     @@ -1120,8 +1341,6 @@ int input_register_device(struct input_dev *dev)
3383     if (!dev->setkeycode)
3384     dev->setkeycode = input_default_setkeycode;
3385    
3386     - list_add_tail(&dev->node, &input_dev_list);
3387     -
3388     snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
3389     "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1);
3390    
3391     @@ -1137,49 +1356,79 @@ int input_register_device(struct input_dev *dev)
3392     dev->name ? dev->name : "Unspecified device", path ? path : "N/A");
3393     kfree(path);
3394    
3395     + error = mutex_lock_interruptible(&input_mutex);
3396     + if (error) {
3397     + device_del(&dev->dev);
3398     + return error;
3399     + }
3400     +
3401     + list_add_tail(&dev->node, &input_dev_list);
3402     +
3403     list_for_each_entry(handler, &input_handler_list, node)
3404     input_attach_handler(dev, handler);
3405    
3406     input_wakeup_procfs_readers();
3407    
3408     + mutex_unlock(&input_mutex);
3409     +
3410     return 0;
3411     }
3412     EXPORT_SYMBOL(input_register_device);
3413    
3414     +/**
3415     + * input_unregister_device - unregister previously registered device
3416     + * @dev: device to be unregistered
3417     + *
3418     + * This function unregisters an input device. Once device is unregistered
3419     + * the caller should not try to access it as it may get freed at any moment.
3420     + */
3421     void input_unregister_device(struct input_dev *dev)
3422     {
3423     struct input_handle *handle, *next;
3424     - int code;
3425    
3426     - for (code = 0; code <= KEY_MAX; code++)
3427     - if (test_bit(code, dev->key))
3428     - input_report_key(dev, code, 0);
3429     - input_sync(dev);
3430     + input_disconnect_device(dev);
3431    
3432     - del_timer_sync(&dev->timer);
3433     + mutex_lock(&input_mutex);
3434    
3435     list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
3436     handle->handler->disconnect(handle);
3437     WARN_ON(!list_empty(&dev->h_list));
3438    
3439     + del_timer_sync(&dev->timer);
3440     list_del_init(&dev->node);
3441    
3442     - device_unregister(&dev->dev);
3443     -
3444     input_wakeup_procfs_readers();
3445     +
3446     + mutex_unlock(&input_mutex);
3447     +
3448     + device_unregister(&dev->dev);
3449     }
3450     EXPORT_SYMBOL(input_unregister_device);
3451    
3452     +/**
3453     + * input_register_handler - register a new input handler
3454     + * @handler: handler to be registered
3455     + *
3456     + * This function registers a new input handler (interface) for input
3457     + * devices in the system and attaches it to all input devices that
3458     + * are compatible with the handler.
3459     + */
3460     int input_register_handler(struct input_handler *handler)
3461     {
3462     struct input_dev *dev;
3463     + int retval;
3464     +
3465     + retval = mutex_lock_interruptible(&input_mutex);
3466     + if (retval)
3467     + return retval;
3468    
3469     INIT_LIST_HEAD(&handler->h_list);
3470    
3471     if (handler->fops != NULL) {
3472     - if (input_table[handler->minor >> 5])
3473     - return -EBUSY;
3474     -
3475     + if (input_table[handler->minor >> 5]) {
3476     + retval = -EBUSY;
3477     + goto out;
3478     + }
3479     input_table[handler->minor >> 5] = handler;
3480     }
3481    
3482     @@ -1189,14 +1438,26 @@ int input_register_handler(struct input_handler *handler)
3483     input_attach_handler(dev, handler);
3484    
3485     input_wakeup_procfs_readers();
3486     - return 0;
3487     +
3488     + out:
3489     + mutex_unlock(&input_mutex);
3490     + return retval;
3491     }
3492     EXPORT_SYMBOL(input_register_handler);
3493    
3494     +/**
3495     + * input_unregister_handler - unregisters an input handler
3496     + * @handler: handler to be unregistered
3497     + *
3498     + * This function disconnects a handler from its input devices and
3499     + * removes it from lists of known handlers.
3500     + */
3501     void input_unregister_handler(struct input_handler *handler)
3502     {
3503     struct input_handle *handle, *next;
3504    
3505     + mutex_lock(&input_mutex);
3506     +
3507     list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
3508     handler->disconnect(handle);
3509     WARN_ON(!list_empty(&handler->h_list));
3510     @@ -1207,14 +1468,50 @@ void input_unregister_handler(struct input_handler *handler)
3511     input_table[handler->minor >> 5] = NULL;
3512    
3513     input_wakeup_procfs_readers();
3514     +
3515     + mutex_unlock(&input_mutex);
3516     }
3517     EXPORT_SYMBOL(input_unregister_handler);
3518    
3519     +/**
3520     + * input_register_handle - register a new input handle
3521     + * @handle: handle to register
3522     + *
3523     + * This function puts a new input handle onto device's
3524     + * and handler's lists so that events can flow through
3525     + * it once it is opened using input_open_device().
3526     + *
3527     + * This function is supposed to be called from handler's
3528     + * connect() method.
3529     + */
3530     int input_register_handle(struct input_handle *handle)
3531     {
3532     struct input_handler *handler = handle->handler;
3533     + struct input_dev *dev = handle->dev;
3534     + int error;
3535     +
3536     + /*
3537     + * We take dev->mutex here to prevent race with
3538     + * input_release_device().
3539     + */
3540     + error = mutex_lock_interruptible(&dev->mutex);
3541     + if (error)
3542     + return error;
3543     + list_add_tail_rcu(&handle->d_node, &dev->h_list);
3544     + mutex_unlock(&dev->mutex);
3545     + /*
3546     + * We don't use synchronize_rcu() here because we rely
3547     + * on dev->event_lock to protect read-side critical
3548     + * section in input_pass_event().
3549     + */
3550     + synchronize_sched();
3551    
3552     - list_add_tail(&handle->d_node, &handle->dev->h_list);
3553     + /*
3554     + * Since we are supposed to be called from ->connect()
3555     + * which is mutually exclusive with ->disconnect()
3556     + * we can't be racing with input_unregister_handle()
3557     + * and so separate lock is not needed here.
3558     + */
3559     list_add_tail(&handle->h_node, &handler->h_list);
3560    
3561     if (handler->start)
3562     @@ -1224,10 +1521,29 @@ int input_register_handle(struct input_handle *handle)
3563     }
3564     EXPORT_SYMBOL(input_register_handle);
3565    
3566     +/**
3567     + * input_unregister_handle - unregister an input handle
3568     + * @handle: handle to unregister
3569     + *
3570     + * This function removes input handle from device's
3571     + * and handler's lists.
3572     + *
3573     + * This function is supposed to be called from handler's
3574     + * disconnect() method.
3575     + */
3576     void input_unregister_handle(struct input_handle *handle)
3577     {
3578     + struct input_dev *dev = handle->dev;
3579     +
3580     list_del_init(&handle->h_node);
3581     - list_del_init(&handle->d_node);
3582     +
3583     + /*
3584     + * Take dev->mutex to prevent race with input_release_device().
3585     + */
3586     + mutex_lock(&dev->mutex);
3587     + list_del_rcu(&handle->d_node);
3588     + mutex_unlock(&dev->mutex);
3589     + synchronize_sched();
3590     }
3591     EXPORT_SYMBOL(input_unregister_handle);
3592    
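
The reworked input_open_device()/input_close_device() above keep two counts: dev->users decides when the driver's open()/close() callbacks run (first user in, last user out), while handle->open gates event delivery per handle. Below is a minimal userspace sketch of only the first-open/last-close accounting; the names (open_device, hw_open, ...) are illustrative, and dev->mutex, the going_away check and synchronize_sched() are deliberately left out.

/* Userspace model of first-open/last-close accounting; the real code
 * does this under dev->mutex and refuses opens once going_away is set. */
#include <stdio.h>

struct dev { int users; };      /* handles currently holding the device open */

static int hw_open(void)  { printf("dev->open() called\n");  return 0; }
static void hw_close(void) { printf("dev->close() called\n"); }

static int open_device(struct dev *dev)
{
    int retval = 0;

    if (!dev->users++)          /* first user: open the hardware */
        retval = hw_open();
    if (retval)
        dev->users--;           /* roll the count back on failure */
    return retval;
}

static void close_device(struct dev *dev)
{
    if (!--dev->users)          /* last user: close the hardware */
        hw_close();
}

int main(void)
{
    struct dev d = { 0 };

    open_device(&d);            /* prints dev->open() */
    open_device(&d);            /* refcount only */
    close_device(&d);           /* refcount only */
    close_device(&d);           /* prints dev->close() */
    return 0;
}
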
3593     diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
3594     index a9a0180..f306c97 100644
3595     --- a/drivers/input/joydev.c
3596     +++ b/drivers/input/joydev.c
3597     @@ -43,6 +43,8 @@ struct joydev {
3598     struct input_handle handle;
3599     wait_queue_head_t wait;
3600     struct list_head client_list;
3601     + spinlock_t client_lock; /* protects client_list */
3602     + struct mutex mutex;
3603     struct device dev;
3604    
3605     struct js_corr corr[ABS_MAX + 1];
3606     @@ -61,31 +63,61 @@ struct joydev_client {
3607     int head;
3608     int tail;
3609     int startup;
3610     + spinlock_t buffer_lock; /* protects access to buffer, head and tail */
3611     struct fasync_struct *fasync;
3612     struct joydev *joydev;
3613     struct list_head node;
3614     };
3615    
3616     static struct joydev *joydev_table[JOYDEV_MINORS];
3617     +static DEFINE_MUTEX(joydev_table_mutex);
3618    
3619     static int joydev_correct(int value, struct js_corr *corr)
3620     {
3621     switch (corr->type) {
3622     - case JS_CORR_NONE:
3623     - break;
3624     - case JS_CORR_BROKEN:
3625     - value = value > corr->coef[0] ? (value < corr->coef[1] ? 0 :
3626     - ((corr->coef[3] * (value - corr->coef[1])) >> 14)) :
3627     - ((corr->coef[2] * (value - corr->coef[0])) >> 14);
3628     - break;
3629     - default:
3630     - return 0;
3631     +
3632     + case JS_CORR_NONE:
3633     + break;
3634     +
3635     + case JS_CORR_BROKEN:
3636     + value = value > corr->coef[0] ? (value < corr->coef[1] ? 0 :
3637     + ((corr->coef[3] * (value - corr->coef[1])) >> 14)) :
3638     + ((corr->coef[2] * (value - corr->coef[0])) >> 14);
3639     + break;
3640     +
3641     + default:
3642     + return 0;
3643     }
3644    
3645     return value < -32767 ? -32767 : (value > 32767 ? 32767 : value);
3646     }
3647    
3648     -static void joydev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
3649     +static void joydev_pass_event(struct joydev_client *client,
3650     + struct js_event *event)
3651     +{
3652     + struct joydev *joydev = client->joydev;
3653     +
3654     + /*
3655     + * IRQs already disabled, just acquire the lock
3656     + */
3657     + spin_lock(&client->buffer_lock);
3658     +
3659     + client->buffer[client->head] = *event;
3660     +
3661     + if (client->startup == joydev->nabs + joydev->nkey) {
3662     + client->head++;
3663     + client->head &= JOYDEV_BUFFER_SIZE - 1;
3664     + if (client->tail == client->head)
3665     + client->startup = 0;
3666     + }
3667     +
3668     + spin_unlock(&client->buffer_lock);
3669     +
3670     + kill_fasync(&client->fasync, SIGIO, POLL_IN);
3671     +}
3672     +
3673     +static void joydev_event(struct input_handle *handle,
3674     + unsigned int type, unsigned int code, int value)
3675     {
3676     struct joydev *joydev = handle->private;
3677     struct joydev_client *client;
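
For reference, the JS_CORR_BROKEN branch of joydev_correct() above maps a raw axis value into a dead zone around the centre plus a fixed-point scale toward +/-32767; joydev_connect() later in this patch derives coef[0..3] from absmin/absmax/absflat. The standalone sketch below reproduces that arithmetic with made-up example limits (absmin=0, absmax=255, absflat=15); it is an illustration, not kernel code.

/* Standalone model of JS_CORR_BROKEN calibration: coefficients derived
 * as in joydev_connect(), correction applied as in joydev_correct(). */
#include <stdio.h>

static int correct(int value, const int coef[4])
{
    value = value > coef[0] ? (value < coef[1] ? 0 :
            ((coef[3] * (value - coef[1])) >> 14)) :
            ((coef[2] * (value - coef[0])) >> 14);

    /* clamp to the reported joystick range */
    return value < -32767 ? -32767 : (value > 32767 ? 32767 : value);
}

int main(void)
{
    int absmin = 0, absmax = 255, absflat = 15;   /* example axis limits */
    int coef[4], t, raw;

    coef[0] = (absmax + absmin) / 2 - absflat;    /* 112: lower dead-zone edge */
    coef[1] = (absmax + absmin) / 2 + absflat;    /* 142: upper dead-zone edge */

    t = (absmax - absmin) / 2 - 2 * absflat;      /* 97 */
    coef[2] = coef[3] = (1 << 29) / t;            /* with the later >>14, ~32768/t */

    for (raw = absmin; raw <= absmax; raw += 51)
        printf("raw %3d -> %6d\n", raw, correct(raw, coef));
    return 0;
}

With these numbers the extremes land just past +/-32767 and are clamped, which is exactly what the final clamp in joydev_correct() is there for.
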
3678     @@ -93,39 +125,32 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne
3679    
3680     switch (type) {
3681    
3682     - case EV_KEY:
3683     - if (code < BTN_MISC || value == 2)
3684     - return;
3685     - event.type = JS_EVENT_BUTTON;
3686     - event.number = joydev->keymap[code - BTN_MISC];
3687     - event.value = value;
3688     - break;
3689     -
3690     - case EV_ABS:
3691     - event.type = JS_EVENT_AXIS;
3692     - event.number = joydev->absmap[code];
3693     - event.value = joydev_correct(value, joydev->corr + event.number);
3694     - if (event.value == joydev->abs[event.number])
3695     - return;
3696     - joydev->abs[event.number] = event.value;
3697     - break;
3698     + case EV_KEY:
3699     + if (code < BTN_MISC || value == 2)
3700     + return;
3701     + event.type = JS_EVENT_BUTTON;
3702     + event.number = joydev->keymap[code - BTN_MISC];
3703     + event.value = value;
3704     + break;
3705    
3706     - default:
3707     + case EV_ABS:
3708     + event.type = JS_EVENT_AXIS;
3709     + event.number = joydev->absmap[code];
3710     + event.value = joydev_correct(value,
3711     + &joydev->corr[event.number]);
3712     + if (event.value == joydev->abs[event.number])
3713     return;
3714     + joydev->abs[event.number] = event.value;
3715     + break;
3716     +
3717     + default:
3718     + return;
3719     }
3720    
3721     event.time = jiffies_to_msecs(jiffies);
3722    
3723     - list_for_each_entry(client, &joydev->client_list, node) {
3724     -
3725     - memcpy(client->buffer + client->head, &event, sizeof(struct js_event));
3726     -
3727     - if (client->startup == joydev->nabs + joydev->nkey)
3728     - if (client->tail == (client->head = (client->head + 1) & (JOYDEV_BUFFER_SIZE - 1)))
3729     - client->startup = 0;
3730     -
3731     - kill_fasync(&client->fasync, SIGIO, POLL_IN);
3732     - }
3733     + list_for_each_entry_rcu(client, &joydev->client_list, node)
3734     + joydev_pass_event(client, &event);
3735    
3736     wake_up_interruptible(&joydev->wait);
3737     }
3738     @@ -144,23 +169,88 @@ static void joydev_free(struct device *dev)
3739     {
3740     struct joydev *joydev = container_of(dev, struct joydev, dev);
3741    
3742     - joydev_table[joydev->minor] = NULL;
3743     kfree(joydev);
3744     }
3745    
3746     +static void joydev_attach_client(struct joydev *joydev,
3747     + struct joydev_client *client)
3748     +{
3749     + spin_lock(&joydev->client_lock);
3750     + list_add_tail_rcu(&client->node, &joydev->client_list);
3751     + spin_unlock(&joydev->client_lock);
3752     + /*
3753     + * We don't use synchronize_rcu() here because read-side
3754     + * critical section is protected by a spinlock (dev->event_lock)
3755     + * instead of rcu_read_lock().
3756     + */
3757     + synchronize_sched();
3758     +}
3759     +
3760     +static void joydev_detach_client(struct joydev *joydev,
3761     + struct joydev_client *client)
3762     +{
3763     + spin_lock(&joydev->client_lock);
3764     + list_del_rcu(&client->node);
3765     + spin_unlock(&joydev->client_lock);
3766     + synchronize_sched();
3767     +}
3768     +
3769     +static int joydev_open_device(struct joydev *joydev)
3770     +{
3771     + int retval;
3772     +
3773     + retval = mutex_lock_interruptible(&joydev->mutex);
3774     + if (retval)
3775     + return retval;
3776     +
3777     + if (!joydev->exist)
3778     + retval = -ENODEV;
3779     + else if (!joydev->open++) {
3780     + retval = input_open_device(&joydev->handle);
3781     + if (retval)
3782     + joydev->open--;
3783     + }
3784     +
3785     + mutex_unlock(&joydev->mutex);
3786     + return retval;
3787     +}
3788     +
3789     +static void joydev_close_device(struct joydev *joydev)
3790     +{
3791     + mutex_lock(&joydev->mutex);
3792     +
3793     + if (joydev->exist && !--joydev->open)
3794     + input_close_device(&joydev->handle);
3795     +
3796     + mutex_unlock(&joydev->mutex);
3797     +}
3798     +
3799     +/*
3800     + * Wake up users waiting for IO so they can disconnect from
3801     + * dead device.
3802     + */
3803     +static void joydev_hangup(struct joydev *joydev)
3804     +{
3805     + struct joydev_client *client;
3806     +
3807     + spin_lock(&joydev->client_lock);
3808     + list_for_each_entry(client, &joydev->client_list, node)
3809     + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
3810     + spin_unlock(&joydev->client_lock);
3811     +
3812     + wake_up_interruptible(&joydev->wait);
3813     +}
3814     +
3815     static int joydev_release(struct inode *inode, struct file *file)
3816     {
3817     struct joydev_client *client = file->private_data;
3818     struct joydev *joydev = client->joydev;
3819    
3820     joydev_fasync(-1, file, 0);
3821     -
3822     - list_del(&client->node);
3823     + joydev_detach_client(joydev, client);
3824     kfree(client);
3825    
3826     - if (!--joydev->open && joydev->exist)
3827     - input_close_device(&joydev->handle);
3828     -
3829     + joydev_close_device(joydev);
3830     put_device(&joydev->dev);
3831    
3832     return 0;
3833     @@ -176,11 +266,16 @@ static int joydev_open(struct inode *inode, struct file *file)
3834     if (i >= JOYDEV_MINORS)
3835     return -ENODEV;
3836    
3837     + error = mutex_lock_interruptible(&joydev_table_mutex);
3838     + if (error)
3839     + return error;
3840     joydev = joydev_table[i];
3841     - if (!joydev || !joydev->exist)
3842     - return -ENODEV;
3843     + if (joydev)
3844     + get_device(&joydev->dev);
3845     + mutex_unlock(&joydev_table_mutex);
3846    
3847     - get_device(&joydev->dev);
3848     + if (!joydev)
3849     + return -ENODEV;
3850    
3851     client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL);
3852     if (!client) {
3853     @@ -188,37 +283,129 @@ static int joydev_open(struct inode *inode, struct file *file)
3854     goto err_put_joydev;
3855     }
3856    
3857     + spin_lock_init(&client->buffer_lock);
3858     client->joydev = joydev;
3859     - list_add_tail(&client->node, &joydev->client_list);
3860     + joydev_attach_client(joydev, client);
3861    
3862     - if (!joydev->open++ && joydev->exist) {
3863     - error = input_open_device(&joydev->handle);
3864     - if (error)
3865     - goto err_free_client;
3866     - }
3867     + error = joydev_open_device(joydev);
3868     + if (error)
3869     + goto err_free_client;
3870    
3871     file->private_data = client;
3872     return 0;
3873    
3874     err_free_client:
3875     - list_del(&client->node);
3876     + joydev_detach_client(joydev, client);
3877     kfree(client);
3878     err_put_joydev:
3879     put_device(&joydev->dev);
3880     return error;
3881     }
3882    
3883     -static ssize_t joydev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
3884     +static int joydev_generate_startup_event(struct joydev_client *client,
3885     + struct input_dev *input,
3886     + struct js_event *event)
3887     {
3888     - return -EINVAL;
3889     + struct joydev *joydev = client->joydev;
3890     + int have_event;
3891     +
3892     + spin_lock_irq(&client->buffer_lock);
3893     +
3894     + have_event = client->startup < joydev->nabs + joydev->nkey;
3895     +
3896     + if (have_event) {
3897     +
3898     + event->time = jiffies_to_msecs(jiffies);
3899     + if (client->startup < joydev->nkey) {
3900     + event->type = JS_EVENT_BUTTON | JS_EVENT_INIT;
3901     + event->number = client->startup;
3902     + event->value = !!test_bit(joydev->keypam[event->number],
3903     + input->key);
3904     + } else {
3905     + event->type = JS_EVENT_AXIS | JS_EVENT_INIT;
3906     + event->number = client->startup - joydev->nkey;
3907     + event->value = joydev->abs[event->number];
3908     + }
3909     + client->startup++;
3910     + }
3911     +
3912     + spin_unlock_irq(&client->buffer_lock);
3913     +
3914     + return have_event;
3915     +}
3916     +
3917     +static int joydev_fetch_next_event(struct joydev_client *client,
3918     + struct js_event *event)
3919     +{
3920     + int have_event;
3921     +
3922     + spin_lock_irq(&client->buffer_lock);
3923     +
3924     + have_event = client->head != client->tail;
3925     + if (have_event) {
3926     + *event = client->buffer[client->tail++];
3927     + client->tail &= JOYDEV_BUFFER_SIZE - 1;
3928     + }
3929     +
3930     + spin_unlock_irq(&client->buffer_lock);
3931     +
3932     + return have_event;
3933     +}
3934     +
3935     +/*
3936     + * Old joystick interface
3937     + */
3938     +static ssize_t joydev_0x_read(struct joydev_client *client,
3939     + struct input_dev *input,
3940     + char __user *buf)
3941     +{
3942     + struct joydev *joydev = client->joydev;
3943     + struct JS_DATA_TYPE data;
3944     + int i;
3945     +
3946     + spin_lock_irq(&input->event_lock);
3947     +
3948     + /*
3949     + * Get device state
3950     + */
3951     + for (data.buttons = i = 0; i < 32 && i < joydev->nkey; i++)
3952     + data.buttons |=
3953     + test_bit(joydev->keypam[i], input->key) ? (1 << i) : 0;
3954     + data.x = (joydev->abs[0] / 256 + 128) >> joydev->glue.JS_CORR.x;
3955     + data.y = (joydev->abs[1] / 256 + 128) >> joydev->glue.JS_CORR.y;
3956     +
3957     + /*
3958     + * Reset reader's event queue
3959     + */
3960     + spin_lock(&client->buffer_lock);
3961     + client->startup = 0;
3962     + client->tail = client->head;
3963     + spin_unlock(&client->buffer_lock);
3964     +
3965     + spin_unlock_irq(&input->event_lock);
3966     +
3967     + if (copy_to_user(buf, &data, sizeof(struct JS_DATA_TYPE)))
3968     + return -EFAULT;
3969     +
3970     + return sizeof(struct JS_DATA_TYPE);
3971     +}
3972     +
3973     +static inline int joydev_data_pending(struct joydev_client *client)
3974     +{
3975     + struct joydev *joydev = client->joydev;
3976     +
3977     + return client->startup < joydev->nabs + joydev->nkey ||
3978     + client->head != client->tail;
3979     }
3980    
3981     -static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3982     +static ssize_t joydev_read(struct file *file, char __user *buf,
3983     + size_t count, loff_t *ppos)
3984     {
3985     struct joydev_client *client = file->private_data;
3986     struct joydev *joydev = client->joydev;
3987     struct input_dev *input = joydev->handle.dev;
3988     - int retval = 0;
3989     + struct js_event event;
3990     + int retval;
3991    
3992     if (!joydev->exist)
3993     return -ENODEV;
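
The per-client queue introduced above (joydev_pass_event()/joydev_fetch_next_event()) is a power-of-two ring buffer whose head and tail wrap with a JOYDEV_BUFFER_SIZE - 1 mask under client->buffer_lock. The sketch below models only that wrap-around masking; on overrun it drops the oldest event, whereas the kernel instead resets client->startup so the full device state is re-queued, and the names here (put_event, get_event, BUF_SIZE) are simplified stand-ins.

/* Userspace model of a power-of-two ring buffer with masked head/tail;
 * illustration only, no locking, simplified overrun handling. */
#include <stdio.h>

#define BUF_SIZE 8                      /* must be a power of two */

struct client {
    int buffer[BUF_SIZE];
    int head, tail;
};

static void put_event(struct client *c, int ev)
{
    c->buffer[c->head] = ev;
    c->head = (c->head + 1) & (BUF_SIZE - 1);
    if (c->head == c->tail)             /* overrun: forget the oldest event */
        c->tail = (c->tail + 1) & (BUF_SIZE - 1);
}

static int get_event(struct client *c, int *ev)
{
    if (c->head == c->tail)             /* empty */
        return 0;
    *ev = c->buffer[c->tail];
    c->tail = (c->tail + 1) & (BUF_SIZE - 1);
    return 1;
}

int main(void)
{
    struct client c = { 0 };
    int i, ev;

    for (i = 0; i < 10; i++)            /* write more than fits */
        put_event(&c, i);
    while (get_event(&c, &ev))          /* drains 3..9, 0..2 were overwritten/lost */
        printf("read %d\n", ev);
    return 0;
}
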
3994     @@ -226,68 +413,35 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo
3995     if (count < sizeof(struct js_event))
3996     return -EINVAL;
3997    
3998     - if (count == sizeof(struct JS_DATA_TYPE)) {
3999     -
4000     - struct JS_DATA_TYPE data;
4001     - int i;
4002     -
4003     - for (data.buttons = i = 0; i < 32 && i < joydev->nkey; i++)
4004     - data.buttons |= test_bit(joydev->keypam[i], input->key) ? (1 << i) : 0;
4005     - data.x = (joydev->abs[0] / 256 + 128) >> joydev->glue.JS_CORR.x;
4006     - data.y = (joydev->abs[1] / 256 + 128) >> joydev->glue.JS_CORR.y;
4007     -
4008     - if (copy_to_user(buf, &data, sizeof(struct JS_DATA_TYPE)))
4009     - return -EFAULT;
4010     -
4011     - client->startup = 0;
4012     - client->tail = client->head;
4013     + if (count == sizeof(struct JS_DATA_TYPE))
4014     + return joydev_0x_read(client, input, buf);
4015    
4016     - return sizeof(struct JS_DATA_TYPE);
4017     - }
4018     -
4019     - if (client->startup == joydev->nabs + joydev->nkey &&
4020     - client->head == client->tail && (file->f_flags & O_NONBLOCK))
4021     + if (!joydev_data_pending(client) && (file->f_flags & O_NONBLOCK))
4022     return -EAGAIN;
4023    
4024     retval = wait_event_interruptible(joydev->wait,
4025     - !joydev->exist ||
4026     - client->startup < joydev->nabs + joydev->nkey ||
4027     - client->head != client->tail);
4028     + !joydev->exist || joydev_data_pending(client));
4029     if (retval)
4030     return retval;
4031    
4032     if (!joydev->exist)
4033     return -ENODEV;
4034    
4035     - while (client->startup < joydev->nabs + joydev->nkey && retval + sizeof(struct js_event) <= count) {
4036     -
4037     - struct js_event event;
4038     -
4039     - event.time = jiffies_to_msecs(jiffies);
4040     -
4041     - if (client->startup < joydev->nkey) {
4042     - event.type = JS_EVENT_BUTTON | JS_EVENT_INIT;
4043     - event.number = client->startup;
4044     - event.value = !!test_bit(joydev->keypam[event.number], input->key);
4045     - } else {
4046     - event.type = JS_EVENT_AXIS | JS_EVENT_INIT;
4047     - event.number = client->startup - joydev->nkey;
4048     - event.value = joydev->abs[event.number];
4049     - }
4050     + while (retval + sizeof(struct js_event) <= count &&
4051     + joydev_generate_startup_event(client, input, &event)) {
4052    
4053     if (copy_to_user(buf + retval, &event, sizeof(struct js_event)))
4054     return -EFAULT;
4055    
4056     - client->startup++;
4057     retval += sizeof(struct js_event);
4058     }
4059    
4060     - while (client->head != client->tail && retval + sizeof(struct js_event) <= count) {
4061     + while (retval + sizeof(struct js_event) <= count &&
4062     + joydev_fetch_next_event(client, &event)) {
4063    
4064     - if (copy_to_user(buf + retval, client->buffer + client->tail, sizeof(struct js_event)))
4065     + if (copy_to_user(buf + retval, &event, sizeof(struct js_event)))
4066     return -EFAULT;
4067    
4068     - client->tail = (client->tail + 1) & (JOYDEV_BUFFER_SIZE - 1);
4069     retval += sizeof(struct js_event);
4070     }
4071    
4072     @@ -301,126 +455,144 @@ static unsigned int joydev_poll(struct file *file, poll_table *wait)
4073     struct joydev *joydev = client->joydev;
4074    
4075     poll_wait(file, &joydev->wait, wait);
4076     - return ((client->head != client->tail || client->startup < joydev->nabs + joydev->nkey) ?
4077     - (POLLIN | POLLRDNORM) : 0) | (joydev->exist ? 0 : (POLLHUP | POLLERR));
4078     + return (joydev_data_pending(client) ? (POLLIN | POLLRDNORM) : 0) |
4079     + (joydev->exist ? 0 : (POLLHUP | POLLERR));
4080     }
4081    
4082     -static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __user *argp)
4083     +static int joydev_ioctl_common(struct joydev *joydev,
4084     + unsigned int cmd, void __user *argp)
4085     {
4086     struct input_dev *dev = joydev->handle.dev;
4087     int i, j;
4088    
4089     switch (cmd) {
4090    
4091     - case JS_SET_CAL:
4092     - return copy_from_user(&joydev->glue.JS_CORR, argp,
4093     + case JS_SET_CAL:
4094     + return copy_from_user(&joydev->glue.JS_CORR, argp,
4095     sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
4096    
4097     - case JS_GET_CAL:
4098     - return copy_to_user(argp, &joydev->glue.JS_CORR,
4099     + case JS_GET_CAL:
4100     + return copy_to_user(argp, &joydev->glue.JS_CORR,
4101     sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0;
4102    
4103     - case JS_SET_TIMEOUT:
4104     - return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
4105     + case JS_SET_TIMEOUT:
4106     + return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
4107    
4108     - case JS_GET_TIMEOUT:
4109     - return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
4110     + case JS_GET_TIMEOUT:
4111     + return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp);
4112    
4113     - case JSIOCGVERSION:
4114     - return put_user(JS_VERSION, (__u32 __user *) argp);
4115     + case JSIOCGVERSION:
4116     + return put_user(JS_VERSION, (__u32 __user *) argp);
4117    
4118     - case JSIOCGAXES:
4119     - return put_user(joydev->nabs, (__u8 __user *) argp);
4120     + case JSIOCGAXES:
4121     + return put_user(joydev->nabs, (__u8 __user *) argp);
4122    
4123     - case JSIOCGBUTTONS:
4124     - return put_user(joydev->nkey, (__u8 __user *) argp);
4125     + case JSIOCGBUTTONS:
4126     + return put_user(joydev->nkey, (__u8 __user *) argp);
4127    
4128     - case JSIOCSCORR:
4129     - if (copy_from_user(joydev->corr, argp,
4130     - sizeof(joydev->corr[0]) * joydev->nabs))
4131     - return -EFAULT;
4132     - for (i = 0; i < joydev->nabs; i++) {
4133     - j = joydev->abspam[i];
4134     - joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i);
4135     - }
4136     - return 0;
4137     + case JSIOCSCORR:
4138     + if (copy_from_user(joydev->corr, argp,
4139     + sizeof(joydev->corr[0]) * joydev->nabs))
4140     + return -EFAULT;
4141    
4142     - case JSIOCGCORR:
4143     - return copy_to_user(argp, joydev->corr,
4144     - sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
4145     + for (i = 0; i < joydev->nabs; i++) {
4146     + j = joydev->abspam[i];
4147     + joydev->abs[i] = joydev_correct(dev->abs[j],
4148     + &joydev->corr[i]);
4149     + }
4150     + return 0;
4151    
4152     - case JSIOCSAXMAP:
4153     - if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1)))
4154     - return -EFAULT;
4155     - for (i = 0; i < joydev->nabs; i++) {
4156     - if (joydev->abspam[i] > ABS_MAX)
4157     - return -EINVAL;
4158     - joydev->absmap[joydev->abspam[i]] = i;
4159     - }
4160     - return 0;
4161     -
4162     - case JSIOCGAXMAP:
4163     - return copy_to_user(argp, joydev->abspam,
4164     - sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0;
4165     -
4166     - case JSIOCSBTNMAP:
4167     - if (copy_from_user(joydev->keypam, argp, sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)))
4168     + case JSIOCGCORR:
4169     + return copy_to_user(argp, joydev->corr,
4170     + sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
4171     +
4172     + case JSIOCSAXMAP:
4173     + if (copy_from_user(joydev->abspam, argp,
4174     + sizeof(__u8) * (ABS_MAX + 1)))
4175     + return -EFAULT;
4176     +
4177     + for (i = 0; i < joydev->nabs; i++) {
4178     + if (joydev->abspam[i] > ABS_MAX)
4179     + return -EINVAL;
4180     + joydev->absmap[joydev->abspam[i]] = i;
4181     + }
4182     + return 0;
4183     +
4184     + case JSIOCGAXMAP:
4185     + return copy_to_user(argp, joydev->abspam,
4186     + sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0;
4187     +
4188     + case JSIOCSBTNMAP:
4189     + if (copy_from_user(joydev->keypam, argp,
4190     + sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)))
4191     + return -EFAULT;
4192     +
4193     + for (i = 0; i < joydev->nkey; i++) {
4194     + if (joydev->keypam[i] > KEY_MAX ||
4195     + joydev->keypam[i] < BTN_MISC)
4196     + return -EINVAL;
4197     + joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
4198     + }
4199     +
4200     + return 0;
4201     +
4202     + case JSIOCGBTNMAP:
4203     + return copy_to_user(argp, joydev->keypam,
4204     + sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0;
4205     +
4206     + default:
4207     + if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) {
4208     + int len;
4209     + if (!dev->name)
4210     + return 0;
4211     + len = strlen(dev->name) + 1;
4212     + if (len > _IOC_SIZE(cmd))
4213     + len = _IOC_SIZE(cmd);
4214     + if (copy_to_user(argp, dev->name, len))
4215     return -EFAULT;
4216     - for (i = 0; i < joydev->nkey; i++) {
4217     - if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC)
4218     - return -EINVAL;
4219     - joydev->keymap[joydev->keypam[i] - BTN_MISC] = i;
4220     - }
4221     - return 0;
4222     -
4223     - case JSIOCGBTNMAP:
4224     - return copy_to_user(argp, joydev->keypam,
4225     - sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0;
4226     -
4227     - default:
4228     - if ((cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) == JSIOCGNAME(0)) {
4229     - int len;
4230     - if (!dev->name)
4231     - return 0;
4232     - len = strlen(dev->name) + 1;
4233     - if (len > _IOC_SIZE(cmd))
4234     - len = _IOC_SIZE(cmd);
4235     - if (copy_to_user(argp, dev->name, len))
4236     - return -EFAULT;
4237     - return len;
4238     - }
4239     + return len;
4240     + }
4241     }
4242     return -EINVAL;
4243     }
4244    
4245     #ifdef CONFIG_COMPAT
4246     -static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4247     +static long joydev_compat_ioctl(struct file *file,
4248     + unsigned int cmd, unsigned long arg)
4249     {
4250     struct joydev_client *client = file->private_data;
4251     struct joydev *joydev = client->joydev;
4252     void __user *argp = (void __user *)arg;
4253     s32 tmp32;
4254     struct JS_DATA_SAVE_TYPE_32 ds32;
4255     - int err;
4256     + int retval;
4257    
4258     - if (!joydev->exist)
4259     - return -ENODEV;
4260     + retval = mutex_lock_interruptible(&joydev->mutex);
4261     + if (retval)
4262     + return retval;
4263     +
4264     + if (!joydev->exist) {
4265     + retval = -ENODEV;
4266     + goto out;
4267     + }
4268     +
4269     + switch (cmd) {
4270    
4271     - switch(cmd) {
4272     case JS_SET_TIMELIMIT:
4273     - err = get_user(tmp32, (s32 __user *) arg);
4274     - if (err == 0)
4275     + retval = get_user(tmp32, (s32 __user *) arg);
4276     + if (retval == 0)
4277     joydev->glue.JS_TIMELIMIT = tmp32;
4278     break;
4279     +
4280     case JS_GET_TIMELIMIT:
4281     tmp32 = joydev->glue.JS_TIMELIMIT;
4282     - err = put_user(tmp32, (s32 __user *) arg);
4283     + retval = put_user(tmp32, (s32 __user *) arg);
4284     break;
4285    
4286     case JS_SET_ALL:
4287     - err = copy_from_user(&ds32, argp,
4288     - sizeof(ds32)) ? -EFAULT : 0;
4289     - if (err == 0) {
4290     + retval = copy_from_user(&ds32, argp,
4291     + sizeof(ds32)) ? -EFAULT : 0;
4292     + if (retval == 0) {
4293     joydev->glue.JS_TIMEOUT = ds32.JS_TIMEOUT;
4294     joydev->glue.BUSY = ds32.BUSY;
4295     joydev->glue.JS_EXPIRETIME = ds32.JS_EXPIRETIME;
4296     @@ -438,55 +610,119 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
4297     ds32.JS_SAVE = joydev->glue.JS_SAVE;
4298     ds32.JS_CORR = joydev->glue.JS_CORR;
4299    
4300     - err = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0;
4301     + retval = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0;
4302     break;
4303    
4304     default:
4305     - err = joydev_ioctl_common(joydev, cmd, argp);
4306     + retval = joydev_ioctl_common(joydev, cmd, argp);
4307     + break;
4308     }
4309     - return err;
4310     +
4311     + out:
4312     + mutex_unlock(&joydev->mutex);
4313     + return retval;
4314     }
4315     #endif /* CONFIG_COMPAT */
4316    
4317     -static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
4318     +static long joydev_ioctl(struct file *file,
4319     + unsigned int cmd, unsigned long arg)
4320     {
4321     struct joydev_client *client = file->private_data;
4322     struct joydev *joydev = client->joydev;
4323     void __user *argp = (void __user *)arg;
4324     + int retval;
4325    
4326     - if (!joydev->exist)
4327     - return -ENODEV;
4328     + retval = mutex_lock_interruptible(&joydev->mutex);
4329     + if (retval)
4330     + return retval;
4331     +
4332     + if (!joydev->exist) {
4333     + retval = -ENODEV;
4334     + goto out;
4335     + }
4336     +
4337     + switch (cmd) {
4338     +
4339     + case JS_SET_TIMELIMIT:
4340     + retval = get_user(joydev->glue.JS_TIMELIMIT,
4341     + (long __user *) arg);
4342     + break;
4343     +
4344     + case JS_GET_TIMELIMIT:
4345     + retval = put_user(joydev->glue.JS_TIMELIMIT,
4346     + (long __user *) arg);
4347     + break;
4348     +
4349     + case JS_SET_ALL:
4350     + retval = copy_from_user(&joydev->glue, argp,
4351     + sizeof(joydev->glue)) ? -EFAULT: 0;
4352     + break;
4353     +
4354     + case JS_GET_ALL:
4355     + retval = copy_to_user(argp, &joydev->glue,
4356     + sizeof(joydev->glue)) ? -EFAULT : 0;
4357     + break;
4358    
4359     - switch(cmd) {
4360     - case JS_SET_TIMELIMIT:
4361     - return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
4362     - case JS_GET_TIMELIMIT:
4363     - return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg);
4364     - case JS_SET_ALL:
4365     - return copy_from_user(&joydev->glue, argp,
4366     - sizeof(joydev->glue)) ? -EFAULT : 0;
4367     - case JS_GET_ALL:
4368     - return copy_to_user(argp, &joydev->glue,
4369     - sizeof(joydev->glue)) ? -EFAULT : 0;
4370     - default:
4371     - return joydev_ioctl_common(joydev, cmd, argp);
4372     + default:
4373     + retval = joydev_ioctl_common(joydev, cmd, argp);
4374     + break;
4375     }
4376     + out:
4377     + mutex_unlock(&joydev->mutex);
4378     + return retval;
4379     }
4380    
4381     static const struct file_operations joydev_fops = {
4382     - .owner = THIS_MODULE,
4383     - .read = joydev_read,
4384     - .write = joydev_write,
4385     - .poll = joydev_poll,
4386     - .open = joydev_open,
4387     - .release = joydev_release,
4388     - .ioctl = joydev_ioctl,
4389     + .owner = THIS_MODULE,
4390     + .read = joydev_read,
4391     + .poll = joydev_poll,
4392     + .open = joydev_open,
4393     + .release = joydev_release,
4394     + .unlocked_ioctl = joydev_ioctl,
4395     #ifdef CONFIG_COMPAT
4396     - .compat_ioctl = joydev_compat_ioctl,
4397     + .compat_ioctl = joydev_compat_ioctl,
4398     #endif
4399     - .fasync = joydev_fasync,
4400     + .fasync = joydev_fasync,
4401     };
4402    
4403     +static int joydev_install_chrdev(struct joydev *joydev)
4404     +{
4405     + joydev_table[joydev->minor] = joydev;
4406     + return 0;
4407     +}
4408     +
4409     +static void joydev_remove_chrdev(struct joydev *joydev)
4410     +{
4411     + mutex_lock(&joydev_table_mutex);
4412     + joydev_table[joydev->minor] = NULL;
4413     + mutex_unlock(&joydev_table_mutex);
4414     +}
4415     +
4416     +/*
4417     + * Mark device non-existent. This disables writes, ioctls and
4418     + * prevents new users from opening the device. Already posted
4419     + * blocking reads will stay; however, new ones will fail.
4420     + */
4421     +static void joydev_mark_dead(struct joydev *joydev)
4422     +{
4423     + mutex_lock(&joydev->mutex);
4424     + joydev->exist = 0;
4425     + mutex_unlock(&joydev->mutex);
4426     +}
4427     +
4428     +static void joydev_cleanup(struct joydev *joydev)
4429     +{
4430     + struct input_handle *handle = &joydev->handle;
4431     +
4432     + joydev_mark_dead(joydev);
4433     + joydev_hangup(joydev);
4434     + joydev_remove_chrdev(joydev);
4435     +
4436     + /* joydev is marked dead so no one else accesses joydev->open */
4437     + if (joydev->open)
4438     + input_close_device(handle);
4439     +}
4440     +
4441     static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
4442     const struct input_device_id *id)
4443     {
4444     @@ -494,7 +730,10 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
4445     int i, j, t, minor;
4446     int error;
4447    
4448     - for (minor = 0; minor < JOYDEV_MINORS && joydev_table[minor]; minor++);
4449     + for (minor = 0; minor < JOYDEV_MINORS; minor++)
4450     + if (!joydev_table[minor])
4451     + break;
4452     +
4453     if (minor == JOYDEV_MINORS) {
4454     printk(KERN_ERR "joydev: no more free joydev devices\n");
4455     return -ENFILE;
4456     @@ -505,15 +744,19 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
4457     return -ENOMEM;
4458    
4459     INIT_LIST_HEAD(&joydev->client_list);
4460     + spin_lock_init(&joydev->client_lock);
4461     + mutex_init(&joydev->mutex);
4462     init_waitqueue_head(&joydev->wait);
4463    
4464     + snprintf(joydev->name, sizeof(joydev->name), "js%d", minor);
4465     + joydev->exist = 1;
4466     joydev->minor = minor;
4467     +
4468     joydev->exist = 1;
4469     joydev->handle.dev = dev;
4470     joydev->handle.name = joydev->name;
4471     joydev->handle.handler = handler;
4472     joydev->handle.private = joydev;
4473     - snprintf(joydev->name, sizeof(joydev->name), "js%d", minor);
4474    
4475     for (i = 0; i < ABS_MAX + 1; i++)
4476     if (test_bit(i, dev->absbit)) {
4477     @@ -545,67 +788,65 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
4478     }
4479     joydev->corr[i].type = JS_CORR_BROKEN;
4480     joydev->corr[i].prec = dev->absfuzz[j];
4481     - joydev->corr[i].coef[0] = (dev->absmax[j] + dev->absmin[j]) / 2 - dev->absflat[j];
4482     - joydev->corr[i].coef[1] = (dev->absmax[j] + dev->absmin[j]) / 2 + dev->absflat[j];
4483     - if (!(t = ((dev->absmax[j] - dev->absmin[j]) / 2 - 2 * dev->absflat[j])))
4484     - continue;
4485     - joydev->corr[i].coef[2] = (1 << 29) / t;
4486     - joydev->corr[i].coef[3] = (1 << 29) / t;
4487     -
4488     - joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i);
4489     + joydev->corr[i].coef[0] =
4490     + (dev->absmax[j] + dev->absmin[j]) / 2 - dev->absflat[j];
4491     + joydev->corr[i].coef[1] =
4492     + (dev->absmax[j] + dev->absmin[j]) / 2 + dev->absflat[j];
4493     +
4494     + t = (dev->absmax[j] - dev->absmin[j]) / 2 - 2 * dev->absflat[j];
4495     + if (t) {
4496     + joydev->corr[i].coef[2] = (1 << 29) / t;
4497     + joydev->corr[i].coef[3] = (1 << 29) / t;
4498     +
4499     + joydev->abs[i] = joydev_correct(dev->abs[j],
4500     + joydev->corr + i);
4501     + }
4502     }
4503    
4504     - snprintf(joydev->dev.bus_id, sizeof(joydev->dev.bus_id),
4505     - "js%d", minor);
4506     + strlcpy(joydev->dev.bus_id, joydev->name, sizeof(joydev->dev.bus_id));
4507     + joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor);
4508     joydev->dev.class = &input_class;
4509     joydev->dev.parent = &dev->dev;
4510     - joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor);
4511     joydev->dev.release = joydev_free;
4512     device_initialize(&joydev->dev);
4513    
4514     - joydev_table[minor] = joydev;
4515     -
4516     - error = device_add(&joydev->dev);
4517     + error = input_register_handle(&joydev->handle);
4518     if (error)
4519     goto err_free_joydev;
4520    
4521     - error = input_register_handle(&joydev->handle);
4522     + error = joydev_install_chrdev(joydev);
4523     if (error)
4524     - goto err_delete_joydev;
4525     + goto err_unregister_handle;
4526     +
4527     + error = device_add(&joydev->dev);
4528     + if (error)
4529     + goto err_cleanup_joydev;
4530    
4531     return 0;
4532    
4533     - err_delete_joydev:
4534     - device_del(&joydev->dev);
4535     + err_cleanup_joydev:
4536     + joydev_cleanup(joydev);
4537     + err_unregister_handle:
4538     + input_unregister_handle(&joydev->handle);
4539     err_free_joydev:
4540     put_device(&joydev->dev);
4541     return error;
4542     }
4543    
4544     -
4545     static void joydev_disconnect(struct input_handle *handle)
4546     {
4547     struct joydev *joydev = handle->private;
4548     - struct joydev_client *client;
4549    
4550     - input_unregister_handle(handle);
4551     device_del(&joydev->dev);
4552     -
4553     - joydev->exist = 0;
4554     -
4555     - if (joydev->open) {
4556     - input_close_device(handle);
4557     - list_for_each_entry(client, &joydev->client_list, node)
4558     - kill_fasync(&client->fasync, SIGIO, POLL_HUP);
4559     - wake_up_interruptible(&joydev->wait);
4560     - }
4561     -
4562     + joydev_cleanup(joydev);
4563     + input_unregister_handle(handle);
4564     put_device(&joydev->dev);
4565     }
4566    
4567     static const struct input_device_id joydev_blacklist[] = {
4568     {
4569     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
4570     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
4571     + INPUT_DEVICE_ID_MATCH_KEYBIT,
4572     .evbit = { BIT(EV_KEY) },
4573     .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
4574     }, /* Avoid itouchpads, touchscreens and tablets */
4575     @@ -614,17 +855,20 @@ static const struct input_device_id joydev_blacklist[] = {
4576    
4577     static const struct input_device_id joydev_ids[] = {
4578     {
4579     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
4580     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
4581     + INPUT_DEVICE_ID_MATCH_ABSBIT,
4582     .evbit = { BIT(EV_ABS) },
4583     .absbit = { BIT(ABS_X) },
4584     },
4585     {
4586     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
4587     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
4588     + INPUT_DEVICE_ID_MATCH_ABSBIT,
4589     .evbit = { BIT(EV_ABS) },
4590     .absbit = { BIT(ABS_WHEEL) },
4591     },
4592     {
4593     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
4594     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
4595     + INPUT_DEVICE_ID_MATCH_ABSBIT,
4596     .evbit = { BIT(EV_ABS) },
4597     .absbit = { BIT(ABS_THROTTLE) },
4598     },
4599     @@ -634,14 +878,14 @@ static const struct input_device_id joydev_ids[] = {
4600     MODULE_DEVICE_TABLE(input, joydev_ids);
4601    
4602     static struct input_handler joydev_handler = {
4603     - .event = joydev_event,
4604     - .connect = joydev_connect,
4605     - .disconnect = joydev_disconnect,
4606     - .fops = &joydev_fops,
4607     - .minor = JOYDEV_MINOR_BASE,
4608     - .name = "joydev",
4609     - .id_table = joydev_ids,
4610     - .blacklist = joydev_blacklist,
4611     + .event = joydev_event,
4612     + .connect = joydev_connect,
4613     + .disconnect = joydev_disconnect,
4614     + .fops = &joydev_fops,
4615     + .minor = JOYDEV_MINOR_BASE,
4616     + .name = "joydev",
4617     + .id_table = joydev_ids,
4618     + .blacklist = joydev_blacklist,
4619     };
4620    
4621     static int __init joydev_init(void)
4622     diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
4623     index 9173916..cc36edb 100644
4624     --- a/drivers/input/mousedev.c
4625     +++ b/drivers/input/mousedev.c
4626     @@ -61,9 +61,11 @@ struct mousedev {
4627     int open;
4628     int minor;
4629     char name[16];
4630     + struct input_handle handle;
4631     wait_queue_head_t wait;
4632     struct list_head client_list;
4633     - struct input_handle handle;
4634     + spinlock_t client_lock; /* protects client_list */
4635     + struct mutex mutex;
4636     struct device dev;
4637    
4638     struct list_head mixdev_node;
4639     @@ -113,108 +115,137 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
4640     static struct input_handler mousedev_handler;
4641    
4642     static struct mousedev *mousedev_table[MOUSEDEV_MINORS];
4643     +static DEFINE_MUTEX(mousedev_table_mutex);
4644     static struct mousedev *mousedev_mix;
4645     static LIST_HEAD(mousedev_mix_list);
4646    
4647     +static void mixdev_open_devices(void);
4648     +static void mixdev_close_devices(void);
4649     +
4650     #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
4651     #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
4652    
4653     -static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value)
4654     +static void mousedev_touchpad_event(struct input_dev *dev,
4655     + struct mousedev *mousedev,
4656     + unsigned int code, int value)
4657     {
4658     int size, tmp;
4659     enum { FRACTION_DENOM = 128 };
4660    
4661     switch (code) {
4662     - case ABS_X:
4663     - fx(0) = value;
4664     - if (mousedev->touch && mousedev->pkt_count >= 2) {
4665     - size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4666     - if (size == 0)
4667     - size = 256 * 2;
4668     - tmp = ((value - fx(2)) * (256 * FRACTION_DENOM)) / size;
4669     - tmp += mousedev->frac_dx;
4670     - mousedev->packet.dx = tmp / FRACTION_DENOM;
4671     - mousedev->frac_dx = tmp - mousedev->packet.dx * FRACTION_DENOM;
4672     - }
4673     - break;
4674    
4675     - case ABS_Y:
4676     - fy(0) = value;
4677     - if (mousedev->touch && mousedev->pkt_count >= 2) {
4678     - /* use X size to keep the same scale */
4679     - size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4680     - if (size == 0)
4681     - size = 256 * 2;
4682     - tmp = -((value - fy(2)) * (256 * FRACTION_DENOM)) / size;
4683     - tmp += mousedev->frac_dy;
4684     - mousedev->packet.dy = tmp / FRACTION_DENOM;
4685     - mousedev->frac_dy = tmp - mousedev->packet.dy * FRACTION_DENOM;
4686     - }
4687     - break;
4688     + case ABS_X:
4689     + fx(0) = value;
4690     + if (mousedev->touch && mousedev->pkt_count >= 2) {
4691     + size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4692     + if (size == 0)
4693     + size = 256 * 2;
4694     + tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size;
4695     + tmp += mousedev->frac_dx;
4696     + mousedev->packet.dx = tmp / FRACTION_DENOM;
4697     + mousedev->frac_dx =
4698     + tmp - mousedev->packet.dx * FRACTION_DENOM;
4699     + }
4700     + break;
4701     +
4702     + case ABS_Y:
4703     + fy(0) = value;
4704     + if (mousedev->touch && mousedev->pkt_count >= 2) {
4705     + /* use X size to keep the same scale */
4706     + size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4707     + if (size == 0)
4708     + size = 256 * 2;
4709     + tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size;
4710     + tmp += mousedev->frac_dy;
4711     + mousedev->packet.dy = tmp / FRACTION_DENOM;
4712     + mousedev->frac_dy = tmp -
4713     + mousedev->packet.dy * FRACTION_DENOM;
4714     + }
4715     + break;
4716     }
4717     }
4718    
4719     -static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value)
4720     +static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
4721     + unsigned int code, int value)
4722     {
4723     int size;
4724    
4725     switch (code) {
4726     - case ABS_X:
4727     - size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4728     - if (size == 0)
4729     - size = xres ? : 1;
4730     - if (value > dev->absmax[ABS_X])
4731     - value = dev->absmax[ABS_X];
4732     - if (value < dev->absmin[ABS_X])
4733     - value = dev->absmin[ABS_X];
4734     - mousedev->packet.x = ((value - dev->absmin[ABS_X]) * xres) / size;
4735     - mousedev->packet.abs_event = 1;
4736     - break;
4737    
4738     - case ABS_Y:
4739     - size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
4740     - if (size == 0)
4741     - size = yres ? : 1;
4742     - if (value > dev->absmax[ABS_Y])
4743     - value = dev->absmax[ABS_Y];
4744     - if (value < dev->absmin[ABS_Y])
4745     - value = dev->absmin[ABS_Y];
4746     - mousedev->packet.y = yres - ((value - dev->absmin[ABS_Y]) * yres) / size;
4747     - mousedev->packet.abs_event = 1;
4748     - break;
4749     + case ABS_X:
4750     + size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
4751     + if (size == 0)
4752     + size = xres ? : 1;
4753     + if (value > dev->absmax[ABS_X])
4754     + value = dev->absmax[ABS_X];
4755     + if (value < dev->absmin[ABS_X])
4756     + value = dev->absmin[ABS_X];
4757     + mousedev->packet.x =
4758     + ((value - dev->absmin[ABS_X]) * xres) / size;
4759     + mousedev->packet.abs_event = 1;
4760     + break;
4761     +
4762     + case ABS_Y:
4763     + size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
4764     + if (size == 0)
4765     + size = yres ? : 1;
4766     + if (value > dev->absmax[ABS_Y])
4767     + value = dev->absmax[ABS_Y];
4768     + if (value < dev->absmin[ABS_Y])
4769     + value = dev->absmin[ABS_Y];
4770     + mousedev->packet.y = yres -
4771     + ((value - dev->absmin[ABS_Y]) * yres) / size;
4772     + mousedev->packet.abs_event = 1;
4773     + break;
4774     }
4775     }
4776    
4777     -static void mousedev_rel_event(struct mousedev *mousedev, unsigned int code, int value)
4778     +static void mousedev_rel_event(struct mousedev *mousedev,
4779     + unsigned int code, int value)
4780     {
4781     switch (code) {
4782     - case REL_X: mousedev->packet.dx += value; break;
4783     - case REL_Y: mousedev->packet.dy -= value; break;
4784     - case REL_WHEEL: mousedev->packet.dz -= value; break;
4785     + case REL_X:
4786     + mousedev->packet.dx += value;
4787     + break;
4788     +
4789     + case REL_Y:
4790     + mousedev->packet.dy -= value;
4791     + break;
4792     +
4793     + case REL_WHEEL:
4794     + mousedev->packet.dz -= value;
4795     + break;
4796     }
4797     }
4798    
4799     -static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int value)
4800     +static void mousedev_key_event(struct mousedev *mousedev,
4801     + unsigned int code, int value)
4802     {
4803     int index;
4804    
4805     switch (code) {
4806     - case BTN_TOUCH:
4807     - case BTN_0:
4808     - case BTN_LEFT: index = 0; break;
4809     - case BTN_STYLUS:
4810     - case BTN_1:
4811     - case BTN_RIGHT: index = 1; break;
4812     - case BTN_2:
4813     - case BTN_FORWARD:
4814     - case BTN_STYLUS2:
4815     - case BTN_MIDDLE: index = 2; break;
4816     - case BTN_3:
4817     - case BTN_BACK:
4818     - case BTN_SIDE: index = 3; break;
4819     - case BTN_4:
4820     - case BTN_EXTRA: index = 4; break;
4821     - default: return;
4822     +
4823     + case BTN_TOUCH:
4824     + case BTN_0:
4825     + case BTN_LEFT: index = 0; break;
4826     +
4827     + case BTN_STYLUS:
4828     + case BTN_1:
4829     + case BTN_RIGHT: index = 1; break;
4830     +
4831     + case BTN_2:
4832     + case BTN_FORWARD:
4833     + case BTN_STYLUS2:
4834     + case BTN_MIDDLE: index = 2; break;
4835     +
4836     + case BTN_3:
4837     + case BTN_BACK:
4838     + case BTN_SIDE: index = 3; break;
4839     +
4840     + case BTN_4:
4841     + case BTN_EXTRA: index = 4; break;
4842     +
4843     + default: return;
4844     }
4845    
4846     if (value) {
4847     @@ -226,19 +257,22 @@ static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int
4848     }
4849     }
4850    
4851     -static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_hw_data *packet)
4852     +static void mousedev_notify_readers(struct mousedev *mousedev,
4853     + struct mousedev_hw_data *packet)
4854     {
4855     struct mousedev_client *client;
4856     struct mousedev_motion *p;
4857     - unsigned long flags;
4858     + unsigned int new_head;
4859     int wake_readers = 0;
4860    
4861     - list_for_each_entry(client, &mousedev->client_list, node) {
4862     - spin_lock_irqsave(&client->packet_lock, flags);
4863     + list_for_each_entry_rcu(client, &mousedev->client_list, node) {
4864     +
4865     + /* Just acquire the lock, interrupts already disabled */
4866     + spin_lock(&client->packet_lock);
4867    
4868     p = &client->packets[client->head];
4869     if (client->ready && p->buttons != mousedev->packet.buttons) {
4870     - unsigned int new_head = (client->head + 1) % PACKET_QUEUE_LEN;
4871     + new_head = (client->head + 1) % PACKET_QUEUE_LEN;
4872     if (new_head != client->tail) {
4873     p = &client->packets[client->head = new_head];
4874     memset(p, 0, sizeof(struct mousedev_motion));
4875     @@ -253,19 +287,22 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
4876     }
4877    
4878     client->pos_x += packet->dx;
4879     - client->pos_x = client->pos_x < 0 ? 0 : (client->pos_x >= xres ? xres : client->pos_x);
4880     + client->pos_x = client->pos_x < 0 ?
4881     + 0 : (client->pos_x >= xres ? xres : client->pos_x);
4882     client->pos_y += packet->dy;
4883     - client->pos_y = client->pos_y < 0 ? 0 : (client->pos_y >= yres ? yres : client->pos_y);
4884     + client->pos_y = client->pos_y < 0 ?
4885     + 0 : (client->pos_y >= yres ? yres : client->pos_y);
4886    
4887     p->dx += packet->dx;
4888     p->dy += packet->dy;
4889     p->dz += packet->dz;
4890     p->buttons = mousedev->packet.buttons;
4891    
4892     - if (p->dx || p->dy || p->dz || p->buttons != client->last_buttons)
4893     + if (p->dx || p->dy || p->dz ||
4894     + p->buttons != client->last_buttons)
4895     client->ready = 1;
4896    
4897     - spin_unlock_irqrestore(&client->packet_lock, flags);
4898     + spin_unlock(&client->packet_lock);
4899    
4900     if (client->ready) {
4901     kill_fasync(&client->fasync, SIGIO, POLL_IN);
4902     @@ -281,7 +318,8 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
4903     {
4904     if (!value) {
4905     if (mousedev->touch &&
4906     - time_before(jiffies, mousedev->touch + msecs_to_jiffies(tap_time))) {
4907     + time_before(jiffies,
4908     + mousedev->touch + msecs_to_jiffies(tap_time))) {
4909     /*
4910     * Toggle left button to emulate tap.
4911     * We rely on the fact that mousedev_mix always has 0
4912     @@ -290,7 +328,8 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
4913     set_bit(0, &mousedev->packet.buttons);
4914     set_bit(0, &mousedev_mix->packet.buttons);
4915     mousedev_notify_readers(mousedev, &mousedev_mix->packet);
4916     - mousedev_notify_readers(mousedev_mix, &mousedev_mix->packet);
4917     + mousedev_notify_readers(mousedev_mix,
4918     + &mousedev_mix->packet);
4919     clear_bit(0, &mousedev->packet.buttons);
4920     clear_bit(0, &mousedev_mix->packet.buttons);
4921     }
4922     @@ -302,54 +341,61 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
4923     mousedev->touch = jiffies;
4924     }
4925    
4926     -static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
4927     +static void mousedev_event(struct input_handle *handle,
4928     + unsigned int type, unsigned int code, int value)
4929     {
4930     struct mousedev *mousedev = handle->private;
4931    
4932     switch (type) {
4933     - case EV_ABS:
4934     - /* Ignore joysticks */
4935     - if (test_bit(BTN_TRIGGER, handle->dev->keybit))
4936     - return;
4937    
4938     - if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
4939     - mousedev_touchpad_event(handle->dev, mousedev, code, value);
4940     - else
4941     - mousedev_abs_event(handle->dev, mousedev, code, value);
4942     + case EV_ABS:
4943     + /* Ignore joysticks */
4944     + if (test_bit(BTN_TRIGGER, handle->dev->keybit))
4945     + return;
4946    
4947     - break;
4948     + if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
4949     + mousedev_touchpad_event(handle->dev,
4950     + mousedev, code, value);
4951     + else
4952     + mousedev_abs_event(handle->dev, mousedev, code, value);
4953    
4954     - case EV_REL:
4955     - mousedev_rel_event(mousedev, code, value);
4956     - break;
4957     + break;
4958    
4959     - case EV_KEY:
4960     - if (value != 2) {
4961     - if (code == BTN_TOUCH && test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
4962     - mousedev_touchpad_touch(mousedev, value);
4963     - else
4964     - mousedev_key_event(mousedev, code, value);
4965     - }
4966     - break;
4967     + case EV_REL:
4968     + mousedev_rel_event(mousedev, code, value);
4969     + break;
4970    
4971     - case EV_SYN:
4972     - if (code == SYN_REPORT) {
4973     - if (mousedev->touch) {
4974     - mousedev->pkt_count++;
4975     - /* Input system eats duplicate events, but we need all of them
4976     - * to do correct averaging so apply present one forward
4977     - */
4978     - fx(0) = fx(1);
4979     - fy(0) = fy(1);
4980     - }
4981     -
4982     - mousedev_notify_readers(mousedev, &mousedev->packet);
4983     - mousedev_notify_readers(mousedev_mix, &mousedev->packet);
4984     -
4985     - mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0;
4986     - mousedev->packet.abs_event = 0;
4987     + case EV_KEY:
4988     + if (value != 2) {
4989     + if (code == BTN_TOUCH &&
4990     + test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
4991     + mousedev_touchpad_touch(mousedev, value);
4992     + else
4993     + mousedev_key_event(mousedev, code, value);
4994     + }
4995     + break;
4996     +
4997     + case EV_SYN:
4998     + if (code == SYN_REPORT) {
4999     + if (mousedev->touch) {
5000     + mousedev->pkt_count++;
5001     + /*
5002     + * Input system eats duplicate events,
5003     + * but we need all of them to do correct
5004     + * averaging so apply present one forward
5005     + */
5006     + fx(0) = fx(1);
5007     + fy(0) = fy(1);
5008     }
5009     - break;
5010     +
5011     + mousedev_notify_readers(mousedev, &mousedev->packet);
5012     + mousedev_notify_readers(mousedev_mix, &mousedev->packet);
5013     +
5014     + mousedev->packet.dx = mousedev->packet.dy =
5015     + mousedev->packet.dz = 0;
5016     + mousedev->packet.abs_event = 0;
5017     + }
5018     + break;
5019     }
5020     }
5021    
5022     @@ -367,41 +413,48 @@ static void mousedev_free(struct device *dev)
5023     {
5024     struct mousedev *mousedev = container_of(dev, struct mousedev, dev);
5025    
5026     - mousedev_table[mousedev->minor] = NULL;
5027     kfree(mousedev);
5028     }
5029    
5030     -static int mixdev_add_device(struct mousedev *mousedev)
5031     +static int mousedev_open_device(struct mousedev *mousedev)
5032     {
5033     - int error;
5034     + int retval;
5035    
5036     - if (mousedev_mix->open) {
5037     - error = input_open_device(&mousedev->handle);
5038     - if (error)
5039     - return error;
5040     + retval = mutex_lock_interruptible(&mousedev->mutex);
5041     + if (retval)
5042     + return retval;
5043    
5044     - mousedev->open++;
5045     - mousedev->mixdev_open = 1;
5046     + if (mousedev->minor == MOUSEDEV_MIX)
5047     + mixdev_open_devices();
5048     + else if (!mousedev->exist)
5049     + retval = -ENODEV;
5050     + else if (!mousedev->open++) {
5051     + retval = input_open_device(&mousedev->handle);
5052     + if (retval)
5053     + mousedev->open--;
5054     }
5055    
5056     - get_device(&mousedev->dev);
5057     - list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list);
5058     -
5059     - return 0;
5060     + mutex_unlock(&mousedev->mutex);
5061     + return retval;
5062     }
5063    
5064     -static void mixdev_remove_device(struct mousedev *mousedev)
5065     +static void mousedev_close_device(struct mousedev *mousedev)
5066     {
5067     - if (mousedev->mixdev_open) {
5068     - mousedev->mixdev_open = 0;
5069     - if (!--mousedev->open && mousedev->exist)
5070     - input_close_device(&mousedev->handle);
5071     - }
5072     + mutex_lock(&mousedev->mutex);
5073    
5074     - list_del_init(&mousedev->mixdev_node);
5075     - put_device(&mousedev->dev);
5076     + if (mousedev->minor == MOUSEDEV_MIX)
5077     + mixdev_close_devices();
5078     + else if (mousedev->exist && !--mousedev->open)
5079     + input_close_device(&mousedev->handle);
5080     +
5081     + mutex_unlock(&mousedev->mutex);
5082     }
5083    
5084     +/*
5085     + * Open all available devices so they can all be multiplexed in one
5086     + * stream. Note that this function is called with mousedev_mix->mutex
5087     + * held.
5088     + */
5089     static void mixdev_open_devices(void)
5090     {
5091     struct mousedev *mousedev;
5092     @@ -411,16 +464,19 @@ static void mixdev_open_devices(void)
5093    
5094     list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
5095     if (!mousedev->mixdev_open) {
5096     - if (!mousedev->open && mousedev->exist)
5097     - if (input_open_device(&mousedev->handle))
5098     - continue;
5099     + if (mousedev_open_device(mousedev))
5100     + continue;
5101    
5102     - mousedev->open++;
5103     mousedev->mixdev_open = 1;
5104     }
5105     }
5106     }
5107    
5108     +/*
5109     + * Close all devices that were opened as part of multiplexed
5110     + * device. Note that this function is called with mousedev_mix->mutex
5111     + * held.
5112     + */
5113     static void mixdev_close_devices(void)
5114     {
5115     struct mousedev *mousedev;
5116     @@ -431,33 +487,50 @@ static void mixdev_close_devices(void)
5117     list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
5118     if (mousedev->mixdev_open) {
5119     mousedev->mixdev_open = 0;
5120     - if (!--mousedev->open && mousedev->exist)
5121     - input_close_device(&mousedev->handle);
5122     + mousedev_close_device(mousedev);
5123     }
5124     }
5125     }
5126    
5127     +
5128     +static void mousedev_attach_client(struct mousedev *mousedev,
5129     + struct mousedev_client *client)
5130     +{
5131     + spin_lock(&mousedev->client_lock);
5132     + list_add_tail_rcu(&client->node, &mousedev->client_list);
5133     + spin_unlock(&mousedev->client_lock);
5134     + /*
5135     + * We don't use synchronize_rcu() here because read-side
5136     + * critical section is protected by a spinlock (dev->event_lock)
5137     + * instead of rcu_read_lock().
5138     + */
5139     + synchronize_sched();
5140     +}
5141     +
5142     +static void mousedev_detach_client(struct mousedev *mousedev,
5143     + struct mousedev_client *client)
5144     +{
5145     + spin_lock(&mousedev->client_lock);
5146     + list_del_rcu(&client->node);
5147     + spin_unlock(&mousedev->client_lock);
5148     + synchronize_sched();
5149     +}
5150     +
5151     static int mousedev_release(struct inode *inode, struct file *file)
5152     {
5153     struct mousedev_client *client = file->private_data;
5154     struct mousedev *mousedev = client->mousedev;
5155    
5156     mousedev_fasync(-1, file, 0);
5157     -
5158     - list_del(&client->node);
5159     + mousedev_detach_client(mousedev, client);
5160     kfree(client);
5161    
5162     - if (mousedev->minor == MOUSEDEV_MIX)
5163     - mixdev_close_devices();
5164     - else if (!--mousedev->open && mousedev->exist)
5165     - input_close_device(&mousedev->handle);
5166     -
5167     + mousedev_close_device(mousedev);
5168     put_device(&mousedev->dev);
5169    
5170     return 0;
5171     }
5172    
5173     -
5174     static int mousedev_open(struct inode *inode, struct file *file)
5175     {
5176     struct mousedev_client *client;
5177     @@ -475,12 +548,17 @@ static int mousedev_open(struct inode *inode, struct file *file)
5178     if (i >= MOUSEDEV_MINORS)
5179     return -ENODEV;
5180    
5181     + error = mutex_lock_interruptible(&mousedev_table_mutex);
5182     + if (error)
5183     + return error;
5184     mousedev = mousedev_table[i];
5185     + if (mousedev)
5186     + get_device(&mousedev->dev);
5187     + mutex_unlock(&mousedev_table_mutex);
5188     +
5189     if (!mousedev)
5190     return -ENODEV;
5191    
5192     - get_device(&mousedev->dev);
5193     -
5194     client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
5195     if (!client) {
5196     error = -ENOMEM;
5197     @@ -491,21 +569,17 @@ static int mousedev_open(struct inode *inode, struct file *file)
5198     client->pos_x = xres / 2;
5199     client->pos_y = yres / 2;
5200     client->mousedev = mousedev;
5201     - list_add_tail(&client->node, &mousedev->client_list);
5202     + mousedev_attach_client(mousedev, client);
5203    
5204     - if (mousedev->minor == MOUSEDEV_MIX)
5205     - mixdev_open_devices();
5206     - else if (!mousedev->open++ && mousedev->exist) {
5207     - error = input_open_device(&mousedev->handle);
5208     - if (error)
5209     - goto err_free_client;
5210     - }
5211     + error = mousedev_open_device(mousedev);
5212     + if (error)
5213     + goto err_free_client;
5214    
5215     file->private_data = client;
5216     return 0;
5217    
5218     err_free_client:
5219     - list_del(&client->node);
5220     + mousedev_detach_client(mousedev, client);
5221     kfree(client);
5222     err_put_mousedev:
5223     put_device(&mousedev->dev);
5224     @@ -517,41 +591,41 @@ static inline int mousedev_limit_delta(int delta, int limit)
5225     return delta > limit ? limit : (delta < -limit ? -limit : delta);
5226     }
5227    
5228     -static void mousedev_packet(struct mousedev_client *client, signed char *ps2_data)
5229     +static void mousedev_packet(struct mousedev_client *client,
5230     + signed char *ps2_data)
5231     {
5232     - struct mousedev_motion *p;
5233     - unsigned long flags;
5234     -
5235     - spin_lock_irqsave(&client->packet_lock, flags);
5236     - p = &client->packets[client->tail];
5237     + struct mousedev_motion *p = &client->packets[client->tail];
5238    
5239     - ps2_data[0] = 0x08 | ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
5240     + ps2_data[0] = 0x08 |
5241     + ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
5242     ps2_data[1] = mousedev_limit_delta(p->dx, 127);
5243     ps2_data[2] = mousedev_limit_delta(p->dy, 127);
5244     p->dx -= ps2_data[1];
5245     p->dy -= ps2_data[2];
5246    
5247     switch (client->mode) {
5248     - case MOUSEDEV_EMUL_EXPS:
5249     - ps2_data[3] = mousedev_limit_delta(p->dz, 7);
5250     - p->dz -= ps2_data[3];
5251     - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
5252     - client->bufsiz = 4;
5253     - break;
5254     -
5255     - case MOUSEDEV_EMUL_IMPS:
5256     - ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
5257     - ps2_data[3] = mousedev_limit_delta(p->dz, 127);
5258     - p->dz -= ps2_data[3];
5259     - client->bufsiz = 4;
5260     - break;
5261     -
5262     - case MOUSEDEV_EMUL_PS2:
5263     - default:
5264     - ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
5265     - p->dz = 0;
5266     - client->bufsiz = 3;
5267     - break;
5268     + case MOUSEDEV_EMUL_EXPS:
5269     + ps2_data[3] = mousedev_limit_delta(p->dz, 7);
5270     + p->dz -= ps2_data[3];
5271     + ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
5272     + client->bufsiz = 4;
5273     + break;
5274     +
5275     + case MOUSEDEV_EMUL_IMPS:
5276     + ps2_data[0] |=
5277     + ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
5278     + ps2_data[3] = mousedev_limit_delta(p->dz, 127);
5279     + p->dz -= ps2_data[3];
5280     + client->bufsiz = 4;
5281     + break;
5282     +
5283     + case MOUSEDEV_EMUL_PS2:
5284     + default:
5285     + ps2_data[0] |=
5286     + ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
5287     + p->dz = 0;
5288     + client->bufsiz = 3;
5289     + break;
5290     }
5291    
5292     if (!p->dx && !p->dy && !p->dz) {
5293     @@ -561,12 +635,56 @@ static void mousedev_packet(struct mousedev_client *client, signed char *ps2_dat
5294     } else
5295     client->tail = (client->tail + 1) % PACKET_QUEUE_LEN;
5296     }
5297     -
5298     - spin_unlock_irqrestore(&client->packet_lock, flags);
5299     }
5300    
5301     +static void mousedev_generate_response(struct mousedev_client *client,
5302     + int command)
5303     +{
5304     + client->ps2[0] = 0xfa; /* ACK */
5305     +
5306     + switch (command) {
5307    
5308     -static ssize_t mousedev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
5309     + case 0xeb: /* Poll */
5310     + mousedev_packet(client, &client->ps2[1]);
5311     + client->bufsiz++; /* account for leading ACK */
5312     + break;
5313     +
5314     + case 0xf2: /* Get ID */
5315     + switch (client->mode) {
5316     + case MOUSEDEV_EMUL_PS2:
5317     + client->ps2[1] = 0;
5318     + break;
5319     + case MOUSEDEV_EMUL_IMPS:
5320     + client->ps2[1] = 3;
5321     + break;
5322     + case MOUSEDEV_EMUL_EXPS:
5323     + client->ps2[1] = 4;
5324     + break;
5325     + }
5326     + client->bufsiz = 2;
5327     + break;
5328     +
5329     + case 0xe9: /* Get info */
5330     + client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200;
5331     + client->bufsiz = 4;
5332     + break;
5333     +
5334     + case 0xff: /* Reset */
5335     + client->impsseq = client->imexseq = 0;
5336     + client->mode = MOUSEDEV_EMUL_PS2;
5337     + client->ps2[1] = 0xaa; client->ps2[2] = 0x00;
5338     + client->bufsiz = 3;
5339     + break;
5340     +
5341     + default:
5342     + client->bufsiz = 1;
5343     + break;
5344     + }
5345     + client->buffer = client->bufsiz;
5346     +}
5347     +
5348     +static ssize_t mousedev_write(struct file *file, const char __user *buffer,
5349     + size_t count, loff_t *ppos)
5350     {
5351     struct mousedev_client *client = file->private_data;
5352     unsigned char c;
5353     @@ -577,6 +695,8 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer, size
5354     if (get_user(c, buffer + i))
5355     return -EFAULT;
5356    
5357     + spin_lock_irq(&client->packet_lock);
5358     +
5359     if (c == mousedev_imex_seq[client->imexseq]) {
5360     if (++client->imexseq == MOUSEDEV_SEQ_LEN) {
5361     client->imexseq = 0;
5362     @@ -593,68 +713,39 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer, size
5363     } else
5364     client->impsseq = 0;
5365    
5366     - client->ps2[0] = 0xfa;
5367     -
5368     - switch (c) {
5369     -
5370     - case 0xeb: /* Poll */
5371     - mousedev_packet(client, &client->ps2[1]);
5372     - client->bufsiz++; /* account for leading ACK */
5373     - break;
5374     -
5375     - case 0xf2: /* Get ID */
5376     - switch (client->mode) {
5377     - case MOUSEDEV_EMUL_PS2: client->ps2[1] = 0; break;
5378     - case MOUSEDEV_EMUL_IMPS: client->ps2[1] = 3; break;
5379     - case MOUSEDEV_EMUL_EXPS: client->ps2[1] = 4; break;
5380     - }
5381     - client->bufsiz = 2;
5382     - break;
5383     -
5384     - case 0xe9: /* Get info */
5385     - client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200;
5386     - client->bufsiz = 4;
5387     - break;
5388     -
5389     - case 0xff: /* Reset */
5390     - client->impsseq = client->imexseq = 0;
5391     - client->mode = MOUSEDEV_EMUL_PS2;
5392     - client->ps2[1] = 0xaa; client->ps2[2] = 0x00;
5393     - client->bufsiz = 3;
5394     - break;
5395     -
5396     - default:
5397     - client->bufsiz = 1;
5398     - break;
5399     - }
5400     + mousedev_generate_response(client, c);
5401    
5402     - client->buffer = client->bufsiz;
5403     + spin_unlock_irq(&client->packet_lock);
5404     }
5405    
5406     kill_fasync(&client->fasync, SIGIO, POLL_IN);
5407     -
5408     wake_up_interruptible(&client->mousedev->wait);
5409    
5410     return count;
5411     }
5412    
5413     -static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
5414     +static ssize_t mousedev_read(struct file *file, char __user *buffer,
5415     + size_t count, loff_t *ppos)
5416     {
5417     struct mousedev_client *client = file->private_data;
5418     + struct mousedev *mousedev = client->mousedev;
5419     + signed char data[sizeof(client->ps2)];
5420     int retval = 0;
5421    
5422     - if (!client->ready && !client->buffer && (file->f_flags & O_NONBLOCK))
5423     + if (!client->ready && !client->buffer && mousedev->exist &&
5424     + (file->f_flags & O_NONBLOCK))
5425     return -EAGAIN;
5426    
5427     - retval = wait_event_interruptible(client->mousedev->wait,
5428     - !client->mousedev->exist || client->ready || client->buffer);
5429     -
5430     + retval = wait_event_interruptible(mousedev->wait,
5431     + !mousedev->exist || client->ready || client->buffer);
5432     if (retval)
5433     return retval;
5434    
5435     - if (!client->mousedev->exist)
5436     + if (!mousedev->exist)
5437     return -ENODEV;
5438    
5439     + spin_lock_irq(&client->packet_lock);
5440     +
5441     if (!client->buffer && client->ready) {
5442     mousedev_packet(client, client->ps2);
5443     client->buffer = client->bufsiz;
5444     @@ -663,9 +754,12 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t coun
5445     if (count > client->buffer)
5446     count = client->buffer;
5447    
5448     + memcpy(data, client->ps2 + client->bufsiz - client->buffer, count);
5449     client->buffer -= count;
5450    
5451     - if (copy_to_user(buffer, client->ps2 + client->bufsiz - client->buffer - count, count))
5452     + spin_unlock_irq(&client->packet_lock);
5453     +
5454     + if (copy_to_user(buffer, data, count))
5455     return -EFAULT;
5456    
5457     return count;
5458     @@ -692,6 +786,60 @@ static const struct file_operations mousedev_fops = {
5459     .fasync = mousedev_fasync,
5460     };
5461    
5462     +static int mousedev_install_chrdev(struct mousedev *mousedev)
5463     +{
5464     + mousedev_table[mousedev->minor] = mousedev;
5465     + return 0;
5466     +}
5467     +
5468     +static void mousedev_remove_chrdev(struct mousedev *mousedev)
5469     +{
5470     + mutex_lock(&mousedev_table_mutex);
5471     + mousedev_table[mousedev->minor] = NULL;
5472     + mutex_unlock(&mousedev_table_mutex);
5473     +}
5474     +
5475     +/*
5476     + * Mark device non-existent. This disables writes, ioctls and
5477     + * prevents new users from opening the device. Already posted
5478     + * blocking reads will stay, however new ones will fail.
5479     + */
5480     +static void mousedev_mark_dead(struct mousedev *mousedev)
5481     +{
5482     + mutex_lock(&mousedev->mutex);
5483     + mousedev->exist = 0;
5484     + mutex_unlock(&mousedev->mutex);
5485     +}
5486     +
5487     +/*
5488     + * Wake up users waiting for IO so they can disconnect from
5489     + * dead device.
5490     + */
5491     +static void mousedev_hangup(struct mousedev *mousedev)
5492     +{
5493     + struct mousedev_client *client;
5494     +
5495     + spin_lock(&mousedev->client_lock);
5496     + list_for_each_entry(client, &mousedev->client_list, node)
5497     + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
5498     + spin_unlock(&mousedev->client_lock);
5499     +
5500     + wake_up_interruptible(&mousedev->wait);
5501     +}
5502     +
5503     +static void mousedev_cleanup(struct mousedev *mousedev)
5504     +{
5505     + struct input_handle *handle = &mousedev->handle;
5506     +
5507     + mousedev_mark_dead(mousedev);
5508     + mousedev_hangup(mousedev);
5509     + mousedev_remove_chrdev(mousedev);
5510     +
5511     + /* mousedev is marked dead so no one else accesses mousedev->open */
5512     + if (mousedev->open)
5513     + input_close_device(handle);
5514     +}
5515     +
5516     static struct mousedev *mousedev_create(struct input_dev *dev,
5517     struct input_handler *handler,
5518     int minor)
5519     @@ -707,6 +855,10 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
5520    
5521     INIT_LIST_HEAD(&mousedev->client_list);
5522     INIT_LIST_HEAD(&mousedev->mixdev_node);
5523     + spin_lock_init(&mousedev->client_lock);
5524     + mutex_init(&mousedev->mutex);
5525     + lockdep_set_subclass(&mousedev->mutex,
5526     + minor == MOUSEDEV_MIX ? MOUSEDEV_MIX : 0);
5527     init_waitqueue_head(&mousedev->wait);
5528    
5529     if (minor == MOUSEDEV_MIX)
5530     @@ -731,14 +883,27 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
5531     mousedev->dev.release = mousedev_free;
5532     device_initialize(&mousedev->dev);
5533    
5534     - mousedev_table[minor] = mousedev;
5535     + if (minor != MOUSEDEV_MIX) {
5536     + error = input_register_handle(&mousedev->handle);
5537     + if (error)
5538     + goto err_free_mousedev;
5539     + }
5540     +
5541     + error = mousedev_install_chrdev(mousedev);
5542     + if (error)
5543     + goto err_unregister_handle;
5544    
5545     error = device_add(&mousedev->dev);
5546     if (error)
5547     - goto err_free_mousedev;
5548     + goto err_cleanup_mousedev;
5549    
5550     return mousedev;
5551    
5552     + err_cleanup_mousedev:
5553     + mousedev_cleanup(mousedev);
5554     + err_unregister_handle:
5555     + if (minor != MOUSEDEV_MIX)
5556     + input_unregister_handle(&mousedev->handle);
5557     err_free_mousedev:
5558     put_device(&mousedev->dev);
5559     err_out:
5560     @@ -747,29 +912,64 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
5561    
5562     static void mousedev_destroy(struct mousedev *mousedev)
5563     {
5564     - struct mousedev_client *client;
5565     -
5566     device_del(&mousedev->dev);
5567     - mousedev->exist = 0;
5568     + mousedev_cleanup(mousedev);
5569     + if (mousedev->minor != MOUSEDEV_MIX)
5570     + input_unregister_handle(&mousedev->handle);
5571     + put_device(&mousedev->dev);
5572     +}
5573    
5574     - if (mousedev->open) {
5575     - input_close_device(&mousedev->handle);
5576     - list_for_each_entry(client, &mousedev->client_list, node)
5577     - kill_fasync(&client->fasync, SIGIO, POLL_HUP);
5578     - wake_up_interruptible(&mousedev->wait);
5579     +static int mixdev_add_device(struct mousedev *mousedev)
5580     +{
5581     + int retval;
5582     +
5583     + retval = mutex_lock_interruptible(&mousedev_mix->mutex);
5584     + if (retval)
5585     + return retval;
5586     +
5587     + if (mousedev_mix->open) {
5588     + retval = mousedev_open_device(mousedev);
5589     + if (retval)
5590     + goto out;
5591     +
5592     + mousedev->mixdev_open = 1;
5593     }
5594    
5595     + get_device(&mousedev->dev);
5596     + list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list);
5597     +
5598     + out:
5599     + mutex_unlock(&mousedev_mix->mutex);
5600     + return retval;
5601     +}
5602     +
5603     +static void mixdev_remove_device(struct mousedev *mousedev)
5604     +{
5605     + mutex_lock(&mousedev_mix->mutex);
5606     +
5607     + if (mousedev->mixdev_open) {
5608     + mousedev->mixdev_open = 0;
5609     + mousedev_close_device(mousedev);
5610     + }
5611     +
5612     + list_del_init(&mousedev->mixdev_node);
5613     + mutex_unlock(&mousedev_mix->mutex);
5614     +
5615     put_device(&mousedev->dev);
5616     }
5617    
5618     -static int mousedev_connect(struct input_handler *handler, struct input_dev *dev,
5619     +static int mousedev_connect(struct input_handler *handler,
5620     + struct input_dev *dev,
5621     const struct input_device_id *id)
5622     {
5623     struct mousedev *mousedev;
5624     int minor;
5625     int error;
5626    
5627     - for (minor = 0; minor < MOUSEDEV_MINORS && mousedev_table[minor]; minor++);
5628     + for (minor = 0; minor < MOUSEDEV_MINORS; minor++)
5629     + if (!mousedev_table[minor])
5630     + break;
5631     +
5632     if (minor == MOUSEDEV_MINORS) {
5633     printk(KERN_ERR "mousedev: no more free mousedev devices\n");
5634     return -ENFILE;
5635     @@ -779,21 +979,13 @@ static int mousedev_connect(struct input_handler *handler, struct input_dev *dev
5636     if (IS_ERR(mousedev))
5637     return PTR_ERR(mousedev);
5638    
5639     - error = input_register_handle(&mousedev->handle);
5640     - if (error)
5641     - goto err_delete_mousedev;
5642     -
5643     error = mixdev_add_device(mousedev);
5644     - if (error)
5645     - goto err_unregister_handle;
5646     + if (error) {
5647     + mousedev_destroy(mousedev);
5648     + return error;
5649     + }
5650    
5651     return 0;
5652     -
5653     - err_unregister_handle:
5654     - input_unregister_handle(&mousedev->handle);
5655     - err_delete_mousedev:
5656     - device_unregister(&mousedev->dev);
5657     - return error;
5658     }
5659    
5660     static void mousedev_disconnect(struct input_handle *handle)
5661     @@ -801,33 +993,42 @@ static void mousedev_disconnect(struct input_handle *handle)
5662     struct mousedev *mousedev = handle->private;
5663    
5664     mixdev_remove_device(mousedev);
5665     - input_unregister_handle(handle);
5666     mousedev_destroy(mousedev);
5667     }
5668    
5669     static const struct input_device_id mousedev_ids[] = {
5670     {
5671     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_RELBIT,
5672     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
5673     + INPUT_DEVICE_ID_MATCH_KEYBIT |
5674     + INPUT_DEVICE_ID_MATCH_RELBIT,
5675     .evbit = { BIT(EV_KEY) | BIT(EV_REL) },
5676     .keybit = { [LONG(BTN_LEFT)] = BIT(BTN_LEFT) },
5677     .relbit = { BIT(REL_X) | BIT(REL_Y) },
5678     - }, /* A mouse like device, at least one button, two relative axes */
5679     + }, /* A mouse like device, at least one button,
5680     + two relative axes */
5681     {
5682     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_RELBIT,
5683     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
5684     + INPUT_DEVICE_ID_MATCH_RELBIT,
5685     .evbit = { BIT(EV_KEY) | BIT(EV_REL) },
5686     .relbit = { BIT(REL_WHEEL) },
5687     }, /* A separate scrollwheel */
5688     {
5689     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
5690     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
5691     + INPUT_DEVICE_ID_MATCH_KEYBIT |
5692     + INPUT_DEVICE_ID_MATCH_ABSBIT,
5693     .evbit = { BIT(EV_KEY) | BIT(EV_ABS) },
5694     .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) },
5695     .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
5696     - }, /* A tablet like device, at least touch detection, two absolute axes */
5697     + }, /* A tablet like device, at least touch detection,
5698     + two absolute axes */
5699     {
5700     - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
5701     + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
5702     + INPUT_DEVICE_ID_MATCH_KEYBIT |
5703     + INPUT_DEVICE_ID_MATCH_ABSBIT,
5704     .evbit = { BIT(EV_KEY) | BIT(EV_ABS) },
5705     .keybit = { [LONG(BTN_TOOL_FINGER)] = BIT(BTN_TOOL_FINGER) },
5706     - .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) | BIT(ABS_TOOL_WIDTH) },
5707     + .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) |
5708     + BIT(ABS_TOOL_WIDTH) },
5709     }, /* A touchpad */
5710    
5711     { }, /* Terminating entry */
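
The mousedev hunks above replace the old ad-hoc open counting with mousedev_open_device()/mousedev_close_device(), which serialize on mousedev->mutex and only call input_open_device()/input_close_device() on the first open and the last close. Below is a minimal stand-alone C sketch of that reference-counted open pattern; the demo_* names and the pthread mutex are illustrative stand-ins for this note, not code from the patch.

/*
 * Hypothetical user-space sketch of the reference-counted open/close
 * pattern used by mousedev_open_device()/mousedev_close_device():
 * the first successful open touches the hardware, later opens only
 * bump the count, and only the last close releases it.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_device {
	pthread_mutex_t mutex;	/* serializes open/close, like mousedev->mutex */
	int exist;		/* cleared when the device is marked dead */
	int open;		/* number of active users */
};

static int demo_hw_open(struct demo_device *dev)   { (void)dev; puts("hw open");  return 0; }
static void demo_hw_close(struct demo_device *dev) { (void)dev; puts("hw close"); }

static int demo_open_device(struct demo_device *dev)
{
	int retval = 0;

	pthread_mutex_lock(&dev->mutex);
	if (!dev->exist)
		retval = -1;			/* stands in for -ENODEV */
	else if (!dev->open++) {
		retval = demo_hw_open(dev);
		if (retval)
			dev->open--;		/* undo the count on failure */
	}
	pthread_mutex_unlock(&dev->mutex);
	return retval;
}

static void demo_close_device(struct demo_device *dev)
{
	pthread_mutex_lock(&dev->mutex);
	if (dev->exist && !--dev->open)
		demo_hw_close(dev);		/* last user gone, release the hardware */
	pthread_mutex_unlock(&dev->mutex);
}

int main(void)
{
	struct demo_device dev = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	demo_open_device(&dev);		/* opens the hardware */
	demo_open_device(&dev);		/* only increments the count */
	demo_close_device(&dev);
	demo_close_device(&dev);	/* last close releases the hardware */
	return 0;
}
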
5712     diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c
5713     index d2f882e..1202334 100644
5714     --- a/drivers/input/tsdev.c
5715     +++ b/drivers/input/tsdev.c
5716     @@ -112,6 +112,8 @@ struct tsdev {
5717     struct input_handle handle;
5718     wait_queue_head_t wait;
5719     struct list_head client_list;
5720     + spinlock_t client_lock; /* protects client_list */
5721     + struct mutex mutex;
5722     struct device dev;
5723    
5724     int x, y, pressure;
5725     @@ -122,8 +124,9 @@ struct tsdev_client {
5726     struct fasync_struct *fasync;
5727     struct list_head node;
5728     struct tsdev *tsdev;
5729     + struct ts_event buffer[TSDEV_BUFFER_SIZE];
5730     int head, tail;
5731     - struct ts_event event[TSDEV_BUFFER_SIZE];
5732     + spinlock_t buffer_lock; /* protects access to buffer, head and tail */
5733     int raw;
5734     };
5735    
5736     @@ -137,6 +140,7 @@ struct tsdev_client {
5737     #define TS_SET_CAL _IOW(IOC_H3600_TS_MAGIC, 11, struct ts_calibration)
5738    
5739     static struct tsdev *tsdev_table[TSDEV_MINORS/2];
5740     +static DEFINE_MUTEX(tsdev_table_mutex);
5741    
5742     static int tsdev_fasync(int fd, struct file *file, int on)
5743     {
5744     @@ -144,9 +148,94 @@ static int tsdev_fasync(int fd, struct file *file, int on)
5745     int retval;
5746    
5747     retval = fasync_helper(fd, file, on, &client->fasync);
5748     +
5749     return retval < 0 ? retval : 0;
5750     }
5751    
5752     +static void tsdev_free(struct device *dev)
5753     +{
5754     + struct tsdev *tsdev = container_of(dev, struct tsdev, dev);
5755     +
5756     + kfree(tsdev);
5757     +}
5758     +
5759     +static void tsdev_attach_client(struct tsdev *tsdev, struct tsdev_client *client)
5760     +{
5761     + spin_lock(&tsdev->client_lock);
5762     + list_add_tail_rcu(&client->node, &tsdev->client_list);
5763     + spin_unlock(&tsdev->client_lock);
5764     + synchronize_sched();
5765     +}
5766     +
5767     +static void tsdev_detach_client(struct tsdev *tsdev, struct tsdev_client *client)
5768     +{
5769     + spin_lock(&tsdev->client_lock);
5770     + list_del_rcu(&client->node);
5771     + spin_unlock(&tsdev->client_lock);
5772     + synchronize_sched();
5773     +}
5774     +
5775     +static int tsdev_open_device(struct tsdev *tsdev)
5776     +{
5777     + int retval;
5778     +
5779     + retval = mutex_lock_interruptible(&tsdev->mutex);
5780     + if (retval)
5781     + return retval;
5782     +
5783     + if (!tsdev->exist)
5784     + retval = -ENODEV;
5785     + else if (!tsdev->open++) {
5786     + retval = input_open_device(&tsdev->handle);
5787     + if (retval)
5788     + tsdev->open--;
5789     + }
5790     +
5791     + mutex_unlock(&tsdev->mutex);
5792     + return retval;
5793     +}
5794     +
5795     +static void tsdev_close_device(struct tsdev *tsdev)
5796     +{
5797     + mutex_lock(&tsdev->mutex);
5798     +
5799     + if (tsdev->exist && !--tsdev->open)
5800     + input_close_device(&tsdev->handle);
5801     +
5802     + mutex_unlock(&tsdev->mutex);
5803     +}
5804     +
5805     +/*
5806     + * Wake up users waiting for IO so they can disconnect from
5807     + * dead device.
5808     + */
5809     +static void tsdev_hangup(struct tsdev *tsdev)
5810     +{
5811     + struct tsdev_client *client;
5812     +
5813     + spin_lock(&tsdev->client_lock);
5814     + list_for_each_entry(client, &tsdev->client_list, node)
5815     + kill_fasync(&client->fasync, SIGIO, POLL_HUP);
5816     + spin_unlock(&tsdev->client_lock);
5817     +
5818     + wake_up_interruptible(&tsdev->wait);
5819     +}
5820     +
5821     +static int tsdev_release(struct inode *inode, struct file *file)
5822     +{
5823     + struct tsdev_client *client = file->private_data;
5824     + struct tsdev *tsdev = client->tsdev;
5825     +
5826     + tsdev_fasync(-1, file, 0);
5827     + tsdev_detach_client(tsdev, client);
5828     + kfree(client);
5829     +
5830     + tsdev_close_device(tsdev);
5831     + put_device(&tsdev->dev);
5832     +
5833     + return 0;
5834     +}
5835     +
5836     static int tsdev_open(struct inode *inode, struct file *file)
5837     {
5838     int i = iminor(inode) - TSDEV_MINOR_BASE;
5839     @@ -161,11 +250,16 @@ static int tsdev_open(struct inode *inode, struct file *file)
5840     if (i >= TSDEV_MINORS)
5841     return -ENODEV;
5842    
5843     + error = mutex_lock_interruptible(&tsdev_table_mutex);
5844     + if (error)
5845     + return error;
5846     tsdev = tsdev_table[i & TSDEV_MINOR_MASK];
5847     - if (!tsdev || !tsdev->exist)
5848     - return -ENODEV;
5849     + if (tsdev)
5850     + get_device(&tsdev->dev);
5851     + mutex_unlock(&tsdev_table_mutex);
5852    
5853     - get_device(&tsdev->dev);
5854     + if (!tsdev)
5855     + return -ENODEV;
5856    
5857     client = kzalloc(sizeof(struct tsdev_client), GFP_KERNEL);
5858     if (!client) {
5859     @@ -173,51 +267,42 @@ static int tsdev_open(struct inode *inode, struct file *file)
5860     goto err_put_tsdev;
5861     }
5862    
5863     + spin_lock_init(&client->buffer_lock);
5864     client->tsdev = tsdev;
5865     - client->raw = (i >= TSDEV_MINORS / 2) ? 1 : 0;
5866     - list_add_tail(&client->node, &tsdev->client_list);
5867     + client->raw = i >= TSDEV_MINORS / 2;
5868     + tsdev_attach_client(tsdev, client);
5869    
5870     - if (!tsdev->open++ && tsdev->exist) {
5871     - error = input_open_device(&tsdev->handle);
5872     - if (error)
5873     - goto err_free_client;
5874     - }
5875     + error = tsdev_open_device(tsdev);
5876     + if (error)
5877     + goto err_free_client;
5878    
5879     file->private_data = client;
5880     return 0;
5881    
5882     err_free_client:
5883     - list_del(&client->node);
5884     + tsdev_detach_client(tsdev, client);
5885     kfree(client);
5886     err_put_tsdev:
5887     put_device(&tsdev->dev);
5888     return error;
5889     }
5890    
5891     -static void tsdev_free(struct device *dev)
5892     -{
5893     - struct tsdev *tsdev = container_of(dev, struct tsdev, dev);
5894     -
5895     - tsdev_table[tsdev->minor] = NULL;
5896     - kfree(tsdev);
5897     -}
5898     -
5899     -static int tsdev_release(struct inode *inode, struct file *file)
5900     +static int tsdev_fetch_next_event(struct tsdev_client *client,
5901     + struct ts_event *event)
5902     {
5903     - struct tsdev_client *client = file->private_data;
5904     - struct tsdev *tsdev = client->tsdev;
5905     + int have_event;
5906    
5907     - tsdev_fasync(-1, file, 0);
5908     -
5909     - list_del(&client->node);
5910     - kfree(client);
5911     + spin_lock_irq(&client->buffer_lock);
5912    
5913     - if (!--tsdev->open && tsdev->exist)
5914     - input_close_device(&tsdev->handle);
5915     + have_event = client->head != client->tail;
5916     + if (have_event) {
5917     + *event = client->buffer[client->tail++];
5918     + client->tail &= TSDEV_BUFFER_SIZE - 1;
5919     + }
5920    
5921     - put_device(&tsdev->dev);
5922     + spin_unlock_irq(&client->buffer_lock);
5923    
5924     - return 0;
5925     + return have_event;
5926     }
5927    
5928     static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count,
5929     @@ -225,9 +310,11 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count,
5930     {
5931     struct tsdev_client *client = file->private_data;
5932     struct tsdev *tsdev = client->tsdev;
5933     - int retval = 0;
5934     + struct ts_event event;
5935     + int retval;
5936    
5937     - if (client->head == client->tail && tsdev->exist && (file->f_flags & O_NONBLOCK))
5938     + if (client->head == client->tail && tsdev->exist &&
5939     + (file->f_flags & O_NONBLOCK))
5940     return -EAGAIN;
5941    
5942     retval = wait_event_interruptible(tsdev->wait,
5943     @@ -238,13 +325,14 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count,
5944     if (!tsdev->exist)
5945     return -ENODEV;
5946    
5947     - while (client->head != client->tail &&
5948     - retval + sizeof (struct ts_event) <= count) {
5949     - if (copy_to_user (buffer + retval, client->event + client->tail,
5950     - sizeof (struct ts_event)))
5951     + while (retval + sizeof(struct ts_event) <= count &&
5952     + tsdev_fetch_next_event(client, &event)) {
5953     +
5954     + if (copy_to_user(buffer + retval, &event,
5955     + sizeof(struct ts_event)))
5956     return -EFAULT;
5957     - client->tail = (client->tail + 1) & (TSDEV_BUFFER_SIZE - 1);
5958     - retval += sizeof (struct ts_event);
5959     +
5960     + retval += sizeof(struct ts_event);
5961     }
5962    
5963     return retval;
5964     @@ -261,14 +349,23 @@ static unsigned int tsdev_poll(struct file *file, poll_table *wait)
5965     (tsdev->exist ? 0 : (POLLHUP | POLLERR));
5966     }
5967    
5968     -static int tsdev_ioctl(struct inode *inode, struct file *file,
5969     - unsigned int cmd, unsigned long arg)
5970     +static long tsdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5971     {
5972     struct tsdev_client *client = file->private_data;
5973     struct tsdev *tsdev = client->tsdev;
5974     int retval = 0;
5975    
5976     + retval = mutex_lock_interruptible(&tsdev->mutex);
5977     + if (retval)
5978     + return retval;
5979     +
5980     + if (!tsdev->exist) {
5981     + retval = -ENODEV;
5982     + goto out;
5983     + }
5984     +
5985     switch (cmd) {
5986     +
5987     case TS_GET_CAL:
5988     if (copy_to_user((void __user *)arg, &tsdev->cal,
5989     sizeof (struct ts_calibration)))
5990     @@ -277,7 +374,7 @@ static int tsdev_ioctl(struct inode *inode, struct file *file,
5991    
5992     case TS_SET_CAL:
5993     if (copy_from_user(&tsdev->cal, (void __user *)arg,
5994     - sizeof (struct ts_calibration)))
5995     + sizeof(struct ts_calibration)))
5996     retval = -EFAULT;
5997     break;
5998    
5999     @@ -286,29 +383,79 @@ static int tsdev_ioctl(struct inode *inode, struct file *file,
6000     break;
6001     }
6002    
6003     + out:
6004     + mutex_unlock(&tsdev->mutex);
6005     return retval;
6006     }
6007    
6008     static const struct file_operations tsdev_fops = {
6009     - .owner = THIS_MODULE,
6010     - .open = tsdev_open,
6011     - .release = tsdev_release,
6012     - .read = tsdev_read,
6013     - .poll = tsdev_poll,
6014     - .fasync = tsdev_fasync,
6015     - .ioctl = tsdev_ioctl,
6016     + .owner = THIS_MODULE,
6017     + .open = tsdev_open,
6018     + .release = tsdev_release,
6019     + .read = tsdev_read,
6020     + .poll = tsdev_poll,
6021     + .fasync = tsdev_fasync,
6022     + .unlocked_ioctl = tsdev_ioctl,
6023     };
6024    
6025     +static void tsdev_pass_event(struct tsdev *tsdev, struct tsdev_client *client,
6026     + int x, int y, int pressure, int millisecs)
6027     +{
6028     + struct ts_event *event;
6029     + int tmp;
6030     +
6031     + /* Interrupts are already disabled, just acquire the lock */
6032     + spin_lock(&client->buffer_lock);
6033     +
6034     + event = &client->buffer[client->head++];
6035     + client->head &= TSDEV_BUFFER_SIZE - 1;
6036     +
6037     + /* Calibration */
6038     + if (!client->raw) {
6039     + x = ((x * tsdev->cal.xscale) >> 8) + tsdev->cal.xtrans;
6040     + y = ((y * tsdev->cal.yscale) >> 8) + tsdev->cal.ytrans;
6041     + if (tsdev->cal.xyswap) {
6042     + tmp = x; x = y; y = tmp;
6043     + }
6044     + }
6045     +
6046     + event->millisecs = millisecs;
6047     + event->x = x;
6048     + event->y = y;
6049     + event->pressure = pressure;
6050     +
6051     + spin_unlock(&client->buffer_lock);
6052     +
6053     + kill_fasync(&client->fasync, SIGIO, POLL_IN);
6054     +}
6055     +
6056     +static void tsdev_distribute_event(struct tsdev *tsdev)
6057     +{
6058     + struct tsdev_client *client;
6059     + struct timeval time;
6060     + int millisecs;
6061     +
6062     + do_gettimeofday(&time);
6063     + millisecs = time.tv_usec / 1000;
6064     +
6065     + list_for_each_entry_rcu(client, &tsdev->client_list, node)
6066     + tsdev_pass_event(tsdev, client,
6067     + tsdev->x, tsdev->y,
6068     + tsdev->pressure, millisecs);
6069     +}
6070     +
6071     static void tsdev_event(struct input_handle *handle, unsigned int type,
6072     unsigned int code, int value)
6073     {
6074     struct tsdev *tsdev = handle->private;
6075     - struct tsdev_client *client;
6076     - struct timeval time;
6077     + struct input_dev *dev = handle->dev;
6078     + int wake_up_readers = 0;
6079    
6080     switch (type) {
6081     +
6082     case EV_ABS:
6083     switch (code) {
6084     +
6085     case ABS_X:
6086     tsdev->x = value;
6087     break;
6088     @@ -318,9 +465,9 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
6089     break;
6090    
6091     case ABS_PRESSURE:
6092     - if (value > handle->dev->absmax[ABS_PRESSURE])
6093     - value = handle->dev->absmax[ABS_PRESSURE];
6094     - value -= handle->dev->absmin[ABS_PRESSURE];
6095     + if (value > dev->absmax[ABS_PRESSURE])
6096     + value = dev->absmax[ABS_PRESSURE];
6097     + value -= dev->absmin[ABS_PRESSURE];
6098     if (value < 0)
6099     value = 0;
6100     tsdev->pressure = value;
6101     @@ -330,6 +477,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
6102    
6103     case EV_REL:
6104     switch (code) {
6105     +
6106     case REL_X:
6107     tsdev->x += value;
6108     if (tsdev->x < 0)
6109     @@ -351,6 +499,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
6110     case EV_KEY:
6111     if (code == BTN_TOUCH || code == BTN_MOUSE) {
6112     switch (value) {
6113     +
6114     case 0:
6115     tsdev->pressure = 0;
6116     break;
6117     @@ -362,49 +511,71 @@ static void tsdev_event(struct input_handle *handle, unsigned int type,
6118     }
6119     }
6120     break;
6121     +
6122     + case EV_SYN:
6123     + if (code == SYN_REPORT) {
6124     + tsdev_distribute_event(tsdev);
6125     + wake_up_readers = 1;
6126     + }
6127     + break;
6128     }
6129    
6130     - if (type != EV_SYN || code != SYN_REPORT)
6131     - return;
6132     + if (wake_up_readers)
6133     + wake_up_interruptible(&tsdev->wait);
6134     +}
6135     +
6136     +static int tsdev_install_chrdev(struct tsdev *tsdev)
6137     +{
6138     + tsdev_table[tsdev->minor] = tsdev;
6139     + return 0;
6140     +}
6141    
6142     - list_for_each_entry(client, &tsdev->client_list, node) {
6143     - int x, y, tmp;
6144     +static void tsdev_remove_chrdev(struct tsdev *tsdev)
6145     +{
6146     + mutex_lock(&tsdev_table_mutex);
6147     + tsdev_table[tsdev->minor] = NULL;
6148     + mutex_unlock(&tsdev_table_mutex);
6149     +}
6150    
6151     - do_gettimeofday(&time);
6152     - client->event[client->head].millisecs = time.tv_usec / 1000;
6153     - client->event[client->head].pressure = tsdev->pressure;
6154     +/*
6155     + * Mark device non-existent. This disables writes, ioctls and
6156     + * prevents new users from opening the device. Already posted
6157     + * blocking reads will stay, however new ones will fail.
6158     + */
6159     +static void tsdev_mark_dead(struct tsdev *tsdev)
6160     +{
6161     + mutex_lock(&tsdev->mutex);
6162     + tsdev->exist = 0;
6163     + mutex_unlock(&tsdev->mutex);
6164     +}
6165    
6166     - x = tsdev->x;
6167     - y = tsdev->y;
6168     +static void tsdev_cleanup(struct tsdev *tsdev)
6169     +{
6170     + struct input_handle *handle = &tsdev->handle;
6171    
6172     - /* Calibration */
6173     - if (!client->raw) {
6174     - x = ((x * tsdev->cal.xscale) >> 8) + tsdev->cal.xtrans;
6175     - y = ((y * tsdev->cal.yscale) >> 8) + tsdev->cal.ytrans;
6176     - if (tsdev->cal.xyswap) {
6177     - tmp = x; x = y; y = tmp;
6178     - }
6179     - }
6180     + tsdev_mark_dead(tsdev);
6181     + tsdev_hangup(tsdev);
6182     + tsdev_remove_chrdev(tsdev);
6183    
6184     - client->event[client->head].x = x;
6185     - client->event[client->head].y = y;
6186     - client->head = (client->head + 1) & (TSDEV_BUFFER_SIZE - 1);
6187     - kill_fasync(&client->fasync, SIGIO, POLL_IN);
6188     - }
6189     - wake_up_interruptible(&tsdev->wait);
6190     + /* tsdev is marked dead so no one else accesses tsdev->open */
6191     + if (tsdev->open)
6192     + input_close_device(handle);
6193     }
6194    
6195     static int tsdev_connect(struct input_handler *handler, struct input_dev *dev,
6196     const struct input_device_id *id)
6197     {
6198     struct tsdev *tsdev;
6199     - int minor, delta;
6200     + int delta;
6201     + int minor;
6202     int error;
6203    
6204     - for (minor = 0; minor < TSDEV_MINORS / 2 && tsdev_table[minor]; minor++);
6205     - if (minor >= TSDEV_MINORS / 2) {
6206     - printk(KERN_ERR
6207     - "tsdev: You have way too many touchscreens\n");
6208     + for (minor = 0; minor < TSDEV_MINORS / 2; minor++)
6209     + if (!tsdev_table[minor])
6210     + break;
6211     +
6212     + if (minor == TSDEV_MINORS / 2) {
6213     + printk(KERN_ERR "tsdev: no more free tsdev devices\n");
6214     return -ENFILE;
6215     }
6216    
6217     @@ -413,15 +584,18 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev,
6218     return -ENOMEM;
6219    
6220     INIT_LIST_HEAD(&tsdev->client_list);
6221     + spin_lock_init(&tsdev->client_lock);
6222     + mutex_init(&tsdev->mutex);
6223     init_waitqueue_head(&tsdev->wait);
6224    
6225     + snprintf(tsdev->name, sizeof(tsdev->name), "ts%d", minor);
6226     tsdev->exist = 1;
6227     tsdev->minor = minor;
6228     +
6229     tsdev->handle.dev = dev;
6230     tsdev->handle.name = tsdev->name;
6231     tsdev->handle.handler = handler;
6232     tsdev->handle.private = tsdev;
6233     - snprintf(tsdev->name, sizeof(tsdev->name), "ts%d", minor);
6234    
6235     /* Precompute the rough calibration matrix */
6236     delta = dev->absmax [ABS_X] - dev->absmin [ABS_X] + 1;
6237     @@ -436,28 +610,31 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev,
6238     tsdev->cal.yscale = (yres << 8) / delta;
6239     tsdev->cal.ytrans = - ((dev->absmin [ABS_Y] * tsdev->cal.yscale) >> 8);
6240    
6241     - snprintf(tsdev->dev.bus_id, sizeof(tsdev->dev.bus_id),
6242     - "ts%d", minor);
6243     + strlcpy(tsdev->dev.bus_id, tsdev->name, sizeof(tsdev->dev.bus_id));
6244     + tsdev->dev.devt = MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor);
6245     tsdev->dev.class = &input_class;
6246     tsdev->dev.parent = &dev->dev;
6247     - tsdev->dev.devt = MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor);
6248     tsdev->dev.release = tsdev_free;
6249     device_initialize(&tsdev->dev);
6250    
6251     - tsdev_table[minor] = tsdev;
6252     -
6253     - error = device_add(&tsdev->dev);
6254     + error = input_register_handle(&tsdev->handle);
6255     if (error)
6256     goto err_free_tsdev;
6257    
6258     - error = input_register_handle(&tsdev->handle);
6259     + error = tsdev_install_chrdev(tsdev);
6260     if (error)
6261     - goto err_delete_tsdev;
6262     + goto err_unregister_handle;
6263     +
6264     + error = device_add(&tsdev->dev);
6265     + if (error)
6266     + goto err_cleanup_tsdev;
6267    
6268     return 0;
6269    
6270     - err_delete_tsdev:
6271     - device_del(&tsdev->dev);
6272     + err_cleanup_tsdev:
6273     + tsdev_cleanup(tsdev);
6274     + err_unregister_handle:
6275     + input_unregister_handle(&tsdev->handle);
6276     err_free_tsdev:
6277     put_device(&tsdev->dev);
6278     return error;
6279     @@ -466,20 +643,10 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev,
6280     static void tsdev_disconnect(struct input_handle *handle)
6281     {
6282     struct tsdev *tsdev = handle->private;
6283     - struct tsdev_client *client;
6284    
6285     - input_unregister_handle(handle);
6286     device_del(&tsdev->dev);
6287     -
6288     - tsdev->exist = 0;
6289     -
6290     - if (tsdev->open) {
6291     - input_close_device(handle);
6292     - list_for_each_entry(client, &tsdev->client_list, node)
6293     - kill_fasync(&client->fasync, SIGIO, POLL_HUP);
6294     - wake_up_interruptible(&tsdev->wait);
6295     - }
6296     -
6297     + tsdev_cleanup(tsdev);
6298     + input_unregister_handle(handle);
6299     put_device(&tsdev->dev);
6300     }
6301    
6302     @@ -510,13 +677,13 @@ static const struct input_device_id tsdev_ids[] = {
6303     MODULE_DEVICE_TABLE(input, tsdev_ids);
6304    
6305     static struct input_handler tsdev_handler = {
6306     - .event = tsdev_event,
6307     - .connect = tsdev_connect,
6308     - .disconnect = tsdev_disconnect,
6309     - .fops = &tsdev_fops,
6310     - .minor = TSDEV_MINOR_BASE,
6311     - .name = "tsdev",
6312     - .id_table = tsdev_ids,
6313     + .event = tsdev_event,
6314     + .connect = tsdev_connect,
6315     + .disconnect = tsdev_disconnect,
6316     + .fops = &tsdev_fops,
6317     + .minor = TSDEV_MINOR_BASE,
6318     + .name = "tsdev",
6319     + .id_table = tsdev_ids,
6320     };
6321    
6322     static int __init tsdev_init(void)
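Illustration: the reworked tsdev_connect() error path above unwinds in the reverse order of setup (input handle, then chrdev slot, then device). A minimal stand-alone C sketch of that acquire-forward/release-backward pattern, using hypothetical stand-in functions rather than the kernel APIs:

#include <stdio.h>

/* Stand-ins for the three registration steps done in tsdev_connect(). */
static int register_handle(void) { return 0; }   /* step 1 */
static int install_chrdev(void)  { return 0; }   /* step 2 */
static int add_device(void)      { return -1; }  /* step 3: fails here */

static void unregister_handle(void) { puts("unregister handle"); }
static void cleanup_chrdev(void)    { puts("cleanup chrdev"); }

static int connect_device(void)
{
	int err;

	err = register_handle();
	if (err)
		goto err_out;

	err = install_chrdev();
	if (err)
		goto err_unregister_handle;

	err = add_device();
	if (err)
		goto err_cleanup_chrdev;

	return 0;

	/* undo only what already succeeded, in reverse order */
err_cleanup_chrdev:
	cleanup_chrdev();
err_unregister_handle:
	unregister_handle();
err_out:
	return err;
}

int main(void)
{
	printf("connect_device() = %d\n", connect_device());
	return 0;
}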
6323     diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
6324     index 23b6f7b..f65b7f9 100644
6325     --- a/drivers/isdn/capi/capidrv.c
6326     +++ b/drivers/isdn/capi/capidrv.c
6327     @@ -2306,13 +2306,14 @@ static int __init capidrv_init(void)
6328    
6329     static void __exit capidrv_exit(void)
6330     {
6331     - char rev[10];
6332     + char rev[32];
6333     char *p;
6334    
6335     if ((p = strchr(revision, ':')) != 0) {
6336     - strcpy(rev, p + 1);
6337     - p = strchr(rev, '$');
6338     - *p = 0;
6339     + strncpy(rev, p + 1, sizeof(rev));
6340     + rev[sizeof(rev)-1] = 0;
6341     + if ((p = strchr(rev, '$')) != 0)
6342     + *p = 0;
6343     } else {
6344     strcpy(rev, " ??? ");
6345     }
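Illustration: the capidrv_exit() change above replaces an unbounded strcpy() of the CVS revision keyword with a bounded strncpy() plus explicit termination, and only strips the trailing '$' if it survived truncation. A stand-alone sketch of the same parsing (sample input and function name are illustrative only):

#include <stdio.h>
#include <string.h>

/* Extract "x.y" from a CVS "$Revision: x.y $" keyword without overrunning rev[]. */
static void parse_revision(const char *revision, char *rev, size_t revlen)
{
	const char *p = strchr(revision, ':');
	char *q;

	if (p) {
		strncpy(rev, p + 1, revlen);   /* bounded copy ...            */
		rev[revlen - 1] = '\0';        /* ... always NUL-terminated   */
		q = strchr(rev, '$');
		if (q)                         /* may be gone after truncation */
			*q = '\0';
	} else {
		snprintf(rev, revlen, " ??? ");
	}
}

int main(void)
{
	char rev[32];

	parse_revision("$Revision: 1.1.2.2 $", rev, sizeof(rev));
	printf("revision '%s'\n", rev);
	return 0;
}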
6346     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
6347     index bdc52d6..ba2e135 100644
6348     --- a/drivers/md/dm-crypt.c
6349     +++ b/drivers/md/dm-crypt.c
6350     @@ -399,7 +399,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
6351     struct bio *clone;
6352     unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6353     gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
6354     - unsigned int i;
6355     + unsigned i, len;
6356     + struct page *page;
6357    
6358     clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
6359     if (!clone)
6360     @@ -408,10 +409,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
6361     clone_init(io, clone);
6362    
6363     for (i = 0; i < nr_iovecs; i++) {
6364     - struct bio_vec *bv = bio_iovec_idx(clone, i);
6365     -
6366     - bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
6367     - if (!bv->bv_page)
6368     + page = mempool_alloc(cc->page_pool, gfp_mask);
6369     + if (!page)
6370     break;
6371    
6372     /*
6373     @@ -422,15 +421,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
6374     if (i == (MIN_BIO_PAGES - 1))
6375     gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
6376    
6377     - bv->bv_offset = 0;
6378     - if (size > PAGE_SIZE)
6379     - bv->bv_len = PAGE_SIZE;
6380     - else
6381     - bv->bv_len = size;
6382     + len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
6383     +
6384     + if (!bio_add_page(clone, page, len, 0)) {
6385     + mempool_free(page, cc->page_pool);
6386     + break;
6387     + }
6388    
6389     - clone->bi_size += bv->bv_len;
6390     - clone->bi_vcnt++;
6391     - size -= bv->bv_len;
6392     + size -= len;
6393     }
6394    
6395     if (!clone->bi_size) {
6396     @@ -515,6 +513,9 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
6397     struct crypt_config *cc = io->target->private;
6398     unsigned read_io = bio_data_dir(clone) == READ;
6399    
6400     + if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
6401     + error = -EIO;
6402     +
6403     /*
6404     * free the processed pages, even if
6405     * it's only a partially completed write
6406     @@ -529,10 +530,8 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
6407     if (!read_io)
6408     goto out;
6409    
6410     - if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
6411     - error = -EIO;
6412     + if (unlikely(error))
6413     goto out;
6414     - }
6415    
6416     bio_put(clone);
6417     io->post_process = 1;
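Illustration: the crypt_alloc_buffer() loop above now clamps each chunk to one page and stops cleanly when bio_add_page() refuses more data. The chunking logic, reduced to a user-space sketch (the PAGE_SIZE value and the add_chunk() stand-in are assumptions, not kernel calls):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for bio_add_page(): accept at most 'capacity' bytes in total. */
static unsigned added, capacity = 3 * PAGE_SIZE;

static unsigned add_chunk(unsigned len)
{
	if (added + len > capacity)
		return 0;          /* like bio_add_page(): 0 means "did not fit" */
	added += len;
	return len;
}

int main(void)
{
	unsigned size = 5 * PAGE_SIZE + 123;  /* request larger than the bio can hold */
	unsigned nr_iovecs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned i, len;

	for (i = 0; i < nr_iovecs; i++) {
		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
		if (!add_chunk(len))
			break;         /* stop early; caller works with a partial buffer */
		size -= len;
	}
	printf("queued %u bytes, %u left over\n", added, size);
	return 0;
}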
6418     diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
6419     index b441d82..8a4f63b 100644
6420     --- a/drivers/md/dm-ioctl.c
6421     +++ b/drivers/md/dm-ioctl.c
6422     @@ -1250,21 +1250,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
6423     if (!table)
6424     goto out_argv;
6425    
6426     - if (tmsg->sector >= dm_table_get_size(table)) {
6427     + ti = dm_table_find_target(table, tmsg->sector);
6428     + if (!dm_target_is_valid(ti)) {
6429     DMWARN("Target message sector outside device.");
6430     r = -EINVAL;
6431     - goto out_table;
6432     - }
6433     -
6434     - ti = dm_table_find_target(table, tmsg->sector);
6435     - if (ti->type->message)
6436     + } else if (ti->type->message)
6437     r = ti->type->message(ti, argc, argv);
6438     else {
6439     DMWARN("Target type does not support messages");
6440     r = -EINVAL;
6441     }
6442    
6443     - out_table:
6444     dm_table_put(table);
6445     out_argv:
6446     kfree(argv);
6447     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
6448     index 2bcde57..72d2250 100644
6449     --- a/drivers/md/dm-table.c
6450     +++ b/drivers/md/dm-table.c
6451     @@ -187,8 +187,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
6452    
6453     /*
6454     * Allocate both the target array and offset array at once.
6455     + * Append an empty entry to catch sectors beyond the end of
6456     + * the device.
6457     */
6458     - n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
6459     + n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
6460     sizeof(sector_t));
6461     if (!n_highs)
6462     return -ENOMEM;
6463     @@ -862,6 +864,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
6464    
6465     /*
6466     * Search the btree for the correct target.
6467     + *
6468     + * Caller should check returned pointer with dm_target_is_valid()
6469     + * to trap I/O beyond end of device.
6470     */
6471     struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
6472     {
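Illustration: the extra zeroed entry allocated by alloc_targets() acts as a sentinel; a lookup past the end of the device lands on it and dm_target_is_valid() then fails. A user-space sketch of that sentinel-lookup idea (struct and helper names here are illustrative, not the dm API):

#include <stdio.h>
#include <stdlib.h>

struct target {
	const char *owner;        /* NULL in the sentinel, like ti->table in dm */
	unsigned long begin, len;
};

#define target_is_valid(t) ((t)->owner != NULL)

/* Find the target covering 'sector'; past-the-end lookups hit the sentinel. */
static struct target *find_target(struct target *tgts, unsigned n, unsigned long sector)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (sector >= tgts[i].begin && sector < tgts[i].begin + tgts[i].len)
			return &tgts[i];
	return &tgts[n];          /* the extra, zeroed entry */
}

int main(void)
{
	unsigned n = 2;
	struct target *tgts = calloc(n + 1, sizeof(*tgts));   /* note: n + 1 */
	struct target *ti;

	tgts[0] = (struct target){ "linear", 0, 100 };
	tgts[1] = (struct target){ "crypt", 100, 100 };

	ti = find_target(tgts, n, 250);              /* beyond the end of the device */
	printf("valid: %d\n", target_is_valid(ti));  /* prints 0 */

	free(tgts);
	return 0;
}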
6473     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
6474     index 998d450..fac09d5 100644
6475     --- a/drivers/md/dm.c
6476     +++ b/drivers/md/dm.c
6477     @@ -663,13 +663,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
6478     return clone;
6479     }
6480    
6481     -static void __clone_and_map(struct clone_info *ci)
6482     +static int __clone_and_map(struct clone_info *ci)
6483     {
6484     struct bio *clone, *bio = ci->bio;
6485     - struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
6486     - sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
6487     + struct dm_target *ti;
6488     + sector_t len = 0, max;
6489     struct dm_target_io *tio;
6490    
6491     + ti = dm_table_find_target(ci->map, ci->sector);
6492     + if (!dm_target_is_valid(ti))
6493     + return -EIO;
6494     +
6495     + max = max_io_len(ci->md, ci->sector, ti);
6496     +
6497     /*
6498     * Allocate a target io object.
6499     */
6500     @@ -727,6 +733,9 @@ static void __clone_and_map(struct clone_info *ci)
6501     do {
6502     if (offset) {
6503     ti = dm_table_find_target(ci->map, ci->sector);
6504     + if (!dm_target_is_valid(ti))
6505     + return -EIO;
6506     +
6507     max = max_io_len(ci->md, ci->sector, ti);
6508    
6509     tio = alloc_tio(ci->md);
6510     @@ -750,6 +759,8 @@ static void __clone_and_map(struct clone_info *ci)
6511    
6512     ci->idx++;
6513     }
6514     +
6515     + return 0;
6516     }
6517    
6518     /*
6519     @@ -758,6 +769,7 @@ static void __clone_and_map(struct clone_info *ci)
6520     static void __split_bio(struct mapped_device *md, struct bio *bio)
6521     {
6522     struct clone_info ci;
6523     + int error = 0;
6524    
6525     ci.map = dm_get_table(md);
6526     if (!ci.map) {
6527     @@ -777,11 +789,11 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
6528     ci.idx = bio->bi_idx;
6529    
6530     start_io_acct(ci.io);
6531     - while (ci.sector_count)
6532     - __clone_and_map(&ci);
6533     + while (ci.sector_count && !error)
6534     + error = __clone_and_map(&ci);
6535    
6536     /* drop the extra reference count */
6537     - dec_pending(ci.io, 0);
6538     + dec_pending(ci.io, error);
6539     dm_table_put(ci.map);
6540     }
6541     /*-----------------------------------------------------------------
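Illustration: __split_bio() above now stops on the first failed __clone_and_map() and hands that error to dec_pending() instead of always reporting success. The control-flow shape, reduced to a sketch with a placeholder step():

#include <stdio.h>

/* Placeholder for __clone_and_map(): fail on the third chunk. */
static int step(int i)
{
	return (i == 2) ? -5 /* -EIO */ : 0;
}

int main(void)
{
	int remaining = 5, error = 0, i = 0;

	while (remaining && !error) {   /* stop as soon as a step fails */
		error = step(i++);
		if (!error)
			remaining--;
	}
	printf("remaining=%d error=%d\n", remaining, error);  /* error reported once */
	return 0;
}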
6542     diff --git a/drivers/md/dm.h b/drivers/md/dm.h
6543     index 462ee65..07298a3 100644
6544     --- a/drivers/md/dm.h
6545     +++ b/drivers/md/dm.h
6546     @@ -113,6 +113,11 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits);
6547     void dm_table_unplug_all(struct dm_table *t);
6548     int dm_table_flush_all(struct dm_table *t);
6549    
6550     +/*
6551     + * To check the return value from dm_table_find_target().
6552     + */
6553     +#define dm_target_is_valid(t) ((t)->table)
6554     +
6555     /*-----------------------------------------------------------------
6556     * A registry of target types.
6557     *---------------------------------------------------------------*/
6558     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
6559     index e86cacb..3085228 100644
6560     --- a/drivers/md/raid5.c
6561     +++ b/drivers/md/raid5.c
6562     @@ -2875,7 +2875,8 @@ static void handle_stripe5(struct stripe_head *sh)
6563     md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
6564     }
6565    
6566     - if (s.expanding && s.locked == 0)
6567     + if (s.expanding && s.locked == 0 &&
6568     + !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
6569     handle_stripe_expansion(conf, sh, NULL);
6570    
6571     if (sh->ops.count)
6572     @@ -3077,7 +3078,8 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
6573     md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
6574     }
6575    
6576     - if (s.expanding && s.locked == 0)
6577     + if (s.expanding && s.locked == 0 &&
6578     + !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
6579     handle_stripe_expansion(conf, sh, &r6s);
6580    
6581     spin_unlock(&sh->lock);
6582     diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
6583     index 0222bba..91047c7 100644
6584     --- a/drivers/misc/thinkpad_acpi.c
6585     +++ b/drivers/misc/thinkpad_acpi.c
6586     @@ -968,9 +968,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
6587     KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
6588     KEY_UNKNOWN, /* 0x0D: FN+INSERT */
6589     KEY_UNKNOWN, /* 0x0E: FN+DELETE */
6590     - KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */
6591     + KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
6592     /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
6593     - KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */
6594     + KEY_RESERVED, /* 0x10: FN+END (brightness down) */
6595     KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
6596     KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
6597     KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
6598     diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
6599     index f23e13c..d2d4730 100644
6600     --- a/drivers/net/atl1/atl1_main.c
6601     +++ b/drivers/net/atl1/atl1_main.c
6602     @@ -121,7 +121,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
6603     struct atl1_hw *hw = &adapter->hw;
6604     struct net_device *netdev = adapter->netdev;
6605    
6606     - hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
6607     + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6608     hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
6609    
6610     adapter->wol = 0;
6611     @@ -689,7 +689,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
6612     {
6613     struct atl1_adapter *adapter = netdev_priv(netdev);
6614     int old_mtu = netdev->mtu;
6615     - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
6616     + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6617    
6618     if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
6619     (max_frame > MAX_JUMBO_FRAME_SIZE)) {
6620     @@ -854,8 +854,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
6621     /* set Interrupt Clear Timer */
6622     iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
6623    
6624     - /* set MTU, 4 : VLAN */
6625     - iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);
6626     + /* set max frame size hw will accept */
6627     + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
6628    
6629     /* jumbo size & rrd retirement timer */
6630     value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
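Illustration: the atl1 change above accounts for the 4-byte 802.1Q tag in max_frame_size itself instead of adding it ad hoc at register-write time. The arithmetic, using the standard header lengths:

#include <stdio.h>

#define ETH_HLEN    14   /* destination + source MAC + ethertype */
#define ETH_FCS_LEN  4   /* frame check sequence */
#define VLAN_HLEN    4   /* 802.1Q tag */

int main(void)
{
	int mtu = 1500;
	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	printf("max frame for MTU %d: %d bytes\n", mtu, max_frame);  /* 1522 */
	return 0;
}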
6631     diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
6632     index f6e4030..0883112 100644
6633     --- a/drivers/net/cassini.c
6634     +++ b/drivers/net/cassini.c
6635     @@ -336,30 +336,6 @@ static inline void cas_mask_intr(struct cas *cp)
6636     cas_disable_irq(cp, i);
6637     }
6638    
6639     -static inline void cas_buffer_init(cas_page_t *cp)
6640     -{
6641     - struct page *page = cp->buffer;
6642     - atomic_set((atomic_t *)&page->lru.next, 1);
6643     -}
6644     -
6645     -static inline int cas_buffer_count(cas_page_t *cp)
6646     -{
6647     - struct page *page = cp->buffer;
6648     - return atomic_read((atomic_t *)&page->lru.next);
6649     -}
6650     -
6651     -static inline void cas_buffer_inc(cas_page_t *cp)
6652     -{
6653     - struct page *page = cp->buffer;
6654     - atomic_inc((atomic_t *)&page->lru.next);
6655     -}
6656     -
6657     -static inline void cas_buffer_dec(cas_page_t *cp)
6658     -{
6659     - struct page *page = cp->buffer;
6660     - atomic_dec((atomic_t *)&page->lru.next);
6661     -}
6662     -
6663     static void cas_enable_irq(struct cas *cp, const int ring)
6664     {
6665     if (ring == 0) { /* all but TX_DONE */
6666     @@ -497,7 +473,6 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
6667     {
6668     pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
6669     PCI_DMA_FROMDEVICE);
6670     - cas_buffer_dec(page);
6671     __free_pages(page->buffer, cp->page_order);
6672     kfree(page);
6673     return 0;
6674     @@ -527,7 +502,6 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
6675     page->buffer = alloc_pages(flags, cp->page_order);
6676     if (!page->buffer)
6677     goto page_err;
6678     - cas_buffer_init(page);
6679     page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
6680     cp->page_size, PCI_DMA_FROMDEVICE);
6681     return page;
6682     @@ -606,7 +580,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
6683     list_for_each_safe(elem, tmp, &list) {
6684     cas_page_t *page = list_entry(elem, cas_page_t, list);
6685    
6686     - if (cas_buffer_count(page) > 1)
6687     + if (page_count(page->buffer) > 1)
6688     continue;
6689    
6690     list_del(elem);
6691     @@ -1374,7 +1348,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
6692     cas_page_t *page = cp->rx_pages[1][index];
6693     cas_page_t *new;
6694    
6695     - if (cas_buffer_count(page) == 1)
6696     + if (page_count(page->buffer) == 1)
6697     return page;
6698    
6699     new = cas_page_dequeue(cp);
6700     @@ -1394,7 +1368,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
6701     cas_page_t **page1 = cp->rx_pages[1];
6702    
6703     /* swap if buffer is in use */
6704     - if (cas_buffer_count(page0[index]) > 1) {
6705     + if (page_count(page0[index]->buffer) > 1) {
6706     cas_page_t *new = cas_page_spare(cp, index);
6707     if (new) {
6708     page1[index] = page0[index];
6709     @@ -1979,6 +1953,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
6710     struct cas_page *page;
6711     struct sk_buff *skb;
6712     void *addr, *crcaddr;
6713     + __sum16 csum;
6714     char *p;
6715    
6716     hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
6717     @@ -2062,10 +2037,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
6718    
6719     skb_shinfo(skb)->nr_frags++;
6720     skb->data_len += hlen - swivel;
6721     + skb->truesize += hlen - swivel;
6722     skb->len += hlen - swivel;
6723    
6724     get_page(page->buffer);
6725     - cas_buffer_inc(page);
6726     frag->page = page->buffer;
6727     frag->page_offset = off;
6728     frag->size = hlen - swivel;
6729     @@ -2090,7 +2065,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
6730     frag++;
6731    
6732     get_page(page->buffer);
6733     - cas_buffer_inc(page);
6734     frag->page = page->buffer;
6735     frag->page_offset = 0;
6736     frag->size = hlen;
6737     @@ -2158,14 +2132,15 @@ end_copy_pkt:
6738     skb_put(skb, alloclen);
6739     }
6740    
6741     - i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
6742     + csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
6743     if (cp->crc_size) {
6744     /* checksum includes FCS. strip it out. */
6745     - i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
6746     + csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
6747     + csum_unfold(csum)));
6748     if (addr)
6749     cas_page_unmap(addr);
6750     }
6751     - skb->csum = ntohs(i ^ 0xffff);
6752     + skb->csum = csum_unfold(~csum);
6753     skb->ip_summed = CHECKSUM_COMPLETE;
6754     skb->protocol = eth_type_trans(skb, cp->dev);
6755     return len;
6756     @@ -2253,7 +2228,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
6757     released = 0;
6758     while (entry != last) {
6759     /* make a new buffer if it's still in use */
6760     - if (cas_buffer_count(page[entry]) > 1) {
6761     + if (page_count(page[entry]->buffer) > 1) {
6762     cas_page_t *new = cas_page_dequeue(cp);
6763     if (!new) {
6764     /* let the timer know that we need to
6765     diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
6766     index a970804..a201431 100644
6767     --- a/drivers/net/cassini.h
6768     +++ b/drivers/net/cassini.h
6769     @@ -4122,8 +4122,8 @@ cas_saturn_patch_t cas_saturn_patch[] = {
6770     inserted into
6771     outgoing frame. */
6772     struct cas_tx_desc {
6773     - u64 control;
6774     - u64 buffer;
6775     + __le64 control;
6776     + __le64 buffer;
6777     };
6778    
6779     /* descriptor ring for free buffers contains page-sized buffers. the index
6780     @@ -4131,8 +4131,8 @@ struct cas_tx_desc {
6781     * the completion ring.
6782     */
6783     struct cas_rx_desc {
6784     - u64 index;
6785     - u64 buffer;
6786     + __le64 index;
6787     + __le64 buffer;
6788     };
6789    
6790     /* received packets are put on the completion ring. */
6791     @@ -4210,10 +4210,10 @@ struct cas_rx_desc {
6792     #define RX_INDEX_RELEASE 0x0000000000002000ULL
6793    
6794     struct cas_rx_comp {
6795     - u64 word1;
6796     - u64 word2;
6797     - u64 word3;
6798     - u64 word4;
6799     + __le64 word1;
6800     + __le64 word2;
6801     + __le64 word3;
6802     + __le64 word4;
6803     };
6804    
6805     enum link_state {
6806     @@ -4252,7 +4252,7 @@ struct cas_init_block {
6807     struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
6808     struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
6809     struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
6810     - u64 tx_compwb;
6811     + __le64 tx_compwb;
6812     };
6813    
6814     /* tiny buffers to deal with target abort issue. we allocate a bit
6815     diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
6816     index 231ce43..a82a1fa 100644
6817     --- a/drivers/net/chelsio/cxgb2.c
6818     +++ b/drivers/net/chelsio/cxgb2.c
6819     @@ -370,6 +370,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
6820     "TxInternalMACXmitError",
6821     "TxFramesWithExcessiveDeferral",
6822     "TxFCSErrors",
6823     + "TxJumboFramesOk",
6824     + "TxJumboOctetsOk",
6825    
6826     "RxOctetsOK",
6827     "RxOctetsBad",
6828     @@ -388,15 +390,16 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
6829     "RxInRangeLengthErrors",
6830     "RxOutOfRangeLengthField",
6831     "RxFrameTooLongErrors",
6832     + "RxJumboFramesOk",
6833     + "RxJumboOctetsOk",
6834    
6835     /* Port stats */
6836     - "RxPackets",
6837     "RxCsumGood",
6838     - "TxPackets",
6839     "TxCsumOffload",
6840     "TxTso",
6841     "RxVlan",
6842     "TxVlan",
6843     + "TxNeedHeadroom",
6844    
6845     /* Interrupt stats */
6846     "rx drops",
6847     @@ -454,23 +457,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
6848     const struct cmac_statistics *s;
6849     const struct sge_intr_counts *t;
6850     struct sge_port_stats ss;
6851     - unsigned int len;
6852    
6853     s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
6854     -
6855     - len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
6856     - memcpy(data, &s->TxOctetsOK, len);
6857     - data += len;
6858     -
6859     - len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
6860     - memcpy(data, &s->RxOctetsOK, len);
6861     - data += len;
6862     -
6863     + t = t1_sge_get_intr_counts(adapter->sge);
6864     t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
6865     - memcpy(data, &ss, sizeof(ss));
6866     - data += sizeof(ss);
6867    
6868     - t = t1_sge_get_intr_counts(adapter->sge);
6869     + *data++ = s->TxOctetsOK;
6870     + *data++ = s->TxOctetsBad;
6871     + *data++ = s->TxUnicastFramesOK;
6872     + *data++ = s->TxMulticastFramesOK;
6873     + *data++ = s->TxBroadcastFramesOK;
6874     + *data++ = s->TxPauseFrames;
6875     + *data++ = s->TxFramesWithDeferredXmissions;
6876     + *data++ = s->TxLateCollisions;
6877     + *data++ = s->TxTotalCollisions;
6878     + *data++ = s->TxFramesAbortedDueToXSCollisions;
6879     + *data++ = s->TxUnderrun;
6880     + *data++ = s->TxLengthErrors;
6881     + *data++ = s->TxInternalMACXmitError;
6882     + *data++ = s->TxFramesWithExcessiveDeferral;
6883     + *data++ = s->TxFCSErrors;
6884     + *data++ = s->TxJumboFramesOK;
6885     + *data++ = s->TxJumboOctetsOK;
6886     +
6887     + *data++ = s->RxOctetsOK;
6888     + *data++ = s->RxOctetsBad;
6889     + *data++ = s->RxUnicastFramesOK;
6890     + *data++ = s->RxMulticastFramesOK;
6891     + *data++ = s->RxBroadcastFramesOK;
6892     + *data++ = s->RxPauseFrames;
6893     + *data++ = s->RxFCSErrors;
6894     + *data++ = s->RxAlignErrors;
6895     + *data++ = s->RxSymbolErrors;
6896     + *data++ = s->RxDataErrors;
6897     + *data++ = s->RxSequenceErrors;
6898     + *data++ = s->RxRuntErrors;
6899     + *data++ = s->RxJabberErrors;
6900     + *data++ = s->RxInternalMACRcvError;
6901     + *data++ = s->RxInRangeLengthErrors;
6902     + *data++ = s->RxOutOfRangeLengthField;
6903     + *data++ = s->RxFrameTooLongErrors;
6904     + *data++ = s->RxJumboFramesOK;
6905     + *data++ = s->RxJumboOctetsOK;
6906     +
6907     + *data++ = ss.rx_cso_good;
6908     + *data++ = ss.tx_cso;
6909     + *data++ = ss.tx_tso;
6910     + *data++ = ss.vlan_xtract;
6911     + *data++ = ss.vlan_insert;
6912     + *data++ = ss.tx_need_hdrroom;
6913     +
6914     *data++ = t->rx_drops;
6915     *data++ = t->pure_rsps;
6916     *data++ = t->unhandled_irqs;
6917     diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
6918     index 678778a..2117c4f 100644
6919     --- a/drivers/net/chelsio/pm3393.c
6920     +++ b/drivers/net/chelsio/pm3393.c
6921     @@ -45,7 +45,7 @@
6922    
6923     #include <linux/crc32.h>
6924    
6925     -#define OFFSET(REG_ADDR) (REG_ADDR << 2)
6926     +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
6927    
6928     /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
6929     #define MAX_FRAME_SIZE 9600
6930     @@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
6931     return 0;
6932     }
6933    
6934     -static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
6935     - int over)
6936     -{
6937     - u32 val0, val1, val2;
6938     -
6939     - t1_tpi_read(adapter, offs, &val0);
6940     - t1_tpi_read(adapter, offs + 4, &val1);
6941     - t1_tpi_read(adapter, offs + 8, &val2);
6942     -
6943     - *val &= ~0ull << 40;
6944     - *val |= val0 & 0xffff;
6945     - *val |= (val1 & 0xffff) << 16;
6946     - *val |= (u64)(val2 & 0xff) << 32;
6947     -
6948     - if (over)
6949     - *val += 1ull << 40;
6950     +#define RMON_UPDATE(mac, name, stat_name) \
6951     +{ \
6952     + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
6953     + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
6954     + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
6955     + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
6956     + ((u64)(val1 & 0xffff) << 16) | \
6957     + ((u64)(val2 & 0xff) << 32) | \
6958     + ((mac)->stats.stat_name & \
6959     + 0xffffff0000000000ULL); \
6960     + if (ro & \
6961     + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
6962     + (mac)->stats.stat_name += 1ULL << 40; \
6963     }
6964    
6965     static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
6966     int flag)
6967     {
6968     - static struct {
6969     - unsigned int reg;
6970     - unsigned int offset;
6971     - } hw_stats [] = {
6972     -
6973     -#define HW_STAT(name, stat_name) \
6974     - { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
6975     -
6976     - /* Rx stats */
6977     - HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
6978     - HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
6979     - HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
6980     - HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
6981     - HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
6982     - HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
6983     - HW_STAT(RxFramesLostDueToInternalMACErrors,
6984     - RxInternalMACRcvError),
6985     - HW_STAT(RxSymbolErrors, RxSymbolErrors),
6986     - HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
6987     - HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
6988     - HW_STAT(RxJabbers, RxJabberErrors),
6989     - HW_STAT(RxFragments, RxRuntErrors),
6990     - HW_STAT(RxUndersizedFrames, RxRuntErrors),
6991     - HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
6992     - HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
6993     -
6994     - /* Tx stats */
6995     - HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
6996     - HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
6997     - TxInternalMACXmitError),
6998     - HW_STAT(TxTransmitSystemError, TxFCSErrors),
6999     - HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
7000     - HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
7001     - HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
7002     - HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
7003     - HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
7004     - HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
7005     - }, *p = hw_stats;
7006     - u64 ro;
7007     - u32 val0, val1, val2, val3;
7008     - u64 *stats = (u64 *) &mac->stats;
7009     - unsigned int i;
7010     + u64 ro;
7011     + u32 val0, val1, val2, val3;
7012    
7013     /* Snap the counters */
7014     pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
7015     @@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
7016     ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
7017     (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
7018    
7019     - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
7020     - unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
7021     -
7022     - pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
7023     - stats + p->offset, ro & (reg >> 2));
7024     - }
7025     -
7026     -
7027     + /* Rx stats */
7028     + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
7029     + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
7030     + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
7031     + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
7032     + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
7033     + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
7034     + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
7035     + RxInternalMACRcvError);
7036     + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
7037     + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
7038     + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
7039     + RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
7040     + RMON_UPDATE(mac, RxFragments, RxRuntErrors);
7041     + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
7042     + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
7043     + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
7044     +
7045     + /* Tx stats */
7046     + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
7047     + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
7048     + TxInternalMACXmitError);
7049     + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
7050     + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
7051     + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
7052     + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
7053     + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
7054     + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
7055     + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
7056    
7057     return &mac->stats;
7058     }
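Illustration: RMON_UPDATE() above merges a 40-bit hardware reading (three register words) into the low bits of a 64-bit software counter, preserving the software-maintained high bits and adding 2^40 when the snapshot's rollover bit for that counter is set. The same merge as a stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

/*
 * Merge a 40-bit hardware reading into a 64-bit running counter,
 * keeping the software-maintained high 24 bits and bumping them by
 * 2^40 when the hardware reports a rollover for this counter.
 */
static uint64_t rmon_update(uint64_t sw, uint32_t val0, uint32_t val1,
			    uint32_t val2, int rolled_over)
{
	sw = (uint64_t)(val0 & 0xffff)
	   | ((uint64_t)(val1 & 0xffff) << 16)
	   | ((uint64_t)(val2 & 0xff) << 32)
	   | (sw & 0xffffff0000000000ULL);
	if (rolled_over)
		sw += 1ULL << 40;
	return sw;
}

int main(void)
{
	uint64_t frames = 3ULL << 40;   /* three rollovers seen so far */

	frames = rmon_update(frames, 0x1234, 0x0002, 0x00, 1);
	printf("frames = 0x%llx\n", (unsigned long long)frames);
	return 0;
}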
7059     diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
7060     index e4f874a..d77f1eb 100644
7061     --- a/drivers/net/chelsio/sge.c
7062     +++ b/drivers/net/chelsio/sge.c
7063     @@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port,
7064     for_each_possible_cpu(cpu) {
7065     struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
7066    
7067     - ss->rx_packets += st->rx_packets;
7068     ss->rx_cso_good += st->rx_cso_good;
7069     - ss->tx_packets += st->tx_packets;
7070     ss->tx_cso += st->tx_cso;
7071     ss->tx_tso += st->tx_tso;
7072     + ss->tx_need_hdrroom += st->tx_need_hdrroom;
7073     ss->vlan_xtract += st->vlan_xtract;
7074     ss->vlan_insert += st->vlan_insert;
7075     }
7076     @@ -1379,11 +1378,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
7077     }
7078     __skb_pull(skb, sizeof(*p));
7079    
7080     - skb->dev->last_rx = jiffies;
7081     st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
7082     - st->rx_packets++;
7083    
7084     skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
7085     + skb->dev->last_rx = jiffies;
7086     if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
7087     skb->protocol == htons(ETH_P_IP) &&
7088     (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
7089     @@ -1851,7 +1849,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
7090     {
7091     struct adapter *adapter = dev->priv;
7092     struct sge *sge = adapter->sge;
7093     - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
7094     + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
7095     + smp_processor_id());
7096     struct cpl_tx_pkt *cpl;
7097     struct sk_buff *orig_skb = skb;
7098     int ret;
7099     @@ -1859,6 +1858,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
7100     if (skb->protocol == htons(ETH_P_CPL5))
7101     goto send;
7102    
7103     + /*
7104     + * We are using a non-standard hard_header_len.
7105     + * Allocate more header room in the rare cases it is not big enough.
7106     + */
7107     + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
7108     + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
7109     + ++st->tx_need_hdrroom;
7110     + dev_kfree_skb_any(orig_skb);
7111     + if (!skb)
7112     + return NETDEV_TX_OK;
7113     + }
7114     +
7115     if (skb_shinfo(skb)->gso_size) {
7116     int eth_type;
7117     struct cpl_tx_pkt_lso *hdr;
7118     @@ -1892,24 +1903,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
7119     return NETDEV_TX_OK;
7120     }
7121    
7122     - /*
7123     - * We are using a non-standard hard_header_len and some kernel
7124     - * components, such as pktgen, do not handle it right.
7125     - * Complain when this happens but try to fix things up.
7126     - */
7127     - if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
7128     - pr_debug("%s: headroom %d header_len %d\n", dev->name,
7129     - skb_headroom(skb), dev->hard_header_len);
7130     -
7131     - if (net_ratelimit())
7132     - printk(KERN_ERR "%s: inadequate headroom in "
7133     - "Tx packet\n", dev->name);
7134     - skb = skb_realloc_headroom(skb, sizeof(*cpl));
7135     - dev_kfree_skb_any(orig_skb);
7136     - if (!skb)
7137     - return NETDEV_TX_OK;
7138     - }
7139     -
7140     if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
7141     skb->ip_summed == CHECKSUM_PARTIAL &&
7142     ip_hdr(skb)->protocol == IPPROTO_UDP) {
7143     @@ -1955,7 +1948,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
7144     cpl->vlan_valid = 0;
7145    
7146     send:
7147     - st->tx_packets++;
7148     dev->trans_start = jiffies;
7149     ret = t1_sge_tx(skb, adapter, 0, dev);
7150    
7151     diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
7152     index d132a0e..80165f9 100644
7153     --- a/drivers/net/chelsio/sge.h
7154     +++ b/drivers/net/chelsio/sge.h
7155     @@ -57,13 +57,12 @@ struct sge_intr_counts {
7156     };
7157    
7158     struct sge_port_stats {
7159     - u64 rx_packets; /* # of Ethernet packets received */
7160     u64 rx_cso_good; /* # of successful RX csum offloads */
7161     - u64 tx_packets; /* # of TX packets */
7162     u64 tx_cso; /* # of TX checksum offloads */
7163     u64 tx_tso; /* # of TSO requests */
7164     u64 vlan_xtract; /* # of VLAN tag extractions */
7165     u64 vlan_insert; /* # of VLAN tag insertions */
7166     + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
7167     };
7168    
7169     struct sk_buff;
7170     diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
7171     index fcbe508..cbcdf14 100644
7172     --- a/drivers/net/forcedeth.c
7173     +++ b/drivers/net/forcedeth.c
7174     @@ -5564,35 +5564,35 @@ static struct pci_device_id pci_tbl[] = {
7175     },
7176     { /* MCP77 Ethernet Controller */
7177     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
7178     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7179     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7180     },
7181     { /* MCP77 Ethernet Controller */
7182     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
7183     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7184     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7185     },
7186     { /* MCP77 Ethernet Controller */
7187     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
7188     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7189     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7190     },
7191     { /* MCP77 Ethernet Controller */
7192     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
7193     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7194     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7195     },
7196     { /* MCP79 Ethernet Controller */
7197     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
7198     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7199     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7200     },
7201     { /* MCP79 Ethernet Controller */
7202     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
7203     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7204     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7205     },
7206     { /* MCP79 Ethernet Controller */
7207     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
7208     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7209     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7210     },
7211     { /* MCP79 Ethernet Controller */
7212     PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
7213     - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
7214     + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
7215     },
7216     {0,},
7217     };
7218     diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
7219     index 2575077..3ed45a3 100644
7220     --- a/drivers/net/sky2.c
7221     +++ b/drivers/net/sky2.c
7222     @@ -812,8 +812,13 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
7223    
7224     sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
7225    
7226     - /* Flush Rx MAC FIFO on any flow control or error */
7227     - sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
7228     + if (hw->chip_id == CHIP_ID_YUKON_XL) {
7229     + /* Hardware errata - clear flush mask */
7230     + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
7231     + } else {
7232     + /* Flush Rx MAC FIFO on any flow control or error */
7233     + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
7234     + }
7235    
7236     /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */
7237     reg = RX_GMF_FL_THR_DEF + 1;
7238     @@ -1307,15 +1312,11 @@ static int sky2_up(struct net_device *dev)
7239     */
7240     if (otherdev && netif_running(otherdev) &&
7241     (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
7242     - struct sky2_port *osky2 = netdev_priv(otherdev);
7243     u16 cmd;
7244    
7245     cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
7246     cmd &= ~PCI_X_CMD_MAX_SPLIT;
7247     sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
7248     -
7249     - sky2->rx_csum = 0;
7250     - osky2->rx_csum = 0;
7251     }
7252    
7253     if (netif_msg_ifup(sky2))
7254     @@ -4017,7 +4018,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
7255     sky2->duplex = -1;
7256     sky2->speed = -1;
7257     sky2->advertising = sky2_supported_modes(hw);
7258     - sky2->rx_csum = 1;
7259     + sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
7260     sky2->wol = wol;
7261    
7262     spin_lock_init(&sky2->phy_lock);
7263     diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
7264     index 524dc5f..9057d71 100644
7265     --- a/drivers/net/usb/kaweth.c
7266     +++ b/drivers/net/usb/kaweth.c
7267     @@ -70,7 +70,7 @@
7268     #define KAWETH_TX_TIMEOUT (5 * HZ)
7269     #define KAWETH_SCRATCH_SIZE 32
7270     #define KAWETH_FIRMWARE_BUF_SIZE 4096
7271     -#define KAWETH_CONTROL_TIMEOUT (30 * HZ)
7272     +#define KAWETH_CONTROL_TIMEOUT (30000)
7273    
7274     #define KAWETH_STATUS_BROKEN 0x0000001
7275     #define KAWETH_STATUS_CLOSING 0x0000002
7276     diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
7277     index 6240b97..3bbc5c4 100644
7278     --- a/drivers/net/usb/mcs7830.c
7279     +++ b/drivers/net/usb/mcs7830.c
7280     @@ -94,7 +94,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
7281    
7282     ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
7283     MCS7830_RD_BMREQ, 0x0000, index, data,
7284     - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
7285     + size, MCS7830_CTRL_TIMEOUT);
7286     return ret;
7287     }
7288    
7289     @@ -105,7 +105,7 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
7290    
7291     ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
7292     MCS7830_WR_BMREQ, 0x0000, index, data,
7293     - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
7294     + size, MCS7830_CTRL_TIMEOUT);
7295     return ret;
7296     }
7297    
7298     diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
7299     index 027f686..02a09d5 100644
7300     --- a/drivers/pci/hotplug/fakephp.c
7301     +++ b/drivers/pci/hotplug/fakephp.c
7302     @@ -39,6 +39,7 @@
7303     #include <linux/init.h>
7304     #include <linux/string.h>
7305     #include <linux/slab.h>
7306     +#include <linux/workqueue.h>
7307     #include "../pci.h"
7308    
7309     #if !defined(MODULE)
7310     @@ -63,10 +64,16 @@ struct dummy_slot {
7311     struct list_head node;
7312     struct hotplug_slot *slot;
7313     struct pci_dev *dev;
7314     + struct work_struct remove_work;
7315     + unsigned long removed;
7316     };
7317    
7318     static int debug;
7319     static LIST_HEAD(slot_list);
7320     +static struct workqueue_struct *dummyphp_wq;
7321     +
7322     +static void pci_rescan_worker(struct work_struct *work);
7323     +static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
7324    
7325     static int enable_slot (struct hotplug_slot *slot);
7326     static int disable_slot (struct hotplug_slot *slot);
7327     @@ -109,7 +116,7 @@ static int add_slot(struct pci_dev *dev)
7328     slot->name = &dev->dev.bus_id[0];
7329     dbg("slot->name = %s\n", slot->name);
7330    
7331     - dslot = kmalloc(sizeof(struct dummy_slot), GFP_KERNEL);
7332     + dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
7333     if (!dslot)
7334     goto error_info;
7335    
7336     @@ -164,6 +171,14 @@ static void remove_slot(struct dummy_slot *dslot)
7337     err("Problem unregistering a slot %s\n", dslot->slot->name);
7338     }
7339    
7340     +/* called from the single-threaded workqueue handler to remove a slot */
7341     +static void remove_slot_worker(struct work_struct *work)
7342     +{
7343     + struct dummy_slot *dslot =
7344     + container_of(work, struct dummy_slot, remove_work);
7345     + remove_slot(dslot);
7346     +}
7347     +
7348     /**
7349     * Rescan slot.
7350     * Tries hard not to re-enable already existing devices
7351     @@ -267,11 +282,17 @@ static inline void pci_rescan(void) {
7352     pci_rescan_buses(&pci_root_buses);
7353     }
7354    
7355     +/* called from the single-threaded workqueue handler to rescan all pci buses */
7356     +static void pci_rescan_worker(struct work_struct *work)
7357     +{
7358     + pci_rescan();
7359     +}
7360    
7361     static int enable_slot(struct hotplug_slot *hotplug_slot)
7362     {
7363     /* mis-use enable_slot for rescanning of the pci bus */
7364     - pci_rescan();
7365     + cancel_work_sync(&pci_rescan_work);
7366     + queue_work(dummyphp_wq, &pci_rescan_work);
7367     return -ENODEV;
7368     }
7369    
7370     @@ -306,6 +327,10 @@ static int disable_slot(struct hotplug_slot *slot)
7371     err("Can't remove PCI devices with other PCI devices behind it yet.\n");
7372     return -ENODEV;
7373     }
7374     + if (test_and_set_bit(0, &dslot->removed)) {
7375     + dbg("Slot already scheduled for removal\n");
7376     + return -ENODEV;
7377     + }
7378     /* search for subfunctions and disable them first */
7379     if (!(dslot->dev->devfn & 7)) {
7380     for (func = 1; func < 8; func++) {
7381     @@ -328,8 +353,9 @@ static int disable_slot(struct hotplug_slot *slot)
7382     /* remove the device from the pci core */
7383     pci_remove_bus_device(dslot->dev);
7384    
7385     - /* blow away this sysfs entry and other parts. */
7386     - remove_slot(dslot);
7387     + /* queue work item to blow away this sysfs entry and other parts. */
7388     + INIT_WORK(&dslot->remove_work, remove_slot_worker);
7389     + queue_work(dummyphp_wq, &dslot->remove_work);
7390    
7391     return 0;
7392     }
7393     @@ -340,6 +366,7 @@ static void cleanup_slots (void)
7394     struct list_head *next;
7395     struct dummy_slot *dslot;
7396    
7397     + destroy_workqueue(dummyphp_wq);
7398     list_for_each_safe (tmp, next, &slot_list) {
7399     dslot = list_entry (tmp, struct dummy_slot, node);
7400     remove_slot(dslot);
7401     @@ -351,6 +378,10 @@ static int __init dummyphp_init(void)
7402     {
7403     info(DRIVER_DESC "\n");
7404    
7405     + dummyphp_wq = create_singlethread_workqueue(MY_NAME);
7406     + if (!dummyphp_wq)
7407     + return -ENOMEM;
7408     +
7409     return pci_scan_buses();
7410     }
7411    
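Illustration: disable_slot() above now defers the actual teardown to a workqueue and uses test_and_set_bit() so a slot is queued for removal at most once. The schedule-at-most-once guard, sketched with C11 atomics (types and names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdatomic.h>

struct slot {
	const char *name;
	atomic_flag removed;     /* plays the role of dslot->removed */
};

/* Returns 1 the first time removal is requested, 0 on repeats. */
static int schedule_removal(struct slot *s)
{
	if (atomic_flag_test_and_set(&s->removed)) {
		printf("%s: already scheduled for removal\n", s->name);
		return 0;
	}
	printf("%s: queued for removal\n", s->name);
	return 1;
}

int main(void)
{
	struct slot s = { "fake0", ATOMIC_FLAG_INIT };

	schedule_removal(&s);    /* queues the work */
	schedule_removal(&s);    /* rejected, already pending */
	return 0;
}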
7412     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
7413     index 50f2dd9..75831c8 100644
7414     --- a/drivers/pci/quirks.c
7415     +++ b/drivers/pci/quirks.c
7416     @@ -465,6 +465,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk
7417     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi );
7418     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi );
7419     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi );
7420     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi );
7421     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi );
7422     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi );
7423     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi );
7424     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi );
7425     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi );
7426    
7427     /*
7428     * VIA ACPI: One IO region pointed to by longword at
7429     diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
7430     index 6b357cd..5a827ea 100644
7431     --- a/drivers/spi/omap2_mcspi.c
7432     +++ b/drivers/spi/omap2_mcspi.c
7433     @@ -350,6 +350,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7434     tx = xfer->tx_buf;
7435    
7436     do {
7437     + c -= 1;
7438     if (tx != NULL) {
7439     if (mcspi_wait_for_reg_bit(chstat_reg,
7440     OMAP2_MCSPI_CHSTAT_TXS) < 0) {
7441     @@ -380,7 +381,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7442     word_len, *(rx - 1));
7443     #endif
7444     }
7445     - c -= 1;
7446     } while (c);
7447     } else if (word_len <= 16) {
7448     u16 *rx;
7449     @@ -389,6 +389,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7450     rx = xfer->rx_buf;
7451     tx = xfer->tx_buf;
7452     do {
7453     + c -= 2;
7454     if (tx != NULL) {
7455     if (mcspi_wait_for_reg_bit(chstat_reg,
7456     OMAP2_MCSPI_CHSTAT_TXS) < 0) {
7457     @@ -419,7 +420,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7458     word_len, *(rx - 1));
7459     #endif
7460     }
7461     - c -= 2;
7462     } while (c);
7463     } else if (word_len <= 32) {
7464     u32 *rx;
7465     @@ -428,6 +428,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7466     rx = xfer->rx_buf;
7467     tx = xfer->tx_buf;
7468     do {
7469     + c -= 4;
7470     if (tx != NULL) {
7471     if (mcspi_wait_for_reg_bit(chstat_reg,
7472     OMAP2_MCSPI_CHSTAT_TXS) < 0) {
7473     @@ -458,7 +459,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
7474     word_len, *(rx - 1));
7475     #endif
7476     }
7477     - c -= 4;
7478     } while (c);
7479     }
7480    
7481     diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
7482     index 0bb8de4..7cf21d7 100644
7483     --- a/drivers/usb/serial/sierra.c
7484     +++ b/drivers/usb/serial/sierra.c
7485     @@ -100,6 +100,7 @@ static struct usb_device_id id_table [] = {
7486     { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
7487     { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
7488     { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
7489     + { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
7490     { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
7491     { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
7492     { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
7493     @@ -108,6 +109,7 @@ static struct usb_device_id id_table [] = {
7494     { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
7495     { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
7496     { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
7497     + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */
7498     { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
7499     { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/
7500     { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/
7501     @@ -136,6 +138,7 @@ static struct usb_device_id id_table_3port [] = {
7502     { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
7503     { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
7504     { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
7505     + { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
7506     { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
7507     { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
7508     { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U*/
7509     @@ -144,6 +147,7 @@ static struct usb_device_id id_table_3port [] = {
7510     { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
7511     { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
7512     { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
7513     + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */
7514     { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
7515     { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/
7516     { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/
7517     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
7518     index dd41677..48a61da 100644
7519     --- a/fs/cifs/inode.c
7520     +++ b/fs/cifs/inode.c
7521     @@ -919,6 +919,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
7522     goto mkdir_out;
7523     }
7524    
7525     + mode &= ~current->fs->umask;
7526     rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
7527     mode, NULL /* netfid */, pInfo, &oplock,
7528     full_path, cifs_sb->local_nls,
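Illustration: the one-line cifs_mkdir() fix above masks the requested mode with the caller's umask before the POSIX create is sent to the server, since the server-side create does not apply it. The masking itself, spelled out:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t mode = 0777;       /* mode requested by the mkdir(2) caller */
	mode_t umask_bits = 022;  /* typical process umask */

	mode &= ~umask_bits;      /* what the new directory actually gets */
	printf("effective mode: %o\n", (unsigned)mode);   /* 755 */
	return 0;
}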
7529     diff --git a/fs/exec.c b/fs/exec.c
7530     index 073b0b8..401b850 100644
7531     --- a/fs/exec.c
7532     +++ b/fs/exec.c
7533     @@ -1786,6 +1786,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
7534     but keep the previous behaviour for now. */
7535     if (!ispipe && !S_ISREG(inode->i_mode))
7536     goto close_fail;
7537     + /*
7538     + * Don't allow local users to get cute and trick others into
7539     + * dumping core into their pre-created files:
7540     + */
7541     + if (inode->i_uid != current->fsuid)
7542     + goto close_fail;
7543     if (!file->f_op)
7544     goto close_fail;
7545     if (!file->f_op->write)
7546     diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
7547     index a94473d..5d8dcb9 100644
7548     --- a/fs/ncpfs/mmap.c
7549     +++ b/fs/ncpfs/mmap.c
7550     @@ -50,10 +50,6 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
7551     pos = vmf->pgoff << PAGE_SHIFT;
7552    
7553     count = PAGE_SIZE;
7554     - if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) {
7555     - WARN_ON(1); /* shouldn't happen? */
7556     - count = area->vm_end - (unsigned long)vmf->virtual_address;
7557     - }
7558     /* what we can read in one go */
7559     bufsize = NCP_SERVER(inode)->buffer_size;
7560    
7561     diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
7562     index 10f6e7d..2dc0a54 100644
7563     --- a/fs/nfsd/nfs3xdr.c
7564     +++ b/fs/nfsd/nfs3xdr.c
7565     @@ -396,8 +396,11 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
7566     * Round the length of the data which was specified up to
7567     * the next multiple of XDR units and then compare that
7568     * against the length which was actually received.
7569     + * Note that when RPCSEC/GSS (for example) is used, the
7570     + * data buffer can be padded so dlen might be larger
7571     + * than required. It must never be smaller.
7572     */
7573     - if (dlen != XDR_QUADLEN(len)*4)
7574     + if (dlen < XDR_QUADLEN(len)*4)
7575     return 0;
7576    
7577     if (args->count > max_blocksize) {
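Illustration: the writeargs check above now only requires that the received buffer covers the XDR-rounded payload, since RPCSEC/GSS may pad the buffer beyond that; it must never be smaller. The rounding and comparison as a small sketch:

#include <stdio.h>

/* Number of 4-byte XDR units needed for 'len' bytes (round up). */
#define XDR_QUADLEN(len) (((len) + 3) >> 2)

/* Accept the write if the received buffer covers the rounded-up payload. */
static int writeargs_ok(unsigned dlen, unsigned len)
{
	return dlen >= XDR_QUADLEN(len) * 4;
}

int main(void)
{
	/* 10 payload bytes round up to 12; GSS may pad the buffer beyond that. */
	printf("exact:  %d\n", writeargs_ok(12, 10));  /* 1 */
	printf("padded: %d\n", writeargs_ok(20, 10));  /* 1: larger is fine */
	printf("short:  %d\n", writeargs_ok(11, 10));  /* 0: must never be smaller */
	return 0;
}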
7578     diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
7579     index cb3e7fa..bd3d5b9 100644
7580     --- a/fs/nfsd/nfsxdr.c
7581     +++ b/fs/nfsd/nfsxdr.c
7582     @@ -313,8 +313,11 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
7583     * Round the length of the data which was specified up to
7584     * the next multiple of XDR units and then compare that
7585     * against the length which was actually received.
7586     + * Note that when RPCSEC/GSS (for example) is used, the
7587     + * data buffer can be padded so dlen might be larger
7588     + * than required. It must never be smaller.
7589     */
7590     - if (dlen != XDR_QUADLEN(len)*4)
7591     + if (dlen < XDR_QUADLEN(len)*4)
7592     return 0;
7593    
7594     rqstp->rq_vec[0].iov_base = (void*)p;
7595     diff --git a/fs/splice.c b/fs/splice.c
7596     index 02c39ae..2aa8f5a 100644
7597     --- a/fs/splice.c
7598     +++ b/fs/splice.c
7599     @@ -1234,6 +1234,9 @@ static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
7600     {
7601     int partial;
7602    
7603     + if (!access_ok(VERIFY_READ, src, n))
7604     + return -EFAULT;
7605     +
7606     pagefault_disable();
7607     partial = __copy_from_user_inatomic(dst, src, n);
7608     pagefault_enable();
7609     @@ -1442,6 +1445,11 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
7610     break;
7611     }
7612    
7613     + if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
7614     + error = -EFAULT;
7615     + break;
7616     + }
7617     +
7618     sd.len = 0;
7619     sd.total_len = len;
7620     sd.flags = flags;
7621     diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild
7622     index c68e168..1a922fa 100644
7623     --- a/include/asm-m68k/Kbuild
7624     +++ b/include/asm-m68k/Kbuild
7625     @@ -1 +1,2 @@
7626     include include/asm-generic/Kbuild.asm
7627     +header-y += cachectl.h
7628     diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
7629     index cc6d872..11d5383 100644
7630     --- a/include/asm-powerpc/systbl.h
7631     +++ b/include/asm-powerpc/systbl.h
7632     @@ -308,8 +308,8 @@ COMPAT_SYS_SPU(move_pages)
7633     SYSCALL_SPU(getcpu)
7634     COMPAT_SYS(epoll_pwait)
7635     COMPAT_SYS_SPU(utimensat)
7636     -COMPAT_SYS(fallocate)
7637     COMPAT_SYS_SPU(signalfd)
7638     COMPAT_SYS_SPU(timerfd)
7639     SYSCALL_SPU(eventfd)
7640     COMPAT_SYS_SPU(sync_file_range2)
7641     +COMPAT_SYS(fallocate)
7642     diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
7643     index 1fc6554..38cbec7 100644
7644     --- a/include/asm-sparc64/dma-mapping.h
7645     +++ b/include/asm-sparc64/dma-mapping.h
7646     @@ -25,15 +25,9 @@ struct dma_ops {
7647     void (*sync_single_for_cpu)(struct device *dev,
7648     dma_addr_t dma_handle, size_t size,
7649     enum dma_data_direction direction);
7650     - void (*sync_single_for_device)(struct device *dev,
7651     - dma_addr_t dma_handle, size_t size,
7652     - enum dma_data_direction direction);
7653     void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
7654     int nelems,
7655     enum dma_data_direction direction);
7656     - void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
7657     - int nelems,
7658     - enum dma_data_direction direction);
7659     };
7660     extern const struct dma_ops *dma_ops;
7661    
7662     @@ -105,7 +99,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
7663     size_t size,
7664     enum dma_data_direction direction)
7665     {
7666     - dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
7667     + /* No flushing needed to sync cpu writes to the device. */
7668     }
7669    
7670     static inline void dma_sync_single_range_for_cpu(struct device *dev,
7671     @@ -123,7 +117,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
7672     size_t size,
7673     enum dma_data_direction direction)
7674     {
7675     - dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
7676     + /* No flushing needed to sync cpu writes to the device. */
7677     }
7678    
7679    
7680     @@ -138,7 +132,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
7681     struct scatterlist *sg, int nelems,
7682     enum dma_data_direction direction)
7683     {
7684     - dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
7685     + /* No flushing needed to sync cpu writes to the device. */
7686     }
7687    
7688     static inline int dma_mapping_error(dma_addr_t dma_addr)
7689     diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
7690     index 524d498..3ad45df 100644
7691     --- a/include/asm-sparc64/hypervisor.h
7692     +++ b/include/asm-sparc64/hypervisor.h
7693     @@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
7694     */
7695     #define HV_FAST_MMU_DEMAP_ALL 0x24
7696    
7697     +#ifndef __ASSEMBLY__
7698     +extern void sun4v_mmu_demap_all(void);
7699     +#endif
7700     +
7701     /* mmu_map_perm_addr()
7702     * TRAP: HV_FAST_TRAP
7703     * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
7704     diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
7705     index 1393e57..f59f257 100644
7706     --- a/include/asm-sparc64/pci.h
7707     +++ b/include/asm-sparc64/pci.h
7708     @@ -200,6 +200,10 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
7709     struct device_node;
7710     extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
7711    
7712     +#define HAVE_ARCH_PCI_RESOURCE_TO_USER
7713     +extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
7714     + const struct resource *rsrc,
7715     + resource_size_t *start, resource_size_t *end);
7716     #endif /* __KERNEL__ */
7717    
7718     #endif /* __SPARC64_PCI_H */
7719     diff --git a/include/linux/acpi.h b/include/linux/acpi.h
7720     index bf5e000..919e0a5 100644
7721     --- a/include/linux/acpi.h
7722     +++ b/include/linux/acpi.h
7723     @@ -40,6 +40,7 @@
7724     #include <acpi/acpi_drivers.h>
7725     #include <acpi/acpi_numa.h>
7726     #include <asm/acpi.h>
7727     +#include <linux/dmi.h>
7728    
7729    
7730     #ifdef CONFIG_ACPI
7731     @@ -187,7 +188,9 @@ extern int ec_transaction(u8 command,
7732     #endif /*CONFIG_ACPI_EC*/
7733    
7734     extern int acpi_blacklisted(void);
7735     -extern void acpi_bios_year(char *s);
7736     +#ifdef CONFIG_DMI
7737     +extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
7738     +#endif
7739    
7740     #define ACPI_CSTATE_LIMIT_DEFINED /* for driver builds */
7741     #ifdef CONFIG_ACPI
7742     @@ -247,5 +250,5 @@ static inline int acpi_boot_table_init(void)
7743     return 0;
7744     }
7745    
7746     -#endif /* CONFIG_ACPI */
7747     +#endif /* !CONFIG_ACPI */
7748     #endif /*_LINUX_ACPI_H*/
7749     diff --git a/include/linux/dmi.h b/include/linux/dmi.h
7750     index b8ac7b0..d8a946f 100644
7751     --- a/include/linux/dmi.h
7752     +++ b/include/linux/dmi.h
7753     @@ -78,6 +78,7 @@ extern struct dmi_device * dmi_find_device(int type, const char *name,
7754     extern void dmi_scan_machine(void);
7755     extern int dmi_get_year(int field);
7756     extern int dmi_name_in_vendors(char *str);
7757     +extern int dmi_available;
7758    
7759     #else
7760    
7761     @@ -87,6 +88,7 @@ static inline struct dmi_device * dmi_find_device(int type, const char *name,
7762     struct dmi_device *from) { return NULL; }
7763     static inline int dmi_get_year(int year) { return 0; }
7764     static inline int dmi_name_in_vendors(char *s) { return 0; }
7765     +#define dmi_available 0
7766    
7767     #endif
7768    
7769     diff --git a/include/linux/freezer.h b/include/linux/freezer.h
7770     index efded00..7fa9500 100644
7771     --- a/include/linux/freezer.h
7772     +++ b/include/linux/freezer.h
7773     @@ -4,6 +4,7 @@
7774     #define FREEZER_H_INCLUDED
7775    
7776     #include <linux/sched.h>
7777     +#include <linux/wait.h>
7778    
7779     #ifdef CONFIG_PM_SLEEP
7780     /*
7781     @@ -126,6 +127,24 @@ static inline void set_freezable(void)
7782     current->flags &= ~PF_NOFREEZE;
7783     }
7784    
7785     +/*
7786     + * Freezer-friendly wrapper around wait_event_interruptible(), originally
7787     + * defined in <linux/wait.h>
7788     + */
7789     +
7790     +#define wait_event_freezable(wq, condition) \
7791     +({ \
7792     + int __retval; \
7793     + do { \
7794     + __retval = wait_event_interruptible(wq, \
7795     + (condition) || freezing(current)); \
7796     + if (__retval && !freezing(current)) \
7797     + break; \
7798     + else if (!(condition)) \
7799     + __retval = -ERESTARTSYS; \
7800     + } while (try_to_freeze()); \
7801     + __retval; \
7802     +})
7803     #else /* !CONFIG_PM_SLEEP */
7804     static inline int frozen(struct task_struct *p) { return 0; }
7805     static inline int freezing(struct task_struct *p) { return 0; }
7806     @@ -143,6 +162,10 @@ static inline void freezer_do_not_count(void) {}
7807     static inline void freezer_count(void) {}
7808     static inline int freezer_should_skip(struct task_struct *p) { return 0; }
7809     static inline void set_freezable(void) {}
7810     +
7811     +#define wait_event_freezable(wq, condition) \
7812     + wait_event_interruptible(wq, condition)
7813     +
7814     #endif /* !CONFIG_PM_SLEEP */
7815    
7816     #endif /* FREEZER_H_INCLUDED */
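
wait_event_freezable() wraps wait_event_interruptible() so a kernel thread both reacts to the freezer and retries its wait after thawing. A hypothetical kernel-thread loop sketching the intended use; my_wq, my_work_pending() and my_do_work() are made-up names and the fragment is illustrative, not compilable on its own:

    static int my_thread(void *unused)
    {
        set_freezable();                        /* opt in to being frozen */

        while (!kthread_should_stop()) {
            /* sleeps interruptibly; wakes and enters the refrigerator on demand */
            wait_event_freezable(my_wq,
                    my_work_pending() || kthread_should_stop());

            if (my_work_pending())
                my_do_work();
        }
        return 0;
    }
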
7817     diff --git a/include/linux/input.h b/include/linux/input.h
7818     index 36e00aa..5ec6b68 100644
7819     --- a/include/linux/input.h
7820     +++ b/include/linux/input.h
7821     @@ -853,7 +853,7 @@ struct ff_rumble_effect {
7822     * defining effect parameters
7823     *
7824     * This structure is sent through ioctl from the application to the driver.
7825     - * To create a new effect aplication should set its @id to -1; the kernel
7826     + * To create a new effect application should set its @id to -1; the kernel
7827     * will return assigned @id which can later be used to update or delete
7828     * this effect.
7829     *
7830     @@ -933,9 +933,82 @@ struct ff_effect {
7831     #define BIT(x) (1UL<<((x)%BITS_PER_LONG))
7832     #define LONG(x) ((x)/BITS_PER_LONG)
7833    
7834     +/**
7835     + * struct input_dev - represents an input device
7836     + * @name: name of the device
7837     + * @phys: physical path to the device in the system hierarchy
7838     + * @uniq: unique identification code for the device (if device has it)
7839     + * @id: id of the device (struct input_id)
7840     + * @evbit: bitmap of types of events supported by the device (EV_KEY,
7841     + * EV_REL, etc.)
7842     + * @keybit: bitmap of keys/buttons this device has
7843     + * @relbit: bitmap of relative axes for the device
7844     + * @absbit: bitmap of absolute axes for the device
7845     + * @mscbit: bitmap of miscellaneous events supported by the device
7846     + * @ledbit: bitmap of leds present on the device
7847     + * @sndbit: bitmap of sound effects supported by the device
7848     + * @ffbit: bitmap of force feedback effects supported by the device
7849     + * @swbit: bitmap of switches present on the device
7850     + * @keycodemax: size of keycode table
7851     + * @keycodesize: size of elements in keycode table
7852     + * @keycode: map of scancodes to keycodes for this device
7853     + * @setkeycode: optional method to alter current keymap, used to implement
7854     + * sparse keymaps. If not supplied default mechanism will be used
7855     + * @getkeycode: optional method to retrieve current keymap. If not supplied
7856     + * default mechanism will be used
7857     + * @ff: force feedback structure associated with the device if device
7858     + * supports force feedback effects
7859     + * @repeat_key: stores key code of the last key pressed; used to implement
7860     + * software autorepeat
7861     + * @timer: timer for software autorepeat
7862     + * @sync: set to 1 when there were no new events since last EV_SYNC
7863     + * @abs: current values for reports from absolute axes
7864     + * @rep: current values for autorepeat parameters (delay, rate)
7865     + * @key: reflects current state of device's keys/buttons
7866     + * @led: reflects current state of device's LEDs
7867     + * @snd: reflects current state of sound effects
7868     + * @sw: reflects current state of device's switches
7869     + * @absmax: maximum values for events coming from absolute axes
7870     + * @absmin: minimum values for events coming from absolute axes
7871     + * @absfuzz: describes noisiness for axes
7872     + * @absflat: size of the center flat position (used by joydev)
7873     + * @open: this method is called when the very first user calls
7874     + * input_open_device(). The driver must prepare the device
7875     + * to start generating events (start polling thread,
7876     + * request an IRQ, submit URB, etc.)
7877     + * @close: this method is called when the very last user calls
7878     + * input_close_device().
7879     + * @flush: purges the device. Most commonly used to get rid of force
7880     + * feedback effects loaded into the device when disconnecting
7881     + * from it
7882     + * @event: event handler for events sent _to_ the device, like EV_LED
7883     + * or EV_SND. The device is expected to carry out the requested
7884     + * action (turn on a LED, play sound, etc.) The call is protected
7885     + * by @event_lock and must not sleep
7886     + * @grab: input handle that currently has the device grabbed (via
7887     + * EVIOCGRAB ioctl). When a handle grabs a device it becomes sole
7888     + * recipient for all input events coming from the device
7889     + * @event_lock: this spinlock is taken when input core receives
7890     + * and processes a new event for the device (in input_event()).
7891     + * Code that accesses and/or modifies parameters of a device
7892     + * (such as keymap or absmin, absmax, absfuzz, etc.) after device
7893     + * has been registered with input core must take this lock.
7894     + * @mutex: serializes calls to open(), close() and flush() methods
7895     + * @users: stores number of users (input handlers) that opened this
7896     + * device. It is used by input_open_device() and input_close_device()
7897     + * to make sure that dev->open() is only called when the first
7898     + * user opens device and dev->close() is called when the very
7899     + * last user closes the device
7900     + * @going_away: marks devices that are in a middle of unregistering and
7901     + * causes input_open_device*() fail with -ENODEV.
7902     + * @dev: driver model's view of this device
7903     + * @h_list: list of input handles associated with the device. When
7904     + * accessing the list dev->mutex must be held
7905     + * @node: used to place the device onto input_dev_list
7906     + */
7907     struct input_dev {
7908    
7909     - void *private;
7910     + void *private; /* do not use */
7911    
7912     const char *name;
7913     const char *phys;
7914     @@ -963,8 +1036,6 @@ struct input_dev {
7915     unsigned int repeat_key;
7916     struct timer_list timer;
7917    
7918     - int state;
7919     -
7920     int sync;
7921    
7922     int abs[ABS_MAX + 1];
7923     @@ -987,8 +1058,11 @@ struct input_dev {
7924    
7925     struct input_handle *grab;
7926    
7927     - struct mutex mutex; /* serializes open and close operations */
7928     + spinlock_t event_lock;
7929     + struct mutex mutex;
7930     +
7931     unsigned int users;
7932     + int going_away;
7933    
7934     struct device dev;
7935     union { /* temporarily so while we switching to struct device */
7936     @@ -1054,7 +1128,9 @@ struct input_handle;
7937     /**
7938     * struct input_handler - implements one of interfaces for input devices
7939     * @private: driver-specific data
7940     - * @event: event handler
7941     + * @event: event handler. This method is being called by input core with
7942     + * interrupts disabled and dev->event_lock spinlock held and so
7943     + * it may not sleep
7944     * @connect: called when attaching a handler to an input device
7945     * @disconnect: disconnects a handler from input device
7946     * @start: starts handler for given handle. This function is called by
7947     @@ -1066,10 +1142,18 @@ struct input_handle;
7948     * @name: name of the handler, to be shown in /proc/bus/input/handlers
7949     * @id_table: pointer to a table of input_device_ids this driver can
7950     * handle
7951     - * @blacklist: prointer to a table of input_device_ids this driver should
7952     + * @blacklist: pointer to a table of input_device_ids this driver should
7953     * ignore even if they match @id_table
7954     * @h_list: list of input handles associated with the handler
7955     * @node: for placing the driver onto input_handler_list
7956     + *
7957     + * Input handlers attach to input devices and create input handles. There
7958     + * are likely several handlers attached to any given input device at the
7959     + * same time. All of them will get their copy of input event generated by
7960     + * the device.
7961     + *
7962     + * Note that input core serializes calls to connect() and disconnect()
7963     + * methods.
7964     */
7965     struct input_handler {
7966    
7967     @@ -1091,6 +1175,18 @@ struct input_handler {
7968     struct list_head node;
7969     };
7970    
7971     +/**
7972     + * struct input_handle - links input device with an input handler
7973     + * @private: handler-specific data
7974     + * @open: counter showing whether the handle is 'open', i.e. should deliver
7975     + * events from its device
7976     + * @name: name given to the handle by handler that created it
7977     + * @dev: input device the handle is attached to
7978     + * @handler: handler that works with the device through this handle
7979     + * @d_node: used to put the handle on device's list of attached handles
7980     + * @h_node: used to put the handle on handler's list of handles from which
7981     + * it gets events
7982     + */
7983     struct input_handle {
7984    
7985     void *private;
7986     @@ -1213,7 +1309,7 @@ extern struct class input_class;
7987     * @max_effects: maximum number of effects supported by device
7988     * @effects: pointer to an array of effects currently loaded into device
7989     * @effect_owners: array of effect owners; when file handle owning
7990     - * an effect gets closed the effcet is automatically erased
7991     + * an effect gets closed the effect is automatically erased
7992     *
7993     * Every force-feedback device must implement upload() and playback()
7994     * methods; erase() is optional. set_gain() and set_autocenter() need
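
The new kernel-doc above describes the fields a driver is expected to fill in before registering a device. A hypothetical driver fragment showing a typical minimal setup; every my_* name is an assumption, while the input core calls and the BIT()/LONG() macros come from this header:

    static struct input_dev *my_input;

    static int my_probe(void)
    {
        int err;

        my_input = input_allocate_device();
        if (!my_input)
            return -ENOMEM;

        my_input->name = "Example Button";
        my_input->evbit[0] = BIT(EV_KEY);                    /* event types    */
        my_input->keybit[LONG(KEY_ENTER)] = BIT(KEY_ENTER);  /* keys we report */
        my_input->open  = my_open;    /* called by the first input_open_device() */
        my_input->close = my_close;   /* called by the last input_close_device() */

        err = input_register_device(my_input);
        if (err)
            input_free_device(my_input);
        return err;
    }
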
7995     diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
7996     index 97de8aa..0349e82 100644
7997     --- a/include/linux/pci_ids.h
7998     +++ b/include/linux/pci_ids.h
7999     @@ -2287,6 +2287,8 @@
8000     #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
8001     #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
8002     #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
8003     +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
8004     +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
8005     #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
8006     #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
8007     #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
8008     diff --git a/include/linux/pm.h b/include/linux/pm.h
8009     index 48b71ba..71e589b 100644
8010     --- a/include/linux/pm.h
8011     +++ b/include/linux/pm.h
8012     @@ -344,6 +344,15 @@ static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
8013     device_set_wakeup_enable(dev,val); \
8014     } while(0)
8015    
8016     +/*
8017     + * Global Power Management flags
8018     + * Used to keep APM and ACPI from both being active
8019     + */
8020     +extern unsigned int pm_flags;
8021     +
8022     +#define PM_APM 1
8023     +#define PM_ACPI 2
8024     +
8025     #endif /* __KERNEL__ */
8026    
8027     #endif /* _LINUX_PM_H */
8028     diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
8029     index 514729a..446f4f4 100644
8030     --- a/include/linux/pm_legacy.h
8031     +++ b/include/linux/pm_legacy.h
8032     @@ -4,10 +4,6 @@
8033    
8034     #ifdef CONFIG_PM_LEGACY
8035    
8036     -extern int pm_active;
8037     -
8038     -#define PM_IS_ACTIVE() (pm_active != 0)
8039     -
8040     /*
8041     * Register a device with power management
8042     */
8043     @@ -21,8 +17,6 @@ int __deprecated pm_send_all(pm_request_t rqst, void *data);
8044    
8045     #else /* CONFIG_PM_LEGACY */
8046    
8047     -#define PM_IS_ACTIVE() 0
8048     -
8049     static inline struct pm_dev *pm_register(pm_dev_t type,
8050     unsigned long id,
8051     pm_callback callback)
8052     diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
8053     index 9371c61..39b6671 100644
8054     --- a/include/linux/quicklist.h
8055     +++ b/include/linux/quicklist.h
8056     @@ -56,14 +56,6 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
8057     struct page *page)
8058     {
8059     struct quicklist *q;
8060     - int nid = page_to_nid(page);
8061     -
8062     - if (unlikely(nid != numa_node_id())) {
8063     - if (dtor)
8064     - dtor(p);
8065     - __free_page(page);
8066     - return;
8067     - }
8068    
8069     q = &get_cpu_var(quicklist)[nr];
8070     *(void **)p = q->page;
8071     diff --git a/kernel/kmod.c b/kernel/kmod.c
8072     index c6a4f8a..bb7df2a 100644
8073     --- a/kernel/kmod.c
8074     +++ b/kernel/kmod.c
8075     @@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
8076     enum umh_wait wait)
8077     {
8078     DECLARE_COMPLETION_ONSTACK(done);
8079     - int retval;
8080     + int retval = 0;
8081    
8082     helper_lock();
8083     - if (sub_info->path[0] == '\0') {
8084     - retval = 0;
8085     + if (sub_info->path[0] == '\0')
8086     goto out;
8087     - }
8088    
8089     if (!khelper_wq || usermodehelper_disabled) {
8090     retval = -EBUSY;
8091     @@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
8092     sub_info->wait = wait;
8093    
8094     queue_work(khelper_wq, &sub_info->work);
8095     - if (wait == UMH_NO_WAIT) /* task has freed sub_info */
8096     - return 0;
8097     + if (wait == UMH_NO_WAIT) /* task has freed sub_info */
8098     + goto unlock;
8099     wait_for_completion(&done);
8100     retval = sub_info->retval;
8101    
8102     - out:
8103     +out:
8104     call_usermodehelper_freeinfo(sub_info);
8105     +unlock:
8106     helper_unlock();
8107     return retval;
8108     }
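
For context on why UMH_NO_WAIT must bypass the freeinfo path above: in that mode the worker owns and frees sub_info, so the caller may not touch it afterwards. A hypothetical caller built on the call_usermodehelper() convenience wrapper; the helper path, arguments and environment are invented, and UMH_WAIT_PROC is assumed to be the synchronous mode in this tree:

    static int my_run_helper(void)
    {
        char *argv[] = { "/sbin/my-helper", "start", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* UMH_WAIT_PROC: block until the helper exits and return its status;
         * UMH_NO_WAIT would return immediately and the worker would free
         * the request on its own. */
        return call_usermodehelper("/sbin/my-helper", argv, envp, UMH_WAIT_PROC);
    }
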
8109     diff --git a/kernel/power/main.c b/kernel/power/main.c
8110     index 350b485..0e44534 100644
8111     --- a/kernel/power/main.c
8112     +++ b/kernel/power/main.c
8113     @@ -27,6 +27,9 @@ BLOCKING_NOTIFIER_HEAD(pm_chain_head);
8114    
8115     DEFINE_MUTEX(pm_mutex);
8116    
8117     +unsigned int pm_flags;
8118     +EXPORT_SYMBOL(pm_flags);
8119     +
8120     #ifdef CONFIG_SUSPEND
8121    
8122     /* This is just an arbitrary number */
8123     diff --git a/kernel/power/pm.c b/kernel/power/pm.c
8124     index c50d152..60c73fa 100644
8125     --- a/kernel/power/pm.c
8126     +++ b/kernel/power/pm.c
8127     @@ -27,8 +27,6 @@
8128     #include <linux/interrupt.h>
8129     #include <linux/mutex.h>
8130    
8131     -int pm_active;
8132     -
8133     /*
8134     * Locking notes:
8135     * pm_devs_lock can be a semaphore providing pm ops are not called
8136     @@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data)
8137    
8138     EXPORT_SYMBOL(pm_register);
8139     EXPORT_SYMBOL(pm_send_all);
8140     -EXPORT_SYMBOL(pm_active);
8141     -
8142    
8143     diff --git a/kernel/relay.c b/kernel/relay.c
8144     index ad85501..91bbfb7 100644
8145     --- a/kernel/relay.c
8146     +++ b/kernel/relay.c
8147     @@ -92,6 +92,7 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
8148     return -EINVAL;
8149    
8150     vma->vm_ops = &relay_file_mmap_ops;
8151     + vma->vm_flags |= VM_DONTEXPAND;
8152     vma->vm_private_data = buf;
8153     buf->chan->cb->buf_mapped(buf, filp);
8154    
8155     diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
8156     index 0962e05..1984669 100644
8157     --- a/kernel/time/tick-broadcast.c
8158     +++ b/kernel/time/tick-broadcast.c
8159     @@ -387,45 +387,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
8160     }
8161    
8162     /*
8163     - * Reprogram the broadcast device:
8164     - *
8165     - * Called with tick_broadcast_lock held and interrupts disabled.
8166     - */
8167     -static int tick_broadcast_reprogram(void)
8168     -{
8169     - ktime_t expires = { .tv64 = KTIME_MAX };
8170     - struct tick_device *td;
8171     - int cpu;
8172     -
8173     - /*
8174     - * Find the event which expires next:
8175     - */
8176     - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
8177     - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
8178     - td = &per_cpu(tick_cpu_device, cpu);
8179     - if (td->evtdev->next_event.tv64 < expires.tv64)
8180     - expires = td->evtdev->next_event;
8181     - }
8182     -
8183     - if (expires.tv64 == KTIME_MAX)
8184     - return 0;
8185     -
8186     - return tick_broadcast_set_event(expires, 0);
8187     -}
8188     -
8189     -/*
8190     * Handle oneshot mode broadcasting
8191     */
8192     static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
8193     {
8194     struct tick_device *td;
8195     cpumask_t mask;
8196     - ktime_t now;
8197     + ktime_t now, next_event;
8198     int cpu;
8199    
8200     spin_lock(&tick_broadcast_lock);
8201     again:
8202     dev->next_event.tv64 = KTIME_MAX;
8203     + next_event.tv64 = KTIME_MAX;
8204     mask = CPU_MASK_NONE;
8205     now = ktime_get();
8206     /* Find all expired events */
8207     @@ -434,19 +408,31 @@ again:
8208     td = &per_cpu(tick_cpu_device, cpu);
8209     if (td->evtdev->next_event.tv64 <= now.tv64)
8210     cpu_set(cpu, mask);
8211     + else if (td->evtdev->next_event.tv64 < next_event.tv64)
8212     + next_event.tv64 = td->evtdev->next_event.tv64;
8213     }
8214    
8215     /*
8216     - * Wakeup the cpus which have an expired event. The broadcast
8217     - * device is reprogrammed in the return from idle code.
8218     + * Wakeup the cpus which have an expired event.
8219     + */
8220     + tick_do_broadcast(mask);
8221     +
8222     + /*
8223     + * Two reasons for reprogram:
8224     + *
8225     + * - The global event did not expire any CPU local
8226     + * events. This happens in dyntick mode, as the maximum PIT
8227     + * delta is quite small.
8228     + *
8229     + * - There are pending events on sleeping CPUs which were not
8230     + * in the event mask
8231     */
8232     - if (!tick_do_broadcast(mask)) {
8233     + if (next_event.tv64 != KTIME_MAX) {
8234     /*
8235     - * The global event did not expire any CPU local
8236     - * events. This happens in dyntick mode, as the
8237     - * maximum PIT delta is quite small.
8238     + * Rearm the broadcast device. If event expired,
8239     + * repeat the above
8240     */
8241     - if (tick_broadcast_reprogram())
8242     + if (tick_broadcast_set_event(next_event, 0))
8243     goto again;
8244     }
8245     spin_unlock(&tick_broadcast_lock);
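
The rewritten handler does two things in one pass: it wakes every CPU whose local event has already expired and remembers the earliest still-pending expiry so the broadcast device can be rearmed for it. A stand-alone sketch of that selection logic with arbitrary example times (the real code of course operates on per-CPU tick devices and ktime_t):

    #include <stdio.h>
    #include <stdint.h>

    #define KTIME_MAX INT64_MAX

    int main(void)
    {
        int64_t now = 100;
        int64_t expiry[] = { 90, 250, 170, 400 };   /* per-CPU next events */
        int64_t next_event = KTIME_MAX;
        unsigned int cpu;

        for (cpu = 0; cpu < 4; cpu++) {
            if (expiry[cpu] <= now)
                printf("cpu%u expired -> broadcast wakeup\n", cpu);
            else if (expiry[cpu] < next_event)
                next_event = expiry[cpu];            /* earliest pending event */
        }

        if (next_event != KTIME_MAX)
            printf("rearm broadcast device for t=%lld\n", (long long)next_event);
        return 0;
    }
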
8246     diff --git a/mm/mmap.c b/mm/mmap.c
8247     index 0d40e66..f6058f6 100644
8248     --- a/mm/mmap.c
8249     +++ b/mm/mmap.c
8250     @@ -1619,6 +1619,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
8251     */
8252     if (unlikely(anon_vma_prepare(vma)))
8253     return -ENOMEM;
8254     +
8255     + address &= PAGE_MASK;
8256     + error = security_file_mmap(0, 0, 0, 0, address, 1);
8257     + if (error)
8258     + return error;
8259     +
8260     anon_vma_lock(vma);
8261    
8262     /*
8263     @@ -1626,8 +1632,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
8264     * is required to hold the mmap_sem in read mode. We need the
8265     * anon_vma lock to serialize against concurrent expand_stacks.
8266     */
8267     - address &= PAGE_MASK;
8268     - error = 0;
8269    
8270     /* Somebody else might have raced and expanded it already */
8271     if (address < vma->vm_start) {
8272     @@ -1938,6 +1942,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
8273     if (is_hugepage_only_range(mm, addr, len))
8274     return -EINVAL;
8275    
8276     + error = security_file_mmap(0, 0, 0, 0, addr, 1);
8277     + if (error)
8278     + return error;
8279     +
8280     flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
8281    
8282     error = arch_mmap_check(addr, len, flags);
8283     @@ -2209,7 +2217,7 @@ int install_special_mapping(struct mm_struct *mm,
8284     vma->vm_start = addr;
8285     vma->vm_end = addr + len;
8286    
8287     - vma->vm_flags = vm_flags | mm->def_flags;
8288     + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
8289     vma->vm_page_prot = protection_map[vma->vm_flags & 7];
8290    
8291     vma->vm_ops = &special_mapping_vmops;
8292     diff --git a/mm/quicklist.c b/mm/quicklist.c
8293     index ae8189c..3f703f7 100644
8294     --- a/mm/quicklist.c
8295     +++ b/mm/quicklist.c
8296     @@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
8297     static unsigned long max_pages(unsigned long min_pages)
8298     {
8299     unsigned long node_free_pages, max;
8300     + struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
8301     +
8302     + node_free_pages =
8303     +#ifdef CONFIG_ZONE_DMA
8304     + zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
8305     +#endif
8306     +#ifdef CONFIG_ZONE_DMA32
8307     + zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
8308     +#endif
8309     + zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
8310    
8311     - node_free_pages = node_page_state(numa_node_id(),
8312     - NR_FREE_PAGES);
8313     max = node_free_pages / FRACTION_OF_NODE_MEM;
8314     return max(max, min_pages);
8315     }
8316     diff --git a/mm/truncate.c b/mm/truncate.c
8317     index 5cdfbc1..39da569 100644
8318     --- a/mm/truncate.c
8319     +++ b/mm/truncate.c
8320     @@ -95,11 +95,11 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
8321     if (page->mapping != mapping)
8322     return;
8323    
8324     - cancel_dirty_page(page, PAGE_CACHE_SIZE);
8325     -
8326     if (PagePrivate(page))
8327     do_invalidatepage(page, 0);
8328    
8329     + cancel_dirty_page(page, PAGE_CACHE_SIZE);
8330     +
8331     remove_from_page_cache(page);
8332     ClearPageUptodate(page);
8333     ClearPageMappedToDisk(page);
8334     diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
8335     index ef3f789..21af441 100644
8336     --- a/net/8021q/vlan.c
8337     +++ b/net/8021q/vlan.c
8338     @@ -768,7 +768,7 @@ static int vlan_ioctl_handler(void __user *arg)
8339     case SET_VLAN_NAME_TYPE_CMD:
8340     err = -EPERM;
8341     if (!capable(CAP_NET_ADMIN))
8342     - return -EPERM;
8343     + break;
8344     if ((args.u.name_type >= 0) &&
8345     (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
8346     vlan_name_type = args.u.name_type;
8347     diff --git a/net/atm/mpc.c b/net/atm/mpc.c
8348     index 7c85aa5..181c1c8 100644
8349     --- a/net/atm/mpc.c
8350     +++ b/net/atm/mpc.c
8351     @@ -542,6 +542,13 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
8352     if (eth->h_proto != htons(ETH_P_IP))
8353     goto non_ip; /* Multi-Protocol Over ATM :-) */
8354    
8355     + /* Weed out funny packets (e.g., AF_PACKET or raw). */
8356     + if (skb->len < ETH_HLEN + sizeof(struct iphdr))
8357     + goto non_ip;
8358     + skb_set_network_header(skb, ETH_HLEN);
8359     + if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
8360     + goto non_ip;
8361     +
8362     while (i < mpc->number_of_mps_macs) {
8363     if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
8364     if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
8365     diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
8366     index 0ddaff0..8a9f0ac 100644
8367     --- a/net/ax25/ax25_in.c
8368     +++ b/net/ax25/ax25_in.c
8369     @@ -124,7 +124,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
8370     }
8371    
8372     skb_pull(skb, 1); /* Remove PID */
8373     - skb_reset_mac_header(skb);
8374     + skb->mac_header = skb->network_header;
8375     skb_reset_network_header(skb);
8376     skb->dev = ax25->ax25_dev->dev;
8377     skb->pkt_type = PACKET_HOST;
8378     diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
8379     index fc13130..22545bd 100644
8380     --- a/net/bridge/br_netfilter.c
8381     +++ b/net/bridge/br_netfilter.c
8382     @@ -142,6 +142,23 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
8383     return skb->nf_bridge;
8384     }
8385    
8386     +static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
8387     +{
8388     + struct nf_bridge_info *nf_bridge = skb->nf_bridge;
8389     +
8390     + if (atomic_read(&nf_bridge->use) > 1) {
8391     + struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
8392     +
8393     + if (tmp) {
8394     + memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
8395     + atomic_set(&tmp->use, 1);
8396     + nf_bridge_put(nf_bridge);
8397     + }
8398     + nf_bridge = tmp;
8399     + }
8400     + return nf_bridge;
8401     +}
8402     +
8403     static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
8404     {
8405     unsigned int len = nf_bridge_encap_header_len(skb);
8406     @@ -247,8 +264,9 @@ static void __br_dnat_complain(void)
8407     * Let us first consider the case that ip_route_input() succeeds:
8408     *
8409     * If skb->dst->dev equals the logical bridge device the packet
8410     - * came in on, we can consider this bridging. We then call
8411     - * skb->dst->output() which will make the packet enter br_nf_local_out()
8412     + * came in on, we can consider this bridging. The packet is passed
8413     + * through the neighbour output function to build a new destination
8414     + * MAC address, which will make the packet enter br_nf_local_out()
8415     * not much later. In that function it is assured that the iptables
8416     * FORWARD chain is traversed for the packet.
8417     *
8418     @@ -285,12 +303,17 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
8419     skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
8420    
8421     skb->dev = bridge_parent(skb->dev);
8422     - if (!skb->dev)
8423     - kfree_skb(skb);
8424     - else {
8425     + if (skb->dev) {
8426     + struct dst_entry *dst = skb->dst;
8427     +
8428     nf_bridge_pull_encap_header(skb);
8429     - skb->dst->output(skb);
8430     +
8431     + if (dst->hh)
8432     + return neigh_hh_output(dst->hh, skb);
8433     + else if (dst->neighbour)
8434     + return dst->neighbour->output(skb);
8435     }
8436     + kfree_skb(skb);
8437     return 0;
8438     }
8439    
8440     @@ -638,6 +661,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
8441     if (!skb->nf_bridge)
8442     return NF_ACCEPT;
8443    
8444     + /* Need exclusive nf_bridge_info since we might have multiple
8445     + * different physoutdevs. */
8446     + if (!nf_bridge_unshare(skb))
8447     + return NF_DROP;
8448     +
8449     parent = bridge_parent(out);
8450     if (!parent)
8451     return NF_DROP;
8452     @@ -721,6 +749,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
8453     if (!skb->nf_bridge)
8454     return NF_ACCEPT;
8455    
8456     + /* Need exclusive nf_bridge_info since we might have multiple
8457     + * different physoutdevs. */
8458     + if (!nf_bridge_unshare(skb))
8459     + return NF_DROP;
8460     +
8461     nf_bridge = skb->nf_bridge;
8462     if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT))
8463     return NF_ACCEPT;
8464     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
8465     index 5dbe580..5ccc2d1 100644
8466     --- a/net/ipv4/devinet.c
8467     +++ b/net/ipv4/devinet.c
8468     @@ -1030,7 +1030,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
8469     memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
8470     if (named++ == 0)
8471     continue;
8472     - dot = strchr(ifa->ifa_label, ':');
8473     + dot = strchr(old, ':');
8474     if (dot == NULL) {
8475     sprintf(old, ":%d", named);
8476     dot = old;
8477     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
8478     index 5c14ed6..4b09b25 100644
8479     --- a/net/ipv4/ip_gre.c
8480     +++ b/net/ipv4/ip_gre.c
8481     @@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb)
8482     offset += 4;
8483     }
8484    
8485     - skb_reset_mac_header(skb);
8486     + skb->mac_header = skb->network_header;
8487     __pskb_pull(skb, offset);
8488     skb_reset_network_header(skb);
8489     skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
8490     diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
8491     index c6d7152..b45a610 100644
8492     --- a/net/ipv4/raw.c
8493     +++ b/net/ipv4/raw.c
8494     @@ -270,6 +270,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
8495     int hh_len;
8496     struct iphdr *iph;
8497     struct sk_buff *skb;
8498     + unsigned int iphlen;
8499     int err;
8500    
8501     if (length > rt->u.dst.dev->mtu) {
8502     @@ -303,7 +304,8 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
8503     goto error_fault;
8504    
8505     /* We don't modify invalid header */
8506     - if (length >= sizeof(*iph) && iph->ihl * 4U <= length) {
8507     + iphlen = iph->ihl * 4;
8508     + if (iphlen >= sizeof(*iph) && iphlen <= length) {
8509     if (!iph->saddr)
8510     iph->saddr = rt->rt_src;
8511     iph->check = 0;
8512     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
8513     index 198b732..efc4a3d 100644
8514     --- a/net/ipv4/route.c
8515     +++ b/net/ipv4/route.c
8516     @@ -2648,11 +2648,10 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
8517     int idx, s_idx;
8518    
8519     s_h = cb->args[0];
8520     + if (s_h < 0)
8521     + s_h = 0;
8522     s_idx = idx = cb->args[1];
8523     - for (h = 0; h <= rt_hash_mask; h++) {
8524     - if (h < s_h) continue;
8525     - if (h > s_h)
8526     - s_idx = 0;
8527     + for (h = s_h; h <= rt_hash_mask; h++) {
8528     rcu_read_lock_bh();
8529     for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
8530     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
8531     @@ -2669,6 +2668,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
8532     dst_release(xchg(&skb->dst, NULL));
8533     }
8534     rcu_read_unlock_bh();
8535     + s_idx = 0;
8536     }
8537    
8538     done:
8539     diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
8540     index 4c670cf..82fdca2 100644
8541     --- a/net/irda/af_irda.c
8542     +++ b/net/irda/af_irda.c
8543     @@ -1115,8 +1115,6 @@ static int irda_create(struct socket *sock, int protocol)
8544     self->max_sdu_size_rx = TTP_SAR_UNBOUND;
8545     break;
8546     default:
8547     - IRDA_ERROR("%s: protocol not supported!\n",
8548     - __FUNCTION__);
8549     return -ESOCKTNOSUPPORT;
8550     }
8551     break;
8552     diff --git a/net/key/af_key.c b/net/key/af_key.c
8553     index 7a5e993..9c75b9e 100644
8554     --- a/net/key/af_key.c
8555     +++ b/net/key/af_key.c
8556     @@ -2780,12 +2780,22 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
8557    
8558     static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
8559     {
8560     - return t->aalgos & (1 << d->desc.sadb_alg_id);
8561     + unsigned int id = d->desc.sadb_alg_id;
8562     +
8563     + if (id >= sizeof(t->aalgos) * 8)
8564     + return 0;
8565     +
8566     + return (t->aalgos >> id) & 1;
8567     }
8568    
8569     static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
8570     {
8571     - return t->ealgos & (1 << d->desc.sadb_alg_id);
8572     + unsigned int id = d->desc.sadb_alg_id;
8573     +
8574     + if (id >= sizeof(t->ealgos) * 8)
8575     + return 0;
8576     +
8577     + return (t->ealgos >> id) & 1;
8578     }
8579    
8580     static int count_ah_combs(struct xfrm_tmpl *t)
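
The two helpers above now reject algorithm ids that do not fit in the 32-bit template masks, because shifting a 32-bit value by 32 or more positions is undefined behaviour in C. A small stand-alone demonstration of the guarded bit test; the mask contents and ids are made-up examples:

    #include <stdio.h>

    static int alg_bit_set(unsigned int mask, unsigned int id)
    {
        if (id >= sizeof(mask) * 8)   /* id 32 or larger cannot be in the mask */
            return 0;
        return (mask >> id) & 1;      /* shift count now guaranteed < 32 */
    }

    int main(void)
    {
        unsigned int aalgos = (1u << 2) | (1u << 3);     /* bits 2 and 3 set */

        printf("id  3: %d\n", alg_bit_set(aalgos, 3));   /* 1 */
        printf("id 40: %d\n", alg_bit_set(aalgos, 40));  /* 0, was undefined before */
        return 0;
    }
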
8581     diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
8582     index c7b5d93..69e77d5 100644
8583     --- a/net/netrom/nr_dev.c
8584     +++ b/net/netrom/nr_dev.c
8585     @@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
8586    
8587     /* Spoof incoming device */
8588     skb->dev = dev;
8589     - skb_reset_mac_header(skb);
8590     + skb->mac_header = skb->network_header;
8591     skb_reset_network_header(skb);
8592     skb->pkt_type = PACKET_HOST;
8593    
8594     diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
8595     index 8738ec7..3447803 100644
8596     --- a/net/x25/x25_forward.c
8597     +++ b/net/x25/x25_forward.c
8598     @@ -118,13 +118,14 @@ int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
8599     goto out;
8600    
8601     if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
8602     - goto out;
8603     + goto output;
8604    
8605     }
8606     x25_transmit_link(skbn, nb);
8607    
8608     - x25_neigh_put(nb);
8609     rc = 1;
8610     +output:
8611     + x25_neigh_put(nb);
8612     out:
8613     return rc;
8614     }
8615     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
8616     index 7012891..75629f4 100644
8617     --- a/net/xfrm/xfrm_policy.c
8618     +++ b/net/xfrm/xfrm_policy.c
8619     @@ -1479,8 +1479,9 @@ restart:
8620    
8621     if (sk && sk->sk_policy[1]) {
8622     policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
8623     + err = PTR_ERR(policy);
8624     if (IS_ERR(policy))
8625     - return PTR_ERR(policy);
8626     + goto dropdst;
8627     }
8628    
8629     if (!policy) {
8630     @@ -1491,8 +1492,9 @@ restart:
8631    
8632     policy = flow_cache_lookup(fl, dst_orig->ops->family,
8633     dir, xfrm_policy_lookup);
8634     + err = PTR_ERR(policy);
8635     if (IS_ERR(policy))
8636     - return PTR_ERR(policy);
8637     + goto dropdst;
8638     }
8639    
8640     if (!policy)
8641     @@ -1661,8 +1663,9 @@ restart:
8642     return 0;
8643    
8644     error:
8645     - dst_release(dst_orig);
8646     xfrm_pols_put(pols, npols);
8647     +dropdst:
8648     + dst_release(dst_orig);
8649     *dst_p = NULL;
8650     return err;
8651     }
8652     diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
8653     index 5d3c037..f95aa09 100644
8654     --- a/sound/oss/via82cxxx_audio.c
8655     +++ b/sound/oss/via82cxxx_audio.c
8656     @@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
8657     {
8658     struct via_info *card = vma->vm_private_data;
8659     struct via_channel *chan = &card->ch_out;
8660     + unsigned long max_bufs;
8661     struct page *dmapage;
8662     unsigned long pgoff;
8663     int rd, wr;
8664     @@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
8665     rd = card->ch_in.is_mapped;
8666     wr = card->ch_out.is_mapped;
8667    
8668     -#ifndef VIA_NDEBUG
8669     - {
8670     - unsigned long max_bufs = chan->frag_number;
8671     - if (rd && wr) max_bufs *= 2;
8672     - /* via_dsp_mmap() should ensure this */
8673     - assert (pgoff < max_bufs);
8674     - }
8675     -#endif
8676     + max_bufs = chan->frag_number;
8677     + if (rd && wr)
8678     + max_bufs *= 2;
8679     + if (pgoff >= max_bufs)
8680     + return NOPAGE_SIGBUS;
8681    
8682     /* if full-duplex (read+write) and we have two sets of bufs,
8683     * then the playback buffers come first, sez soundcard.c */
8684     diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
8685     index b76b3dd..e617d7e 100644
8686     --- a/sound/usb/usx2y/usX2Yhwdep.c
8687     +++ b/sound/usb/usx2y/usX2Yhwdep.c
8688     @@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
8689     us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
8690     }
8691     area->vm_ops = &us428ctls_vm_ops;
8692     - area->vm_flags |= VM_RESERVED;
8693     + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
8694     area->vm_private_data = hw->private_data;
8695     return 0;
8696     }
8697     diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
8698     index a5e7bcd..6e70520 100644
8699     --- a/sound/usb/usx2y/usx2yhwdeppcm.c
8700     +++ b/sound/usb/usx2y/usx2yhwdeppcm.c
8701     @@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st
8702     return -ENODEV;
8703     }
8704     area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
8705     - area->vm_flags |= VM_RESERVED;
8706     + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
8707     area->vm_private_data = hw->private_data;
8708     return 0;
8709     }