Contents of /trunk/kernel26-alx/patches-2.6.23-r1/0114-2.6.23.15-all-fixes.patch
Parent Directory | Revision Log
Revision 659 -
(show annotations)
(download)
Mon Jun 23 21:49:40 2008 UTC (16 years, 4 months ago) by niro
File size: 262663 byte(s)
-fixed patch; CVS headers were getting interpreted
1 | diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c |
2 | index f02a8ac..14dc111 100644 |
3 | --- a/arch/i386/kernel/apm.c |
4 | +++ b/arch/i386/kernel/apm.c |
5 | @@ -2256,14 +2256,12 @@ static int __init apm_init(void) |
6 | apm_info.disabled = 1; |
7 | return -ENODEV; |
8 | } |
9 | - if (PM_IS_ACTIVE()) { |
10 | + if (pm_flags & PM_ACPI) { |
11 | printk(KERN_NOTICE "apm: overridden by ACPI.\n"); |
12 | apm_info.disabled = 1; |
13 | return -ENODEV; |
14 | } |
15 | -#ifdef CONFIG_PM_LEGACY |
16 | - pm_active = 1; |
17 | -#endif |
18 | + pm_flags |= PM_APM; |
19 | |
20 | /* |
21 | * Set up a segment that references the real mode segment 0x40 |
22 | @@ -2366,9 +2364,7 @@ static void __exit apm_exit(void) |
23 | kthread_stop(kapmd_task); |
24 | kapmd_task = NULL; |
25 | } |
26 | -#ifdef CONFIG_PM_LEGACY |
27 | - pm_active = 0; |
28 | -#endif |
29 | + pm_flags &= ~PM_APM; |
30 | } |
31 | |
32 | module_init(apm_init); |
33 | diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c |
34 | index fe6aa5a..ecf401b 100644 |
35 | --- a/arch/ia64/kernel/unaligned.c |
36 | +++ b/arch/ia64/kernel/unaligned.c |
37 | @@ -1487,16 +1487,19 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) |
38 | case LDFA_OP: |
39 | case LDFCCLR_OP: |
40 | case LDFCNC_OP: |
41 | - case LDF_IMM_OP: |
42 | - case LDFA_IMM_OP: |
43 | - case LDFCCLR_IMM_OP: |
44 | - case LDFCNC_IMM_OP: |
45 | if (u.insn.x) |
46 | ret = emulate_load_floatpair(ifa, u.insn, regs); |
47 | else |
48 | ret = emulate_load_float(ifa, u.insn, regs); |
49 | break; |
50 | |
51 | + case LDF_IMM_OP: |
52 | + case LDFA_IMM_OP: |
53 | + case LDFCCLR_IMM_OP: |
54 | + case LDFCNC_IMM_OP: |
55 | + ret = emulate_load_float(ifa, u.insn, regs); |
56 | + break; |
57 | + |
58 | case STF_OP: |
59 | case STF_IMM_OP: |
60 | ret = emulate_store_float(ifa, u.insn, regs); |
61 | diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c |
62 | index 777d345..6d4f02e 100644 |
63 | --- a/arch/sparc64/kernel/chmc.c |
64 | +++ b/arch/sparc64/kernel/chmc.c |
65 | @@ -16,6 +15,7 @@ |
66 | #include <linux/init.h> |
67 | #include <asm/spitfire.h> |
68 | #include <asm/chmctrl.h> |
69 | +#include <asm/cpudata.h> |
70 | #include <asm/oplib.h> |
71 | #include <asm/prom.h> |
72 | #include <asm/io.h> |
73 | @@ -242,8 +242,11 @@ int chmc_getunumber(int syndrome_code, |
74 | */ |
75 | static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset) |
76 | { |
77 | - unsigned long ret; |
78 | - int this_cpu = get_cpu(); |
79 | + unsigned long ret, this_cpu; |
80 | + |
81 | + preempt_disable(); |
82 | + |
83 | + this_cpu = real_hard_smp_processor_id(); |
84 | |
85 | if (mp->portid == this_cpu) { |
86 | __asm__ __volatile__("ldxa [%1] %2, %0" |
87 | @@ -255,7 +258,8 @@ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset) |
88 | : "r" (mp->regs + offset), |
89 | "i" (ASI_PHYS_BYPASS_EC_E)); |
90 | } |
91 | - put_cpu(); |
92 | + |
93 | + preempt_enable(); |
94 | |
95 | return ret; |
96 | } |
97 | diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S |
98 | index 8059531..193791c 100644 |
99 | --- a/arch/sparc64/kernel/entry.S |
100 | +++ b/arch/sparc64/kernel/entry.S |
101 | @@ -2593,3 +2593,15 @@ sun4v_mmustat_info: |
102 | retl |
103 | nop |
104 | .size sun4v_mmustat_info, .-sun4v_mmustat_info |
105 | + |
106 | + .globl sun4v_mmu_demap_all |
107 | + .type sun4v_mmu_demap_all,#function |
108 | +sun4v_mmu_demap_all: |
109 | + clr %o0 |
110 | + clr %o1 |
111 | + mov HV_MMU_ALL, %o2 |
112 | + mov HV_FAST_MMU_DEMAP_ALL, %o5 |
113 | + ta HV_FAST_TRAP |
114 | + retl |
115 | + nop |
116 | + .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all |
117 | diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c |
118 | index e8dac81..9bc05cf 100644 |
119 | --- a/arch/sparc64/kernel/pci.c |
120 | +++ b/arch/sparc64/kernel/pci.c |
121 | @@ -1276,4 +1276,20 @@ int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) |
122 | return (device_mask & dma_addr_mask) == dma_addr_mask; |
123 | } |
124 | |
125 | +void pci_resource_to_user(const struct pci_dev *pdev, int bar, |
126 | + const struct resource *rp, resource_size_t *start, |
127 | + resource_size_t *end) |
128 | +{ |
129 | + struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
130 | + unsigned long offset; |
131 | + |
132 | + if (rp->flags & IORESOURCE_IO) |
133 | + offset = pbm->io_space.start; |
134 | + else |
135 | + offset = pbm->mem_space.start; |
136 | + |
137 | + *start = rp->start - offset; |
138 | + *end = rp->end - offset; |
139 | +} |
140 | + |
141 | #endif /* !(CONFIG_PCI) */ |
142 | diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c |
143 | index c73b7a4..34e8a01 100644 |
144 | --- a/arch/sparc64/kernel/smp.c |
145 | +++ b/arch/sparc64/kernel/smp.c |
146 | @@ -476,7 +476,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c |
147 | */ |
148 | static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) |
149 | { |
150 | - u64 pstate, ver; |
151 | + u64 pstate, ver, busy_mask; |
152 | int nack_busy_id, is_jbus, need_more; |
153 | |
154 | if (cpus_empty(mask)) |
155 | @@ -508,14 +508,20 @@ retry: |
156 | "i" (ASI_INTR_W)); |
157 | |
158 | nack_busy_id = 0; |
159 | + busy_mask = 0; |
160 | { |
161 | int i; |
162 | |
163 | for_each_cpu_mask(i, mask) { |
164 | u64 target = (i << 14) | 0x70; |
165 | |
166 | - if (!is_jbus) |
167 | + if (is_jbus) { |
168 | + busy_mask |= (0x1UL << (i * 2)); |
169 | + } else { |
170 | target |= (nack_busy_id << 24); |
171 | + busy_mask |= (0x1UL << |
172 | + (nack_busy_id * 2)); |
173 | + } |
174 | __asm__ __volatile__( |
175 | "stxa %%g0, [%0] %1\n\t" |
176 | "membar #Sync\n\t" |
177 | @@ -531,15 +537,16 @@ retry: |
178 | |
179 | /* Now, poll for completion. */ |
180 | { |
181 | - u64 dispatch_stat; |
182 | + u64 dispatch_stat, nack_mask; |
183 | long stuck; |
184 | |
185 | stuck = 100000 * nack_busy_id; |
186 | + nack_mask = busy_mask << 1; |
187 | do { |
188 | __asm__ __volatile__("ldxa [%%g0] %1, %0" |
189 | : "=r" (dispatch_stat) |
190 | : "i" (ASI_INTR_DISPATCH_STAT)); |
191 | - if (dispatch_stat == 0UL) { |
192 | + if (!(dispatch_stat & (busy_mask | nack_mask))) { |
193 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
194 | : : "r" (pstate)); |
195 | if (unlikely(need_more)) { |
196 | @@ -556,12 +563,12 @@ retry: |
197 | } |
198 | if (!--stuck) |
199 | break; |
200 | - } while (dispatch_stat & 0x5555555555555555UL); |
201 | + } while (dispatch_stat & busy_mask); |
202 | |
203 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
204 | : : "r" (pstate)); |
205 | |
206 | - if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) { |
207 | + if (dispatch_stat & busy_mask) { |
208 | /* Busy bits will not clear, continue instead |
209 | * of freezing up on this cpu. |
210 | */ |
211 | diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c |
212 | index 3010227..ed2484d 100644 |
213 | --- a/arch/sparc64/mm/init.c |
214 | +++ b/arch/sparc64/mm/init.c |
215 | @@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) |
216 | } |
217 | } |
218 | |
219 | -static void __init kernel_physical_mapping_init(void) |
220 | +static void __init init_kpte_bitmap(void) |
221 | { |
222 | unsigned long i; |
223 | -#ifdef CONFIG_DEBUG_PAGEALLOC |
224 | - unsigned long mem_alloced = 0UL; |
225 | -#endif |
226 | - |
227 | - read_obp_memory("reg", &pall[0], &pall_ents); |
228 | |
229 | for (i = 0; i < pall_ents; i++) { |
230 | unsigned long phys_start, phys_end; |
231 | @@ -1151,14 +1146,24 @@ static void __init kernel_physical_mapping_init(void) |
232 | phys_end = phys_start + pall[i].reg_size; |
233 | |
234 | mark_kpte_bitmap(phys_start, phys_end); |
235 | + } |
236 | +} |
237 | |
238 | +static void __init kernel_physical_mapping_init(void) |
239 | +{ |
240 | #ifdef CONFIG_DEBUG_PAGEALLOC |
241 | + unsigned long i, mem_alloced = 0UL; |
242 | + |
243 | + for (i = 0; i < pall_ents; i++) { |
244 | + unsigned long phys_start, phys_end; |
245 | + |
246 | + phys_start = pall[i].phys_addr; |
247 | + phys_end = phys_start + pall[i].reg_size; |
248 | + |
249 | mem_alloced += kernel_map_range(phys_start, phys_end, |
250 | PAGE_KERNEL); |
251 | -#endif |
252 | } |
253 | |
254 | -#ifdef CONFIG_DEBUG_PAGEALLOC |
255 | printk("Allocated %ld bytes for kernel page tables.\n", |
256 | mem_alloced); |
257 | |
258 | @@ -1400,6 +1405,10 @@ void __init paging_init(void) |
259 | |
260 | inherit_prom_mappings(); |
261 | |
262 | + read_obp_memory("reg", &pall[0], &pall_ents); |
263 | + |
264 | + init_kpte_bitmap(); |
265 | + |
266 | /* Ok, we can use our TLB miss and window trap handlers safely. */ |
267 | setup_tba(); |
268 | |
269 | @@ -1854,7 +1863,9 @@ void __flush_tlb_all(void) |
270 | "wrpr %0, %1, %%pstate" |
271 | : "=r" (pstate) |
272 | : "i" (PSTATE_IE)); |
273 | - if (tlb_type == spitfire) { |
274 | + if (tlb_type == hypervisor) { |
275 | + sun4v_mmu_demap_all(); |
276 | + } else if (tlb_type == spitfire) { |
277 | for (i = 0; i < 64; i++) { |
278 | /* Spitfire Errata #32 workaround */ |
279 | /* NOTE: Always runs on spitfire, so no |
280 | diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c |
281 | index 3ec110c..d2e5298 100644 |
282 | --- a/drivers/acpi/blacklist.c |
283 | +++ b/drivers/acpi/blacklist.c |
284 | @@ -3,6 +3,7 @@ |
285 | * |
286 | * Check to see if the given machine has a known bad ACPI BIOS |
287 | * or if the BIOS is too old. |
288 | + * Check given machine against acpi_osi_dmi_table[]. |
289 | * |
290 | * Copyright (C) 2004 Len Brown <len.brown@intel.com> |
291 | * Copyright (C) 2002 Andy Grover <andrew.grover@intel.com> |
292 | @@ -50,6 +51,8 @@ struct acpi_blacklist_item { |
293 | u32 is_critical_error; |
294 | }; |
295 | |
296 | +static struct dmi_system_id acpi_osi_dmi_table[] __initdata; |
297 | + |
298 | /* |
299 | * POLICY: If *anything* doesn't work, put it on the blacklist. |
300 | * If they are critical errors, mark it critical, and abort driver load. |
301 | @@ -67,8 +70,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = { |
302 | /* IBM 600E - _ADR should return 7, but it returns 1 */ |
303 | {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, |
304 | "Incorrect _ADR", 1}, |
305 | - {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions, |
306 | - "Bogus PCI routing", 1}, |
307 | |
308 | {""} |
309 | }; |
310 | @@ -165,5 +166,388 @@ int __init acpi_blacklisted(void) |
311 | |
312 | blacklisted += blacklist_by_year(); |
313 | |
314 | + dmi_check_system(acpi_osi_dmi_table); |
315 | + |
316 | return blacklisted; |
317 | } |
318 | +#ifdef CONFIG_DMI |
319 | +static int __init dmi_enable_osi_linux(struct dmi_system_id *d) |
320 | +{ |
321 | + acpi_dmi_osi_linux(1, d); /* enable */ |
322 | + return 0; |
323 | +} |
324 | +static int __init dmi_disable_osi_linux(struct dmi_system_id *d) |
325 | +{ |
326 | + acpi_dmi_osi_linux(0, d); /* disable */ |
327 | + return 0; |
328 | +} |
329 | +static int __init dmi_unknown_osi_linux(struct dmi_system_id *d) |
330 | +{ |
331 | + acpi_dmi_osi_linux(-1, d); /* unknown */ |
332 | + return 0; |
333 | +} |
334 | + |
335 | +/* |
336 | + * Most BIOS that invoke OSI(Linux) do nothing with it. |
337 | + * But some cause Linux to break. |
338 | + * Only a couple use it to make Linux run better. |
339 | + * |
340 | + * Thus, Linux should continue to disable OSI(Linux) by default, |
341 | + * should continue to discourage BIOS writers from using it, and |
342 | + * should whitelist the few existing systems that require it. |
343 | + * |
344 | + * If it appears clear a vendor isn't using OSI(Linux) |
345 | + * for anything constructive, blacklist them by name to disable |
346 | + * unnecessary dmesg warnings on all of their products. |
347 | + */ |
348 | + |
349 | +static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { |
350 | + /* |
351 | + * Disable OSI(Linux) warnings on all "Acer, inc." |
352 | + * |
353 | + * _OSI(Linux) disables the latest Windows BIOS code: |
354 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"), |
355 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5050"), |
356 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), |
357 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5580"), |
358 | + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 3010"), |
359 | + * _OSI(Linux) effect unknown: |
360 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Ferrari 5000"), |
361 | + */ |
362 | + /* |
363 | + * note that dmi_check_system() uses strstr() |
364 | + * to match sub-strings rather than !strcmp(), |
365 | + * so "Acer" below matches "Acer, inc." above. |
366 | + */ |
367 | + /* |
368 | + * Disable OSI(Linux) warnings on all "Acer" |
369 | + * |
370 | + * _OSI(Linux) effect unknown: |
371 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"), |
372 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), |
373 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720Z"), |
374 | + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"), |
375 | + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"), |
376 | + * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"), |
377 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"), |
378 | + */ |
379 | + { |
380 | + .callback = dmi_unknown_osi_linux, |
381 | + .ident = "Acer", |
382 | + .matches = { |
383 | + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
384 | + }, |
385 | + }, |
386 | + /* |
387 | + * Disable OSI(Linux) warnings on all "Apple Computer, Inc." |
388 | + * |
389 | + * _OSI(Linux) confirmed to be a NOP: |
390 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), |
391 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"), |
392 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"), |
393 | + * _OSI(Linux) effect unknown: |
394 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacPro2,1"), |
395 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"), |
396 | + * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"), |
397 | + */ |
398 | + { |
399 | + .callback = dmi_disable_osi_linux, |
400 | + .ident = "Apple", |
401 | + .matches = { |
402 | + DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), |
403 | + }, |
404 | + }, |
405 | + /* |
406 | + * Disable OSI(Linux) warnings on all "BenQ" |
407 | + * |
408 | + * _OSI(Linux) confirmed to be a NOP: |
409 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Joybook S31"), |
410 | + */ |
411 | + { |
412 | + .callback = dmi_disable_osi_linux, |
413 | + .ident = "BenQ", |
414 | + .matches = { |
415 | + DMI_MATCH(DMI_SYS_VENDOR, "BenQ"), |
416 | + }, |
417 | + }, |
418 | + /* |
419 | + * Disable OSI(Linux) warnings on all "Clevo Co." |
420 | + * |
421 | + * _OSI(Linux) confirmed to be a NOP: |
422 | + * DMI_MATCH(DMI_PRODUCT_NAME, "M570RU"), |
423 | + */ |
424 | + { |
425 | + .callback = dmi_disable_osi_linux, |
426 | + .ident = "Clevo", |
427 | + .matches = { |
428 | + DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."), |
429 | + }, |
430 | + }, |
431 | + /* |
432 | + * Disable OSI(Linux) warnings on all "COMPAL" |
433 | + * |
434 | + * _OSI(Linux) confirmed to be a NOP: |
435 | + * DMI_MATCH(DMI_BOARD_NAME, "HEL8X"), |
436 | + * _OSI(Linux) unknown effect: |
437 | + * DMI_MATCH(DMI_BOARD_NAME, "IFL91"), |
438 | + */ |
439 | + { |
440 | + .callback = dmi_unknown_osi_linux, |
441 | + .ident = "Compal", |
442 | + .matches = { |
443 | + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), |
444 | + }, |
445 | + }, |
446 | + { /* OSI(Linux) touches USB, unknown side-effect */ |
447 | + .callback = dmi_disable_osi_linux, |
448 | + .ident = "Dell Dimension 5150", |
449 | + .matches = { |
450 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
451 | + DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM051"), |
452 | + }, |
453 | + }, |
454 | + { /* OSI(Linux) is a NOP */ |
455 | + .callback = dmi_disable_osi_linux, |
456 | + .ident = "Dell", |
457 | + .matches = { |
458 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
459 | + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1501"), |
460 | + }, |
461 | + }, |
462 | + { /* OSI(Linux) effect unknown */ |
463 | + .callback = dmi_unknown_osi_linux, |
464 | + .ident = "Dell", |
465 | + .matches = { |
466 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
467 | + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D830"), |
468 | + }, |
469 | + }, |
470 | + { /* OSI(Linux) effect unknown */ |
471 | + .callback = dmi_unknown_osi_linux, |
472 | + .ident = "Dell", |
473 | + .matches = { |
474 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
475 | + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"), |
476 | + }, |
477 | + }, |
478 | + { /* OSI(Linux) effect unknown */ |
479 | + .callback = dmi_unknown_osi_linux, |
480 | + .ident = "Dell", |
481 | + .matches = { |
482 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
483 | + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1900"), |
484 | + }, |
485 | + }, |
486 | + { /* OSI(Linux) touches USB */ |
487 | + .callback = dmi_disable_osi_linux, |
488 | + .ident = "Dell", |
489 | + .matches = { |
490 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
491 | + DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"), |
492 | + }, |
493 | + }, |
494 | + { /* OSI(Linux) is a NOP */ |
495 | + .callback = dmi_disable_osi_linux, |
496 | + .ident = "Dell Vostro 1000", |
497 | + .matches = { |
498 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
499 | + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1000"), |
500 | + }, |
501 | + }, |
502 | + { /* OSI(Linux) effect unknown */ |
503 | + .callback = dmi_unknown_osi_linux, |
504 | + .ident = "Dell", |
505 | + .matches = { |
506 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
507 | + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge SC440"), |
508 | + }, |
509 | + }, |
510 | + { /* OSI(Linux) effect unknown */ |
511 | + .callback = dmi_unknown_osi_linux, |
512 | + .ident = "Dialogue Flybook V5", |
513 | + .matches = { |
514 | + DMI_MATCH(DMI_SYS_VENDOR, "Dialogue Technology Corporation"), |
515 | + DMI_MATCH(DMI_PRODUCT_NAME, "Flybook V5"), |
516 | + }, |
517 | + }, |
518 | + /* |
519 | + * Disable OSI(Linux) warnings on all "FUJITSU SIEMENS" |
520 | + * |
521 | + * _OSI(Linux) disables latest Windows BIOS code: |
522 | + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 2510"), |
523 | + * _OSI(Linux) confirmed to be a NOP: |
524 | + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1536"), |
525 | + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1556"), |
526 | + * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 1546"), |
527 | + * _OSI(Linux) unknown effect: |
528 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo M1425"), |
529 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo Si 1520"), |
530 | + * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"), |
531 | + */ |
532 | + { |
533 | + .callback = dmi_disable_osi_linux, |
534 | + .ident = "Fujitsu Siemens", |
535 | + .matches = { |
536 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), |
537 | + }, |
538 | + }, |
539 | + /* |
540 | + * Disable OSI(Linux) warnings on all "Hewlett-Packard" |
541 | + * |
542 | + * _OSI(Linux) confirmed to be a NOP: |
543 | + * .ident = "HP Pavilion tx 1000" |
544 | + * DMI_MATCH(DMI_BOARD_NAME, "30BF"), |
545 | + * .ident = "HP Pavilion dv2000" |
546 | + * DMI_MATCH(DMI_BOARD_NAME, "30B5"), |
547 | + * .ident = "HP Pavilion dv5000", |
548 | + * DMI_MATCH(DMI_BOARD_NAME, "30A7"), |
549 | + * .ident = "HP Pavilion dv6300 30BC", |
550 | + * DMI_MATCH(DMI_BOARD_NAME, "30BC"), |
551 | + * .ident = "HP Pavilion dv6000", |
552 | + * DMI_MATCH(DMI_BOARD_NAME, "30B7"), |
553 | + * DMI_MATCH(DMI_BOARD_NAME, "30B8"), |
554 | + * .ident = "HP Pavilion dv9000", |
555 | + * DMI_MATCH(DMI_BOARD_NAME, "30B9"), |
556 | + * .ident = "HP Pavilion dv9500", |
557 | + * DMI_MATCH(DMI_BOARD_NAME, "30CB"), |
558 | + * .ident = "HP/Compaq Presario C500", |
559 | + * DMI_MATCH(DMI_BOARD_NAME, "30C6"), |
560 | + * .ident = "HP/Compaq Presario F500", |
561 | + * DMI_MATCH(DMI_BOARD_NAME, "30D3"), |
562 | + * _OSI(Linux) unknown effect: |
563 | + * .ident = "HP Pavilion dv6500", |
564 | + * DMI_MATCH(DMI_BOARD_NAME, "30D0"), |
565 | + */ |
566 | + { |
567 | + .callback = dmi_disable_osi_linux, |
568 | + .ident = "Hewlett-Packard", |
569 | + .matches = { |
570 | + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
571 | + }, |
572 | + }, |
573 | + /* |
574 | + * Lenovo has a mix of systems OSI(Linux) situations |
575 | + * and thus we can not wildcard the vendor. |
576 | + * |
577 | + * _OSI(Linux) helps sound |
578 | + * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"), |
579 | + * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"), |
580 | + * _OSI(Linux) is a NOP: |
581 | + * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), |
582 | + */ |
583 | + { |
584 | + .callback = dmi_enable_osi_linux, |
585 | + .ident = "Lenovo ThinkPad R61", |
586 | + .matches = { |
587 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
588 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"), |
589 | + }, |
590 | + }, |
591 | + { |
592 | + .callback = dmi_enable_osi_linux, |
593 | + .ident = "Lenovo ThinkPad T61", |
594 | + .matches = { |
595 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
596 | + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"), |
597 | + }, |
598 | + }, |
599 | + { |
600 | + .callback = dmi_unknown_osi_linux, |
601 | + .ident = "Lenovo 3000 V100", |
602 | + .matches = { |
603 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
604 | + DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"), |
605 | + }, |
606 | + }, |
607 | + { |
608 | + .callback = dmi_disable_osi_linux, |
609 | + .ident = "Lenovo 3000 N100", |
610 | + .matches = { |
611 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
612 | + DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), |
613 | + }, |
614 | + }, |
615 | + /* |
616 | + * Disable OSI(Linux) warnings on all "LG Electronics" |
617 | + * |
618 | + * _OSI(Linux) confirmed to be a NOP: |
619 | + * DMI_MATCH(DMI_PRODUCT_NAME, "P1-J150B"), |
620 | + * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"), |
621 | + * |
622 | + * unknown: |
623 | + * DMI_MATCH(DMI_PRODUCT_NAME, "S1-MDGDG"), |
624 | + * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"), |
625 | + */ |
626 | + { |
627 | + .callback = dmi_disable_osi_linux, |
628 | + .ident = "LG", |
629 | + .matches = { |
630 | + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), |
631 | + }, |
632 | + }, |
633 | + /* NEC - OSI(Linux) effect unknown */ |
634 | + { |
635 | + .callback = dmi_unknown_osi_linux, |
636 | + .ident = "NEC VERSA M360", |
637 | + .matches = { |
638 | + DMI_MATCH(DMI_SYS_VENDOR, "NEC Computers SAS"), |
639 | + DMI_MATCH(DMI_PRODUCT_NAME, "NEC VERSA M360"), |
640 | + }, |
641 | + }, |
642 | + /* |
643 | + * Disable OSI(Linux) warnings on all "Samsung Electronics" |
644 | + * |
645 | + * OSI(Linux) disables PNP0C32 and other BIOS code for Windows: |
646 | + * DMI_MATCH(DMI_PRODUCT_NAME, "R40P/R41P"), |
647 | + * DMI_MATCH(DMI_PRODUCT_NAME, "R59P/R60P/R61P"), |
648 | + */ |
649 | + { |
650 | + .callback = dmi_disable_osi_linux, |
651 | + .ident = "Samsung", |
652 | + .matches = { |
653 | + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), |
654 | + }, |
655 | + }, |
656 | + /* |
657 | + * Disable OSI(Linux) warnings on all "Sony Corporation" |
658 | + * |
659 | + * _OSI(Linux) is a NOP: |
660 | + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"), |
661 | + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ38GP_C"), |
662 | + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-TZ21MN_N"), |
663 | + * _OSI(Linux) unknown effect: |
664 | + * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"), |
665 | + */ |
666 | + { |
667 | + .callback = dmi_unknown_osi_linux, |
668 | + .ident = "Sony", |
669 | + .matches = { |
670 | + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), |
671 | + }, |
672 | + }, |
673 | + /* |
674 | + * Disable OSI(Linux) warnings on all "TOSHIBA" |
675 | + * |
676 | + * _OSI(Linux) breaks sound (bugzilla 7787): |
677 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P100"), |
678 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P105"), |
679 | + * _OSI(Linux) is a NOP: |
680 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A100"), |
681 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A210"), |
682 | + * _OSI(Linux) unknown effect: |
683 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A135"), |
684 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A200"), |
685 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P205"), |
686 | + * DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U305"), |
687 | + */ |
688 | + { |
689 | + .callback = dmi_disable_osi_linux, |
690 | + .ident = "Toshiba", |
691 | + .matches = { |
692 | + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
693 | + }, |
694 | + }, |
695 | + {} |
696 | +}; |
697 | + |
698 | +#endif /* CONFIG_DMI */ |
699 | diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c |
700 | index 9ba778a..222fcec 100644 |
701 | --- a/drivers/acpi/bus.c |
702 | +++ b/drivers/acpi/bus.c |
703 | @@ -29,7 +29,6 @@ |
704 | #include <linux/list.h> |
705 | #include <linux/sched.h> |
706 | #include <linux/pm.h> |
707 | -#include <linux/pm_legacy.h> |
708 | #include <linux/device.h> |
709 | #include <linux/proc_fs.h> |
710 | #ifdef CONFIG_X86 |
711 | @@ -757,16 +756,14 @@ static int __init acpi_init(void) |
712 | result = acpi_bus_init(); |
713 | |
714 | if (!result) { |
715 | -#ifdef CONFIG_PM_LEGACY |
716 | - if (!PM_IS_ACTIVE()) |
717 | - pm_active = 1; |
718 | + if (!(pm_flags & PM_APM)) |
719 | + pm_flags |= PM_ACPI; |
720 | else { |
721 | printk(KERN_INFO PREFIX |
722 | "APM is already active, exiting\n"); |
723 | disable_acpi(); |
724 | result = -ENODEV; |
725 | } |
726 | -#endif |
727 | } else |
728 | disable_acpi(); |
729 | |
730 | diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c |
731 | index a474ca2..954ac8c 100644 |
732 | --- a/drivers/acpi/dispatcher/dsobject.c |
733 | +++ b/drivers/acpi/dispatcher/dsobject.c |
734 | @@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, |
735 | return_ACPI_STATUS(status); |
736 | } |
737 | } |
738 | + |
739 | + /* Special object resolution for elements of a package */ |
740 | + |
741 | + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || |
742 | + (op->common.parent->common.aml_opcode == |
743 | + AML_VAR_PACKAGE_OP)) { |
744 | + /* |
745 | + * Attempt to resolve the node to a value before we insert it into |
746 | + * the package. If this is a reference to a common data type, |
747 | + * resolve it immediately. According to the ACPI spec, package |
748 | + * elements can only be "data objects" or method references. |
749 | + * Attempt to resolve to an Integer, Buffer, String or Package. |
750 | + * If cannot, return the named reference (for things like Devices, |
751 | + * Methods, etc.) Buffer Fields and Fields will resolve to simple |
752 | + * objects (int/buf/str/pkg). |
753 | + * |
754 | + * NOTE: References to things like Devices, Methods, Mutexes, etc. |
755 | + * will remain as named references. This behavior is not described |
756 | + * in the ACPI spec, but it appears to be an oversight. |
757 | + */ |
758 | + obj_desc = (union acpi_operand_object *)op->common.node; |
759 | + |
760 | + status = |
761 | + acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR |
762 | + (struct |
763 | + acpi_namespace_node, |
764 | + &obj_desc), |
765 | + walk_state); |
766 | + if (ACPI_FAILURE(status)) { |
767 | + return_ACPI_STATUS(status); |
768 | + } |
769 | + |
770 | + switch (op->common.node->type) { |
771 | + /* |
772 | + * For these types, we need the actual node, not the subobject. |
773 | + * However, the subobject got an extra reference count above. |
774 | + */ |
775 | + case ACPI_TYPE_MUTEX: |
776 | + case ACPI_TYPE_METHOD: |
777 | + case ACPI_TYPE_POWER: |
778 | + case ACPI_TYPE_PROCESSOR: |
779 | + case ACPI_TYPE_EVENT: |
780 | + case ACPI_TYPE_REGION: |
781 | + case ACPI_TYPE_DEVICE: |
782 | + case ACPI_TYPE_THERMAL: |
783 | + |
784 | + obj_desc = |
785 | + (union acpi_operand_object *)op->common. |
786 | + node; |
787 | + break; |
788 | + |
789 | + default: |
790 | + break; |
791 | + } |
792 | + |
793 | + /* |
794 | + * If above resolved to an operand object, we are done. Otherwise, |
795 | + * we have a NS node, we must create the package entry as a named |
796 | + * reference. |
797 | + */ |
798 | + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != |
799 | + ACPI_DESC_TYPE_NAMED) { |
800 | + goto exit; |
801 | + } |
802 | + } |
803 | } |
804 | |
805 | /* Create and init a new internal ACPI object */ |
806 | @@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, |
807 | return_ACPI_STATUS(status); |
808 | } |
809 | |
810 | + exit: |
811 | *obj_desc_ptr = obj_desc; |
812 | return_ACPI_STATUS(AE_OK); |
813 | } |
814 | @@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, |
815 | arg = arg->common.next; |
816 | for (i = 0; arg && (i < element_count); i++) { |
817 | if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { |
818 | - |
819 | - /* This package element is already built, just get it */ |
820 | - |
821 | - obj_desc->package.elements[i] = |
822 | - ACPI_CAST_PTR(union acpi_operand_object, |
823 | - arg->common.node); |
824 | + if (arg->common.node->type == ACPI_TYPE_METHOD) { |
825 | + /* |
826 | + * A method reference "looks" to the parser to be a method |
827 | + * invocation, so we special case it here |
828 | + */ |
829 | + arg->common.aml_opcode = AML_INT_NAMEPATH_OP; |
830 | + status = |
831 | + acpi_ds_build_internal_object(walk_state, |
832 | + arg, |
833 | + &obj_desc-> |
834 | + package. |
835 | + elements[i]); |
836 | + } else { |
837 | + /* This package element is already built, just get it */ |
838 | + |
839 | + obj_desc->package.elements[i] = |
840 | + ACPI_CAST_PTR(union acpi_operand_object, |
841 | + arg->common.node); |
842 | + } |
843 | } else { |
844 | status = acpi_ds_build_internal_object(walk_state, arg, |
845 | &obj_desc-> |
846 | diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c |
847 | index e99f0c4..58ad097 100644 |
848 | --- a/drivers/acpi/events/evregion.c |
849 | +++ b/drivers/acpi/events/evregion.c |
850 | @@ -344,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, |
851 | * setup will potentially execute control methods |
852 | * (e.g., _REG method for this region) |
853 | */ |
854 | - acpi_ex_relinquish_interpreter(); |
855 | + acpi_ex_exit_interpreter(); |
856 | |
857 | status = region_setup(region_obj, ACPI_REGION_ACTIVATE, |
858 | handler_desc->address_space.context, |
859 | @@ -352,7 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, |
860 | |
861 | /* Re-enter the interpreter */ |
862 | |
863 | - acpi_ex_reacquire_interpreter(); |
864 | + acpi_ex_enter_interpreter(); |
865 | |
866 | /* Check for failure of the Region Setup */ |
867 | |
868 | @@ -405,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, |
869 | * exit the interpreter because the handler *might* block -- we don't |
870 | * know what it will do, so we can't hold the lock on the intepreter. |
871 | */ |
872 | - acpi_ex_relinquish_interpreter(); |
873 | + acpi_ex_exit_interpreter(); |
874 | } |
875 | |
876 | /* Call the handler */ |
877 | @@ -426,7 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, |
878 | * We just returned from a non-default handler, we must re-enter the |
879 | * interpreter |
880 | */ |
881 | - acpi_ex_reacquire_interpreter(); |
882 | + acpi_ex_enter_interpreter(); |
883 | } |
884 | |
885 | return_ACPI_STATUS(status); |
886 | diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c |
887 | index 12c09fa..cd573e4 100644 |
888 | --- a/drivers/acpi/osl.c |
889 | +++ b/drivers/acpi/osl.c |
890 | @@ -77,11 +77,55 @@ static struct workqueue_struct *kacpi_notify_wq; |
891 | #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ |
892 | static char osi_additional_string[OSI_STRING_LENGTH_MAX]; |
893 | |
894 | -static int osi_linux; /* disable _OSI(Linux) by default */ |
895 | +/* |
896 | + * "Ode to _OSI(Linux)" |
897 | + * |
898 | + * osi_linux -- Control response to BIOS _OSI(Linux) query. |
899 | + * |
900 | + * As Linux evolves, the features that it supports change. |
901 | + * So an OSI string such as "Linux" is not specific enough |
902 | + * to be useful across multiple versions of Linux. It |
903 | + * doesn't identify any particular feature, interface, |
904 | + * or even any particular version of Linux... |
905 | + * |
906 | + * Unfortunately, Linux-2.6.22 and earlier responded "yes" |
907 | + * to a BIOS _OSI(Linux) query. When |
908 | + * a reference mobile BIOS started using it, its use |
909 | + * started to spread to many vendor platforms. |
910 | + * As it is not supportable, we need to halt that spread. |
911 | + * |
912 | + * Today, most BIOS references to _OSI(Linux) are noise -- |
913 | + * they have no functional effect and are just dead code |
914 | + * carried over from the reference BIOS. |
915 | + * |
916 | + * The next most common case is that _OSI(Linux) harms Linux, |
917 | + * usually by causing the BIOS to follow paths that are |
918 | + * not tested during Windows validation. |
919 | + * |
920 | + * Finally, there is a short list of platforms |
921 | + * where OSI(Linux) benefits Linux. |
922 | + * |
923 | + * In Linux-2.6.23, OSI(Linux) is first disabled by default. |
924 | + * DMI is used to disable the dmesg warning about OSI(Linux) |
925 | + * on platforms where it is known to have no effect. |
926 | + * But a dmesg warning remains for systems where |
927 | + * we do not know if OSI(Linux) is good or bad for the system. |
928 | + * DMI is also used to enable OSI(Linux) for the machines |
929 | + * that are known to need it. |
930 | + * |
931 | + * BIOS writers should NOT query _OSI(Linux) on future systems. |
932 | + * It will be ignored by default, and to get Linux to |
933 | + * not ignore it will require a kernel source update to |
934 | + * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation. |
935 | + */ |
936 | +#define OSI_LINUX_ENABLE 0 |
937 | |
938 | -#ifdef CONFIG_DMI |
939 | -static struct __initdata dmi_system_id acpi_osl_dmi_table[]; |
940 | -#endif |
941 | +static struct osi_linux { |
942 | + unsigned int enable:1; |
943 | + unsigned int dmi:1; |
944 | + unsigned int cmdline:1; |
945 | + unsigned int known:1; |
946 | +} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0}; |
947 | |
948 | static void __init acpi_request_region (struct acpi_generic_address *addr, |
949 | unsigned int length, char *desc) |
950 | @@ -133,7 +177,6 @@ device_initcall(acpi_reserve_resources); |
951 | |
952 | acpi_status __init acpi_os_initialize(void) |
953 | { |
954 | - dmi_check_system(acpi_osl_dmi_table); |
955 | return AE_OK; |
956 | } |
957 | |
958 | @@ -971,13 +1014,37 @@ static int __init acpi_os_name_setup(char *str) |
959 | |
960 | __setup("acpi_os_name=", acpi_os_name_setup); |
961 | |
962 | -static void enable_osi_linux(int enable) { |
963 | +static void __init set_osi_linux(unsigned int enable) |
964 | +{ |
965 | + if (osi_linux.enable != enable) { |
966 | + osi_linux.enable = enable; |
967 | + printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n", |
968 | + enable ? "Add": "Delet"); |
969 | + } |
970 | + return; |
971 | +} |
972 | |
973 | - if (osi_linux != enable) |
974 | - printk(KERN_INFO PREFIX "%sabled _OSI(Linux)\n", |
975 | - enable ? "En": "Dis"); |
976 | +static void __init acpi_cmdline_osi_linux(unsigned int enable) |
977 | +{ |
978 | + osi_linux.cmdline = 1; /* cmdline set the default */ |
979 | + set_osi_linux(enable); |
980 | + |
981 | + return; |
982 | +} |
983 | + |
984 | +void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) |
985 | +{ |
986 | + osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ |
987 | + |
988 | + printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); |
989 | + |
990 | + if (enable == -1) |
991 | + return; |
992 | + |
993 | + osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */ |
994 | + |
995 | + set_osi_linux(enable); |
996 | |
997 | - osi_linux = enable; |
998 | return; |
999 | } |
1000 | |
1001 | @@ -994,12 +1061,12 @@ static int __init acpi_osi_setup(char *str) |
1002 | printk(KERN_INFO PREFIX "_OSI method disabled\n"); |
1003 | acpi_gbl_create_osi_method = FALSE; |
1004 | } else if (!strcmp("!Linux", str)) { |
1005 | - enable_osi_linux(0); |
1006 | + acpi_cmdline_osi_linux(0); /* !enable */ |
1007 | } else if (*str == '!') { |
1008 | if (acpi_osi_invalidate(++str) == AE_OK) |
1009 | printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); |
1010 | } else if (!strcmp("Linux", str)) { |
1011 | - enable_osi_linux(1); |
1012 | + acpi_cmdline_osi_linux(1); /* enable */ |
1013 | } else if (*osi_additional_string == '\0') { |
1014 | strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX); |
1015 | printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); |
1016 | @@ -1156,6 +1223,34 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) |
1017 | return (AE_OK); |
1018 | } |
1019 | |
1020 | +/** |
1021 | + * acpi_dmi_dump - dump DMI slots needed for blacklist entry |
1022 | + * |
1023 | + * Returns 0 on success |
1024 | + */ |
1025 | +static int acpi_dmi_dump(void) |
1026 | +{ |
1027 | + |
1028 | + if (!dmi_available) |
1029 | + return -1; |
1030 | + |
1031 | + printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n", |
1032 | + dmi_get_system_info(DMI_SYS_VENDOR)); |
1033 | + printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n", |
1034 | + dmi_get_system_info(DMI_PRODUCT_NAME)); |
1035 | + printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n", |
1036 | + dmi_get_system_info(DMI_PRODUCT_VERSION)); |
1037 | + printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n", |
1038 | + dmi_get_system_info(DMI_BOARD_NAME)); |
1039 | + printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n", |
1040 | + dmi_get_system_info(DMI_BIOS_VENDOR)); |
1041 | + printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n", |
1042 | + dmi_get_system_info(DMI_BIOS_DATE)); |
1043 | + |
1044 | + return 0; |
1045 | +} |
1046 | + |
1047 | + |
1048 | /****************************************************************************** |
1049 | * |
1050 | * FUNCTION: acpi_os_validate_interface |
1051 | @@ -1175,13 +1270,29 @@ acpi_os_validate_interface (char *interface) |
1052 | if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX)) |
1053 | return AE_OK; |
1054 | if (!strcmp("Linux", interface)) { |
1055 | - printk(KERN_WARNING PREFIX |
1056 | - "System BIOS is requesting _OSI(Linux)\n"); |
1057 | - printk(KERN_WARNING PREFIX |
1058 | - "If \"acpi_osi=Linux\" works better,\n" |
1059 | - "Please send dmidecode " |
1060 | - "to linux-acpi@vger.kernel.org\n"); |
1061 | - if(osi_linux) |
1062 | + |
1063 | + printk(KERN_NOTICE PREFIX |
1064 | + "BIOS _OSI(Linux) query %s%s\n", |
1065 | + osi_linux.enable ? "honored" : "ignored", |
1066 | + osi_linux.cmdline ? " via cmdline" : |
1067 | + osi_linux.dmi ? " via DMI" : ""); |
1068 | + |
1069 | + if (!osi_linux.dmi) { |
1070 | + if (acpi_dmi_dump()) |
1071 | + printk(KERN_NOTICE PREFIX |
1072 | + "[please extract dmidecode output]\n"); |
1073 | + printk(KERN_NOTICE PREFIX |
1074 | + "Please send DMI info above to " |
1075 | + "linux-acpi@vger.kernel.org\n"); |
1076 | + } |
1077 | + if (!osi_linux.known && !osi_linux.cmdline) { |
1078 | + printk(KERN_NOTICE PREFIX |
1079 | + "If \"acpi_osi=%sLinux\" works better, " |
1080 | + "please notify linux-acpi@vger.kernel.org\n", |
1081 | + osi_linux.enable ? "!" : ""); |
1082 | + } |
1083 | + |
1084 | + if (osi_linux.enable) |
1085 | return AE_OK; |
1086 | } |
1087 | return AE_SUPPORT; |
1088 | @@ -1213,28 +1324,4 @@ acpi_os_validate_address ( |
1089 | return AE_OK; |
1090 | } |
1091 | |
1092 | -#ifdef CONFIG_DMI |
1093 | -static int dmi_osi_linux(struct dmi_system_id *d) |
1094 | -{ |
1095 | - printk(KERN_NOTICE "%s detected: enabling _OSI(Linux)\n", d->ident); |
1096 | - enable_osi_linux(1); |
1097 | - return 0; |
1098 | -} |
1099 | - |
1100 | -static struct dmi_system_id acpi_osl_dmi_table[] __initdata = { |
1101 | - /* |
1102 | - * Boxes that need _OSI(Linux) |
1103 | - */ |
1104 | - { |
1105 | - .callback = dmi_osi_linux, |
1106 | - .ident = "Intel Napa CRB", |
1107 | - .matches = { |
1108 | - DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), |
1109 | - DMI_MATCH(DMI_BOARD_NAME, "MPAD-MSAE Customer Reference Boards"), |
1110 | - }, |
1111 | - }, |
1112 | - {} |
1113 | -}; |
1114 | -#endif /* CONFIG_DMI */ |
1115 | - |
1116 | #endif |
1117 | diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c |
1118 | index dd3186a..62010c2 100644 |
1119 | --- a/drivers/acpi/pci_irq.c |
1120 | +++ b/drivers/acpi/pci_irq.c |
1121 | @@ -429,6 +429,15 @@ int acpi_pci_irq_enable(struct pci_dev *dev) |
1122 | &polarity, &link, |
1123 | acpi_pci_allocate_irq); |
1124 | |
1125 | + if (irq < 0) { |
1126 | + /* |
1127 | + * IDE legacy mode controller IRQs are magic. Why do compat |
1128 | + * extensions always make such a nasty mess. |
1129 | + */ |
1130 | + if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && |
1131 | + (dev->class & 0x05) == 0) |
1132 | + return 0; |
1133 | + } |
1134 | /* |
1135 | * No IRQ known to the ACPI subsystem - maybe the BIOS / |
1136 | * driver reported one, then use it. Exit in any case. |
1137 | diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c |
1138 | index dad84c0..9d71f25 100644 |
1139 | --- a/drivers/acpi/video.c |
1140 | +++ b/drivers/acpi/video.c |
1141 | @@ -573,7 +573,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) |
1142 | struct acpi_video_device_brightness *br = NULL; |
1143 | |
1144 | |
1145 | - memset(&device->cap, 0, 4); |
1146 | + memset(&device->cap, 0, sizeof(device->cap)); |
1147 | |
1148 | if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) { |
1149 | device->cap._ADR = 1; |
1150 | @@ -693,7 +693,7 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video) |
1151 | { |
1152 | acpi_handle h_dummy1; |
1153 | |
1154 | - memset(&video->cap, 0, 4); |
1155 | + memset(&video->cap, 0, sizeof(video->cap)); |
1156 | if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) { |
1157 | video->cap._DOS = 1; |
1158 | } |
1159 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1160 | index 98e33f9..4895a42 100644 |
1161 | --- a/drivers/ata/libata-core.c |
1162 | +++ b/drivers/ata/libata-core.c |
1163 | @@ -6121,19 +6121,6 @@ static void ata_host_release(struct device *gendev, void *res) |
1164 | if (!ap) |
1165 | continue; |
1166 | |
1167 | - if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop) |
1168 | - ap->ops->port_stop(ap); |
1169 | - } |
1170 | - |
1171 | - if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop) |
1172 | - host->ops->host_stop(host); |
1173 | - |
1174 | - for (i = 0; i < host->n_ports; i++) { |
1175 | - struct ata_port *ap = host->ports[i]; |
1176 | - |
1177 | - if (!ap) |
1178 | - continue; |
1179 | - |
1180 | if (ap->scsi_host) |
1181 | scsi_host_put(ap->scsi_host); |
1182 | |
1183 | @@ -6258,6 +6245,24 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
1184 | return host; |
1185 | } |
1186 | |
1187 | +static void ata_host_stop(struct device *gendev, void *res) |
1188 | +{ |
1189 | + struct ata_host *host = dev_get_drvdata(gendev); |
1190 | + int i; |
1191 | + |
1192 | + WARN_ON(!(host->flags & ATA_HOST_STARTED)); |
1193 | + |
1194 | + for (i = 0; i < host->n_ports; i++) { |
1195 | + struct ata_port *ap = host->ports[i]; |
1196 | + |
1197 | + if (ap->ops->port_stop) |
1198 | + ap->ops->port_stop(ap); |
1199 | + } |
1200 | + |
1201 | + if (host->ops->host_stop) |
1202 | + host->ops->host_stop(host); |
1203 | +} |
1204 | + |
1205 | /** |
1206 | * ata_host_start - start and freeze ports of an ATA host |
1207 | * @host: ATA host to start ports for |
1208 | @@ -6276,6 +6281,8 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
1209 | */ |
1210 | int ata_host_start(struct ata_host *host) |
1211 | { |
1212 | + int have_stop = 0; |
1213 | + void *start_dr = NULL; |
1214 | int i, rc; |
1215 | |
1216 | if (host->flags & ATA_HOST_STARTED) |
1217 | @@ -6287,6 +6294,22 @@ int ata_host_start(struct ata_host *host) |
1218 | if (!host->ops && !ata_port_is_dummy(ap)) |
1219 | host->ops = ap->ops; |
1220 | |
1221 | + if (ap->ops->port_stop) |
1222 | + have_stop = 1; |
1223 | + } |
1224 | + |
1225 | + if (host->ops->host_stop) |
1226 | + have_stop = 1; |
1227 | + |
1228 | + if (have_stop) { |
1229 | + start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); |
1230 | + if (!start_dr) |
1231 | + return -ENOMEM; |
1232 | + } |
1233 | + |
1234 | + for (i = 0; i < host->n_ports; i++) { |
1235 | + struct ata_port *ap = host->ports[i]; |
1236 | + |
1237 | if (ap->ops->port_start) { |
1238 | rc = ap->ops->port_start(ap); |
1239 | if (rc) { |
1240 | @@ -6299,6 +6322,8 @@ int ata_host_start(struct ata_host *host) |
1241 | ata_eh_freeze_port(ap); |
1242 | } |
1243 | |
1244 | + if (start_dr) |
1245 | + devres_add(host->dev, start_dr); |
1246 | host->flags |= ATA_HOST_STARTED; |
1247 | return 0; |
1248 | |
1249 | @@ -6309,6 +6334,7 @@ int ata_host_start(struct ata_host *host) |
1250 | if (ap->ops->port_stop) |
1251 | ap->ops->port_stop(ap); |
1252 | } |
1253 | + devres_free(start_dr); |
1254 | return rc; |
1255 | } |
1256 | |
1257 | diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c |
1258 | index 25698cf..bab694a 100644 |
1259 | --- a/drivers/ata/sata_promise.c |
1260 | +++ b/drivers/ata/sata_promise.c |
1261 | @@ -50,6 +50,7 @@ |
1262 | enum { |
1263 | PDC_MAX_PORTS = 4, |
1264 | PDC_MMIO_BAR = 3, |
1265 | + PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */ |
1266 | |
1267 | /* register offsets */ |
1268 | PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */ |
1269 | @@ -155,7 +156,7 @@ static struct scsi_host_template pdc_ata_sht = { |
1270 | .queuecommand = ata_scsi_queuecmd, |
1271 | .can_queue = ATA_DEF_QUEUE, |
1272 | .this_id = ATA_SHT_THIS_ID, |
1273 | - .sg_tablesize = LIBATA_MAX_PRD, |
1274 | + .sg_tablesize = PDC_MAX_PRD, |
1275 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, |
1276 | .emulated = ATA_SHT_EMULATED, |
1277 | .use_clustering = ATA_SHT_USE_CLUSTERING, |
1278 | @@ -527,6 +528,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc) |
1279 | memcpy(buf+31, cdb, cdb_len); |
1280 | } |
1281 | |
1282 | +/** |
1283 | + * pdc_fill_sg - Fill PCI IDE PRD table |
1284 | + * @qc: Metadata associated with taskfile to be transferred |
1285 | + * |
1286 | + * Fill PCI IDE PRD (scatter-gather) table with segments |
1287 | + * associated with the current disk command. |
1288 | + * Make sure hardware does not choke on it. |
1289 | + * |
1290 | + * LOCKING: |
1291 | + * spin_lock_irqsave(host lock) |
1292 | + * |
1293 | + */ |
1294 | +static void pdc_fill_sg(struct ata_queued_cmd *qc) |
1295 | +{ |
1296 | + struct ata_port *ap = qc->ap; |
1297 | + struct scatterlist *sg; |
1298 | + unsigned int idx; |
1299 | + const u32 SG_COUNT_ASIC_BUG = 41*4; |
1300 | + |
1301 | + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1302 | + return; |
1303 | + |
1304 | + WARN_ON(qc->__sg == NULL); |
1305 | + WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); |
1306 | + |
1307 | + idx = 0; |
1308 | + ata_for_each_sg(sg, qc) { |
1309 | + u32 addr, offset; |
1310 | + u32 sg_len, len; |
1311 | + |
1312 | + /* determine if physical DMA addr spans 64K boundary. |
1313 | + * Note h/w doesn't support 64-bit, so we unconditionally |
1314 | + * truncate dma_addr_t to u32. |
1315 | + */ |
1316 | + addr = (u32) sg_dma_address(sg); |
1317 | + sg_len = sg_dma_len(sg); |
1318 | + |
1319 | + while (sg_len) { |
1320 | + offset = addr & 0xffff; |
1321 | + len = sg_len; |
1322 | + if ((offset + sg_len) > 0x10000) |
1323 | + len = 0x10000 - offset; |
1324 | + |
1325 | + ap->prd[idx].addr = cpu_to_le32(addr); |
1326 | + ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); |
1327 | + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); |
1328 | + |
1329 | + idx++; |
1330 | + sg_len -= len; |
1331 | + addr += len; |
1332 | + } |
1333 | + } |
1334 | + |
1335 | + if (idx) { |
1336 | + u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len); |
1337 | + |
1338 | + if (len > SG_COUNT_ASIC_BUG) { |
1339 | + u32 addr; |
1340 | + |
1341 | + VPRINTK("Splitting last PRD.\n"); |
1342 | + |
1343 | + addr = le32_to_cpu(ap->prd[idx - 1].addr); |
1344 | + ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); |
1345 | + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); |
1346 | + |
1347 | + addr = addr + len - SG_COUNT_ASIC_BUG; |
1348 | + len = SG_COUNT_ASIC_BUG; |
1349 | + ap->prd[idx].addr = cpu_to_le32(addr); |
1350 | + ap->prd[idx].flags_len = cpu_to_le32(len); |
1351 | + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); |
1352 | + |
1353 | + idx++; |
1354 | + } |
1355 | + |
1356 | + ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
1357 | + } |
1358 | +} |
1359 | + |
1360 | static void pdc_qc_prep(struct ata_queued_cmd *qc) |
1361 | { |
1362 | struct pdc_port_priv *pp = qc->ap->private_data; |
1363 | @@ -536,7 +615,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) |
1364 | |
1365 | switch (qc->tf.protocol) { |
1366 | case ATA_PROT_DMA: |
1367 | - ata_qc_prep(qc); |
1368 | + pdc_fill_sg(qc); |
1369 | /* fall through */ |
1370 | |
1371 | case ATA_PROT_NODATA: |
1372 | @@ -552,11 +631,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) |
1373 | break; |
1374 | |
1375 | case ATA_PROT_ATAPI: |
1376 | - ata_qc_prep(qc); |
1377 | + pdc_fill_sg(qc); |
1378 | break; |
1379 | |
1380 | case ATA_PROT_ATAPI_DMA: |
1381 | - ata_qc_prep(qc); |
1382 | + pdc_fill_sg(qc); |
1383 | /*FALLTHROUGH*/ |
1384 | case ATA_PROT_ATAPI_NODATA: |
1385 | pdc_atapi_pkt(qc); |
1386 | diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c |
1387 | index 14ced85..0c205b0 100644 |
1388 | --- a/drivers/atm/nicstar.c |
1389 | +++ b/drivers/atm/nicstar.c |
1390 | @@ -625,14 +625,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) |
1391 | if (mac[i] == NULL) |
1392 | nicstar_init_eprom(card->membase); |
1393 | |
1394 | - if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) |
1395 | - { |
1396 | - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); |
1397 | - error = 9; |
1398 | - ns_init_card_error(card, error); |
1399 | - return error; |
1400 | - } |
1401 | - |
1402 | /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ |
1403 | writel(0x00000000, card->membase + VPM); |
1404 | |
1405 | @@ -858,8 +850,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) |
1406 | card->iovpool.count++; |
1407 | } |
1408 | |
1409 | - card->intcnt = 0; |
1410 | - |
1411 | /* Configure NICStAR */ |
1412 | if (card->rct_size == 4096) |
1413 | ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; |
1414 | @@ -868,6 +858,15 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) |
1415 | |
1416 | card->efbie = 1; |
1417 | |
1418 | + card->intcnt = 0; |
1419 | + if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) |
1420 | + { |
1421 | + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); |
1422 | + error = 9; |
1423 | + ns_init_card_error(card, error); |
1424 | + return error; |
1425 | + } |
1426 | + |
1427 | /* Register device */ |
1428 | card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); |
1429 | if (card->atmdev == NULL) |
1430 | diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c |
1431 | index ec116df..72183bd 100644 |
1432 | --- a/drivers/char/apm-emulation.c |
1433 | +++ b/drivers/char/apm-emulation.c |
1434 | @@ -295,7 +295,6 @@ static int |
1435 | apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) |
1436 | { |
1437 | struct apm_user *as = filp->private_data; |
1438 | - unsigned long flags; |
1439 | int err = -EINVAL; |
1440 | |
1441 | if (!as->suser || !as->writer) |
1442 | @@ -331,10 +330,16 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) |
1443 | * Wait for the suspend/resume to complete. If there |
1444 | * are pending acknowledges, we wait here for them. |
1445 | */ |
1446 | - flags = current->flags; |
1447 | + freezer_do_not_count(); |
1448 | |
1449 | wait_event(apm_suspend_waitqueue, |
1450 | as->suspend_state == SUSPEND_DONE); |
1451 | + |
1452 | + /* |
1453 | + * Since we are waiting until the suspend is done, the |
1454 | + * try_to_freeze() in freezer_count() will not trigger |
1455 | + */ |
1456 | + freezer_count(); |
1457 | } else { |
1458 | as->suspend_state = SUSPEND_WAIT; |
1459 | mutex_unlock(&state_lock); |
1460 | @@ -362,14 +367,10 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) |
1461 | * Wait for the suspend/resume to complete. If there |
1462 | * are pending acknowledges, we wait here for them. |
1463 | */ |
1464 | - flags = current->flags; |
1465 | - |
1466 | - wait_event_interruptible(apm_suspend_waitqueue, |
1467 | + wait_event_freezable(apm_suspend_waitqueue, |
1468 | as->suspend_state == SUSPEND_DONE); |
1469 | } |
1470 | |
1471 | - current->flags = flags; |
1472 | - |
1473 | mutex_lock(&state_lock); |
1474 | err = as->suspend_result; |
1475 | as->suspend_state = SUSPEND_NONE; |
1476 | diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c |
1477 | index 68e36e5..caa0bce 100644 |
1478 | --- a/drivers/char/drm/drm_vm.c |
1479 | +++ b/drivers/char/drm/drm_vm.c |
1480 | @@ -506,6 +506,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) |
1481 | vma->vm_ops = &drm_vm_dma_ops; |
1482 | |
1483 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
1484 | + vma->vm_flags |= VM_DONTEXPAND; |
1485 | |
1486 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
1487 | drm_vm_open_locked(vma); |
1488 | @@ -655,6 +656,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) |
1489 | return -EINVAL; /* This should never happen. */ |
1490 | } |
1491 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
1492 | + vma->vm_flags |= VM_DONTEXPAND; |
1493 | |
1494 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
1495 | drm_vm_open_locked(vma); |
1496 | diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c |
1497 | index 04ac155..ada142a 100644 |
1498 | --- a/drivers/char/mspec.c |
1499 | +++ b/drivers/char/mspec.c |
1500 | @@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, |
1501 | vdata->refcnt = ATOMIC_INIT(1); |
1502 | vma->vm_private_data = vdata; |
1503 | |
1504 | - vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP); |
1505 | + vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND); |
1506 | if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED) |
1507 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
1508 | vma->vm_ops = &mspec_vm_ops; |
1509 | diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c |
1510 | index 3ee73cf..d08c301 100644 |
1511 | --- a/drivers/char/tty_ioctl.c |
1512 | +++ b/drivers/char/tty_ioctl.c |
1513 | @@ -62,7 +62,7 @@ void tty_wait_until_sent(struct tty_struct * tty, long timeout) |
1514 | if (!timeout) |
1515 | timeout = MAX_SCHEDULE_TIMEOUT; |
1516 | if (wait_event_interruptible_timeout(tty->write_wait, |
1517 | - !tty->driver->chars_in_buffer(tty), timeout)) |
1518 | + !tty->driver->chars_in_buffer(tty), timeout) < 0) |
1519 | return; |
1520 | if (tty->driver->wait_until_sent) |
1521 | tty->driver->wait_until_sent(tty, timeout); |
1522 | diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c |
1523 | index 296f510..12ceed5 100644 |
1524 | --- a/drivers/connector/cn_queue.c |
1525 | +++ b/drivers/connector/cn_queue.c |
1526 | @@ -99,8 +99,8 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id |
1527 | spin_unlock_bh(&dev->queue_lock); |
1528 | |
1529 | if (found) { |
1530 | - atomic_dec(&dev->refcnt); |
1531 | cn_queue_free_callback(cbq); |
1532 | + atomic_dec(&dev->refcnt); |
1533 | return -EINVAL; |
1534 | } |
1535 | |
1536 | diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c |
1537 | index d4501dc..e9a23a4 100644 |
1538 | --- a/drivers/crypto/padlock-aes.c |
1539 | +++ b/drivers/crypto/padlock-aes.c |
1540 | @@ -419,13 +419,58 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, |
1541 | /* ====== Encryption/decryption routines ====== */ |
1542 | |
1543 | /* These are the real call to PadLock. */ |
1544 | +static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, |
1545 | + void *control_word) |
1546 | +{ |
1547 | + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
1548 | + : "+S"(input), "+D"(output) |
1549 | + : "d"(control_word), "b"(key), "c"(1)); |
1550 | +} |
1551 | + |
1552 | +static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) |
1553 | +{ |
1554 | + u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; |
1555 | + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); |
1556 | + |
1557 | + memcpy(tmp, in, AES_BLOCK_SIZE); |
1558 | + padlock_xcrypt(tmp, out, key, cword); |
1559 | +} |
1560 | + |
1561 | +static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, |
1562 | + struct cword *cword) |
1563 | +{ |
1564 | + asm volatile ("pushfl; popfl"); |
1565 | + |
1566 | + /* padlock_xcrypt requires at least two blocks of data. */ |
1567 | + if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & |
1568 | + (PAGE_SIZE - 1)))) { |
1569 | + aes_crypt_copy(in, out, key, cword); |
1570 | + return; |
1571 | + } |
1572 | + |
1573 | + padlock_xcrypt(in, out, key, cword); |
1574 | +} |
1575 | + |
1576 | static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, |
1577 | void *control_word, u32 count) |
1578 | { |
1579 | + if (count == 1) { |
1580 | + aes_crypt(input, output, key, control_word); |
1581 | + return; |
1582 | + } |
1583 | + |
1584 | asm volatile ("pushfl; popfl"); /* enforce key reload. */ |
1585 | - asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
1586 | + asm volatile ("test $1, %%cl;" |
1587 | + "je 1f;" |
1588 | + "lea -1(%%ecx), %%eax;" |
1589 | + "mov $1, %%ecx;" |
1590 | + ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ |
1591 | + "mov %%eax, %%ecx;" |
1592 | + "1:" |
1593 | + ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ |
1594 | : "+S"(input), "+D"(output) |
1595 | - : "d"(control_word), "b"(key), "c"(count)); |
1596 | + : "d"(control_word), "b"(key), "c"(count) |
1597 | + : "ax"); |
1598 | } |
1599 | |
1600 | static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
1601 | @@ -443,13 +488,13 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, |
1602 | static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
1603 | { |
1604 | struct aes_ctx *ctx = aes_ctx(tfm); |
1605 | - padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1); |
1606 | + aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); |
1607 | } |
1608 | |
1609 | static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
1610 | { |
1611 | struct aes_ctx *ctx = aes_ctx(tfm); |
1612 | - padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); |
1613 | + aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); |
1614 | } |
1615 | |
1616 | static struct crypto_alg aes_alg = { |
1617 | diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c |
1618 | index 59c3b5a..ed0d030 100644 |
1619 | --- a/drivers/firmware/dmi-id.c |
1620 | +++ b/drivers/firmware/dmi-id.c |
1621 | @@ -159,8 +159,6 @@ static struct device *dmi_dev; |
1622 | if (dmi_get_system_info(_field)) \ |
1623 | sys_dmi_attributes[i++] = & sys_dmi_##_name##_attr.attr; |
1624 | |
1625 | -extern int dmi_available; |
1626 | - |
1627 | static int __init dmi_id_init(void) |
1628 | { |
1629 | int ret, i; |
1630 | diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c |
1631 | index f1c3d6c..27026f7 100644 |
1632 | --- a/drivers/input/evdev.c |
1633 | +++ b/drivers/input/evdev.c |
1634 | @@ -30,6 +30,8 @@ struct evdev { |
1635 | wait_queue_head_t wait; |
1636 | struct evdev_client *grab; |
1637 | struct list_head client_list; |
1638 | + spinlock_t client_lock; /* protects client_list */ |
1639 | + struct mutex mutex; |
1640 | struct device dev; |
1641 | }; |
1642 | |
1643 | @@ -37,39 +39,53 @@ struct evdev_client { |
1644 | struct input_event buffer[EVDEV_BUFFER_SIZE]; |
1645 | int head; |
1646 | int tail; |
1647 | + spinlock_t buffer_lock; /* protects access to buffer, head and tail */ |
1648 | struct fasync_struct *fasync; |
1649 | struct evdev *evdev; |
1650 | struct list_head node; |
1651 | }; |
1652 | |
1653 | static struct evdev *evdev_table[EVDEV_MINORS]; |
1654 | +static DEFINE_MUTEX(evdev_table_mutex); |
1655 | |
1656 | -static void evdev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) |
1657 | +static void evdev_pass_event(struct evdev_client *client, |
1658 | + struct input_event *event) |
1659 | +{ |
1660 | + /* |
1661 | + * Interrupts are disabled, just acquire the lock |
1662 | + */ |
1663 | + spin_lock(&client->buffer_lock); |
1664 | + client->buffer[client->head++] = *event; |
1665 | + client->head &= EVDEV_BUFFER_SIZE - 1; |
1666 | + spin_unlock(&client->buffer_lock); |
1667 | + |
1668 | + kill_fasync(&client->fasync, SIGIO, POLL_IN); |
1669 | +} |
1670 | + |
1671 | +/* |
1672 | + * Pass incoming event to all connected clients. Note that we are |
1673 | + * caleld under a spinlock with interrupts off so we don't need |
1674 | + * to use rcu_read_lock() here. Writers will be using syncronize_sched() |
1675 | + * instead of synchrnoize_rcu(). |
1676 | + */ |
1677 | +static void evdev_event(struct input_handle *handle, |
1678 | + unsigned int type, unsigned int code, int value) |
1679 | { |
1680 | struct evdev *evdev = handle->private; |
1681 | struct evdev_client *client; |
1682 | + struct input_event event; |
1683 | |
1684 | - if (evdev->grab) { |
1685 | - client = evdev->grab; |
1686 | - |
1687 | - do_gettimeofday(&client->buffer[client->head].time); |
1688 | - client->buffer[client->head].type = type; |
1689 | - client->buffer[client->head].code = code; |
1690 | - client->buffer[client->head].value = value; |
1691 | - client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1); |
1692 | - |
1693 | - kill_fasync(&client->fasync, SIGIO, POLL_IN); |
1694 | - } else |
1695 | - list_for_each_entry(client, &evdev->client_list, node) { |
1696 | - |
1697 | - do_gettimeofday(&client->buffer[client->head].time); |
1698 | - client->buffer[client->head].type = type; |
1699 | - client->buffer[client->head].code = code; |
1700 | - client->buffer[client->head].value = value; |
1701 | - client->head = (client->head + 1) & (EVDEV_BUFFER_SIZE - 1); |
1702 | + do_gettimeofday(&event.time); |
1703 | + event.type = type; |
1704 | + event.code = code; |
1705 | + event.value = value; |
1706 | |
1707 | - kill_fasync(&client->fasync, SIGIO, POLL_IN); |
1708 | - } |
1709 | + client = rcu_dereference(evdev->grab); |
1710 | + if (client) |
1711 | + evdev_pass_event(client, &event); |
1712 | + else |
1713 | + list_for_each_entry_rcu(client, &evdev->client_list, node) |
1714 | + evdev_pass_event(client, &event); |
1715 | |
1716 | wake_up_interruptible(&evdev->wait); |
1717 | } |
1718 | @@ -88,38 +104,145 @@ static int evdev_flush(struct file *file, fl_owner_t id) |
1719 | { |
1720 | struct evdev_client *client = file->private_data; |
1721 | struct evdev *evdev = client->evdev; |
1722 | + int retval; |
1723 | + |
1724 | + retval = mutex_lock_interruptible(&evdev->mutex); |
1725 | + if (retval) |
1726 | + return retval; |
1727 | |
1728 | if (!evdev->exist) |
1729 | - return -ENODEV; |
1730 | + retval = -ENODEV; |
1731 | + else |
1732 | + retval = input_flush_device(&evdev->handle, file); |
1733 | |
1734 | - return input_flush_device(&evdev->handle, file); |
1735 | + mutex_unlock(&evdev->mutex); |
1736 | + return retval; |
1737 | } |
1738 | |
1739 | static void evdev_free(struct device *dev) |
1740 | { |
1741 | struct evdev *evdev = container_of(dev, struct evdev, dev); |
1742 | |
1743 | - evdev_table[evdev->minor] = NULL; |
1744 | kfree(evdev); |
1745 | } |
1746 | |
1747 | +/* |
1748 | + * Grabs an event device (along with underlying input device). |
1749 | + * This function is called with evdev->mutex taken. |
1750 | + */ |
1751 | +static int evdev_grab(struct evdev *evdev, struct evdev_client *client) |
1752 | +{ |
1753 | + int error; |
1754 | + |
1755 | + if (evdev->grab) |
1756 | + return -EBUSY; |
1757 | + |
1758 | + error = input_grab_device(&evdev->handle); |
1759 | + if (error) |
1760 | + return error; |
1761 | + |
1762 | + rcu_assign_pointer(evdev->grab, client); |
1763 | + /* |
1764 | + * We don't use synchronize_rcu() here because read-side |
1765 | + * critical section is protected by a spinlock instead |
1766 | + * of rcu_read_lock(). |
1767 | + */ |
1768 | + synchronize_sched(); |
1769 | + |
1770 | + return 0; |
1771 | +} |
1772 | + |
1773 | +static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client) |
1774 | +{ |
1775 | + if (evdev->grab != client) |
1776 | + return -EINVAL; |
1777 | + |
1778 | + rcu_assign_pointer(evdev->grab, NULL); |
1779 | + synchronize_sched(); |
1780 | + input_release_device(&evdev->handle); |
1781 | + |
1782 | + return 0; |
1783 | +} |
1784 | + |
1785 | +static void evdev_attach_client(struct evdev *evdev, |
1786 | + struct evdev_client *client) |
1787 | +{ |
1788 | + spin_lock(&evdev->client_lock); |
1789 | + list_add_tail_rcu(&client->node, &evdev->client_list); |
1790 | + spin_unlock(&evdev->client_lock); |
1791 | + synchronize_sched(); |
1792 | +} |
1793 | + |
1794 | +static void evdev_detach_client(struct evdev *evdev, |
1795 | + struct evdev_client *client) |
1796 | +{ |
1797 | + spin_lock(&evdev->client_lock); |
1798 | + list_del_rcu(&client->node); |
1799 | + spin_unlock(&evdev->client_lock); |
1800 | + synchronize_sched(); |
1801 | +} |
1802 | + |
1803 | +static int evdev_open_device(struct evdev *evdev) |
1804 | +{ |
1805 | + int retval; |
1806 | + |
1807 | + retval = mutex_lock_interruptible(&evdev->mutex); |
1808 | + if (retval) |
1809 | + return retval; |
1810 | + |
1811 | + if (!evdev->exist) |
1812 | + retval = -ENODEV; |
1813 | + else if (!evdev->open++) { |
1814 | + retval = input_open_device(&evdev->handle); |
1815 | + if (retval) |
1816 | + evdev->open--; |
1817 | + } |
1818 | + |
1819 | + mutex_unlock(&evdev->mutex); |
1820 | + return retval; |
1821 | +} |
1822 | + |
1823 | +static void evdev_close_device(struct evdev *evdev) |
1824 | +{ |
1825 | + mutex_lock(&evdev->mutex); |
1826 | + |
1827 | + if (evdev->exist && !--evdev->open) |
1828 | + input_close_device(&evdev->handle); |
1829 | + |
1830 | + mutex_unlock(&evdev->mutex); |
1831 | +} |
1832 | + |
1833 | +/* |
1834 | + * Wake up users waiting for IO so they can disconnect from |
1835 | + * dead device. |
1836 | + */ |
1837 | +static void evdev_hangup(struct evdev *evdev) |
1838 | +{ |
1839 | + struct evdev_client *client; |
1840 | + |
1841 | + spin_lock(&evdev->client_lock); |
1842 | + list_for_each_entry(client, &evdev->client_list, node) |
1843 | + kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
1844 | + spin_unlock(&evdev->client_lock); |
1845 | + |
1846 | + wake_up_interruptible(&evdev->wait); |
1847 | +} |
1848 | + |
1849 | static int evdev_release(struct inode *inode, struct file *file) |
1850 | { |
1851 | struct evdev_client *client = file->private_data; |
1852 | struct evdev *evdev = client->evdev; |
1853 | |
1854 | - if (evdev->grab == client) { |
1855 | - input_release_device(&evdev->handle); |
1856 | - evdev->grab = NULL; |
1857 | - } |
1858 | + mutex_lock(&evdev->mutex); |
1859 | + if (evdev->grab == client) |
1860 | + evdev_ungrab(evdev, client); |
1861 | + mutex_unlock(&evdev->mutex); |
1862 | |
1863 | evdev_fasync(-1, file, 0); |
1864 | - list_del(&client->node); |
1865 | + evdev_detach_client(evdev, client); |
1866 | kfree(client); |
1867 | |
1868 | - if (!--evdev->open && evdev->exist) |
1869 | - input_close_device(&evdev->handle); |
1870 | - |
1871 | + evdev_close_device(evdev); |
1872 | put_device(&evdev->dev); |
1873 | |
1874 | return 0; |
1875 | @@ -127,41 +250,44 @@ static int evdev_release(struct inode *inode, struct file *file) |
1876 | |
1877 | static int evdev_open(struct inode *inode, struct file *file) |
1878 | { |
1879 | - struct evdev_client *client; |
1880 | struct evdev *evdev; |
1881 | + struct evdev_client *client; |
1882 | int i = iminor(inode) - EVDEV_MINOR_BASE; |
1883 | int error; |
1884 | |
1885 | if (i >= EVDEV_MINORS) |
1886 | return -ENODEV; |
1887 | |
1888 | + error = mutex_lock_interruptible(&evdev_table_mutex); |
1889 | + if (error) |
1890 | + return error; |
1891 | evdev = evdev_table[i]; |
1892 | + if (evdev) |
1893 | + get_device(&evdev->dev); |
1894 | + mutex_unlock(&evdev_table_mutex); |
1895 | |
1896 | - if (!evdev || !evdev->exist) |
1897 | + if (!evdev) |
1898 | return -ENODEV; |
1899 | |
1900 | - get_device(&evdev->dev); |
1901 | - |
1902 | client = kzalloc(sizeof(struct evdev_client), GFP_KERNEL); |
1903 | if (!client) { |
1904 | error = -ENOMEM; |
1905 | goto err_put_evdev; |
1906 | } |
1907 | |
1908 | + spin_lock_init(&client->buffer_lock); |
1909 | client->evdev = evdev; |
1910 | - list_add_tail(&client->node, &evdev->client_list); |
1911 | + evdev_attach_client(evdev, client); |
1912 | |
1913 | - if (!evdev->open++ && evdev->exist) { |
1914 | - error = input_open_device(&evdev->handle); |
1915 | - if (error) |
1916 | - goto err_free_client; |
1917 | - } |
1918 | + error = evdev_open_device(evdev); |
1919 | + if (error) |
1920 | + goto err_free_client; |
1921 | |
1922 | file->private_data = client; |
1923 | return 0; |
1924 | |
1925 | err_free_client: |
1926 | - list_del(&client->node); |
1927 | + evdev_detach_client(evdev, client); |
1928 | kfree(client); |
1929 | err_put_evdev: |
1930 | put_device(&evdev->dev); |
1931 | @@ -197,12 +323,14 @@ static inline size_t evdev_event_size(void) |
1932 | sizeof(struct input_event_compat) : sizeof(struct input_event); |
1933 | } |
1934 | |
1935 | -static int evdev_event_from_user(const char __user *buffer, struct input_event *event) |
1936 | +static int evdev_event_from_user(const char __user *buffer, |
1937 | + struct input_event *event) |
1938 | { |
1939 | if (COMPAT_TEST) { |
1940 | struct input_event_compat compat_event; |
1941 | |
1942 | - if (copy_from_user(&compat_event, buffer, sizeof(struct input_event_compat))) |
1943 | + if (copy_from_user(&compat_event, buffer, |
1944 | + sizeof(struct input_event_compat))) |
1945 | return -EFAULT; |
1946 | |
1947 | event->time.tv_sec = compat_event.time.tv_sec; |
1948 | @@ -219,7 +347,8 @@ static int evdev_event_from_user(const char __user *buffer, struct input_event * |
1949 | return 0; |
1950 | } |
1951 | |
1952 | -static int evdev_event_to_user(char __user *buffer, const struct input_event *event) |
1953 | +static int evdev_event_to_user(char __user *buffer, |
1954 | + const struct input_event *event) |
1955 | { |
1956 | if (COMPAT_TEST) { |
1957 | struct input_event_compat compat_event; |
1958 | @@ -230,7 +359,8 @@ static int evdev_event_to_user(char __user *buffer, const struct input_event *ev |
1959 | compat_event.code = event->code; |
1960 | compat_event.value = event->value; |
1961 | |
1962 | - if (copy_to_user(buffer, &compat_event, sizeof(struct input_event_compat))) |
1963 | + if (copy_to_user(buffer, &compat_event, |
1964 | + sizeof(struct input_event_compat))) |
1965 | return -EFAULT; |
1966 | |
1967 | } else { |
1968 | @@ -248,7 +378,8 @@ static inline size_t evdev_event_size(void) |
1969 | return sizeof(struct input_event); |
1970 | } |
1971 | |
1972 | -static int evdev_event_from_user(const char __user *buffer, struct input_event *event) |
1973 | +static int evdev_event_from_user(const char __user *buffer, |
1974 | + struct input_event *event) |
1975 | { |
1976 | if (copy_from_user(event, buffer, sizeof(struct input_event))) |
1977 | return -EFAULT; |
1978 | @@ -256,7 +387,8 @@ static int evdev_event_from_user(const char __user *buffer, struct input_event * |
1979 | return 0; |
1980 | } |
1981 | |
1982 | -static int evdev_event_to_user(char __user *buffer, const struct input_event *event) |
1983 | +static int evdev_event_to_user(char __user *buffer, |
1984 | + const struct input_event *event) |
1985 | { |
1986 | if (copy_to_user(buffer, event, sizeof(struct input_event))) |
1987 | return -EFAULT; |
1988 | @@ -266,37 +398,71 @@ static int evdev_event_to_user(char __user *buffer, const struct input_event *ev |
1989 | |
1990 | #endif /* CONFIG_COMPAT */ |
1991 | |
1992 | -static ssize_t evdev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) |
1993 | +static ssize_t evdev_write(struct file *file, const char __user *buffer, |
1994 | + size_t count, loff_t *ppos) |
1995 | { |
1996 | struct evdev_client *client = file->private_data; |
1997 | struct evdev *evdev = client->evdev; |
1998 | struct input_event event; |
1999 | - int retval = 0; |
2000 | + int retval; |
2001 | |
2002 | - if (!evdev->exist) |
2003 | - return -ENODEV; |
2004 | + retval = mutex_lock_interruptible(&evdev->mutex); |
2005 | + if (retval) |
2006 | + return retval; |
2007 | + |
2008 | + if (!evdev->exist) { |
2009 | + retval = -ENODEV; |
2010 | + goto out; |
2011 | + } |
2012 | |
2013 | while (retval < count) { |
2014 | |
2015 | - if (evdev_event_from_user(buffer + retval, &event)) |
2016 | - return -EFAULT; |
2017 | - input_inject_event(&evdev->handle, event.type, event.code, event.value); |
2018 | + if (evdev_event_from_user(buffer + retval, &event)) { |
2019 | + retval = -EFAULT; |
2020 | + goto out; |
2021 | + } |
2022 | + |
2023 | + input_inject_event(&evdev->handle, |
2024 | + event.type, event.code, event.value); |
2025 | retval += evdev_event_size(); |
2026 | } |
2027 | |
2028 | + out: |
2029 | + mutex_unlock(&evdev->mutex); |
2030 | return retval; |
2031 | } |
2032 | |
2033 | -static ssize_t evdev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) |
2034 | +static int evdev_fetch_next_event(struct evdev_client *client, |
2035 | + struct input_event *event) |
2036 | +{ |
2037 | + int have_event; |
2038 | + |
2039 | + spin_lock_irq(&client->buffer_lock); |
2040 | + |
2041 | + have_event = client->head != client->tail; |
2042 | + if (have_event) { |
2043 | + *event = client->buffer[client->tail++]; |
2044 | + client->tail &= EVDEV_BUFFER_SIZE - 1; |
2045 | + } |
2046 | + |
2047 | + spin_unlock_irq(&client->buffer_lock); |
2048 | + |
2049 | + return have_event; |
2050 | +} |
2051 | + |
2052 | +static ssize_t evdev_read(struct file *file, char __user *buffer, |
2053 | + size_t count, loff_t *ppos) |
2054 | { |
2055 | struct evdev_client *client = file->private_data; |
2056 | struct evdev *evdev = client->evdev; |
2057 | + struct input_event event; |
2058 | int retval; |
2059 | |
2060 | if (count < evdev_event_size()) |
2061 | return -EINVAL; |
2062 | |
2063 | - if (client->head == client->tail && evdev->exist && (file->f_flags & O_NONBLOCK)) |
2064 | + if (client->head == client->tail && evdev->exist && |
2065 | + (file->f_flags & O_NONBLOCK)) |
2066 | return -EAGAIN; |
2067 | |
2068 | retval = wait_event_interruptible(evdev->wait, |
2069 | @@ -307,14 +473,12 @@ static ssize_t evdev_read(struct file *file, char __user *buffer, size_t count, |
2070 | if (!evdev->exist) |
2071 | return -ENODEV; |
2072 | |
2073 | - while (client->head != client->tail && retval + evdev_event_size() <= count) { |
2074 | + while (retval + evdev_event_size() <= count && |
2075 | + evdev_fetch_next_event(client, &event)) { |
2076 | |
2077 | - struct input_event *event = (struct input_event *) client->buffer + client->tail; |
2078 | - |
2079 | - if (evdev_event_to_user(buffer + retval, event)) |
2080 | + if (evdev_event_to_user(buffer + retval, &event)) |
2081 | return -EFAULT; |
2082 | |
2083 | - client->tail = (client->tail + 1) & (EVDEV_BUFFER_SIZE - 1); |
2084 | retval += evdev_event_size(); |
2085 | } |
2086 | |
2087 | @@ -409,8 +573,8 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p) |
2088 | return copy_to_user(p, str, len) ? -EFAULT : len; |
2089 | } |
2090 | |
2091 | -static long evdev_ioctl_handler(struct file *file, unsigned int cmd, |
2092 | - void __user *p, int compat_mode) |
2093 | +static long evdev_do_ioctl(struct file *file, unsigned int cmd, |
2094 | + void __user *p, int compat_mode) |
2095 | { |
2096 | struct evdev_client *client = file->private_data; |
2097 | struct evdev *evdev = client->evdev; |
2098 | @@ -421,215 +585,289 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd, |
2099 | int i, t, u, v; |
2100 | int error; |
2101 | |
2102 | - if (!evdev->exist) |
2103 | - return -ENODEV; |
2104 | - |
2105 | switch (cmd) { |
2106 | |
2107 | - case EVIOCGVERSION: |
2108 | - return put_user(EV_VERSION, ip); |
2109 | + case EVIOCGVERSION: |
2110 | + return put_user(EV_VERSION, ip); |
2111 | |
2112 | - case EVIOCGID: |
2113 | - if (copy_to_user(p, &dev->id, sizeof(struct input_id))) |
2114 | - return -EFAULT; |
2115 | - return 0; |
2116 | + case EVIOCGID: |
2117 | + if (copy_to_user(p, &dev->id, sizeof(struct input_id))) |
2118 | + return -EFAULT; |
2119 | + return 0; |
2120 | |
2121 | - case EVIOCGREP: |
2122 | - if (!test_bit(EV_REP, dev->evbit)) |
2123 | - return -ENOSYS; |
2124 | - if (put_user(dev->rep[REP_DELAY], ip)) |
2125 | - return -EFAULT; |
2126 | - if (put_user(dev->rep[REP_PERIOD], ip + 1)) |
2127 | - return -EFAULT; |
2128 | - return 0; |
2129 | + case EVIOCGREP: |
2130 | + if (!test_bit(EV_REP, dev->evbit)) |
2131 | + return -ENOSYS; |
2132 | + if (put_user(dev->rep[REP_DELAY], ip)) |
2133 | + return -EFAULT; |
2134 | + if (put_user(dev->rep[REP_PERIOD], ip + 1)) |
2135 | + return -EFAULT; |
2136 | + return 0; |
2137 | |
2138 | - case EVIOCSREP: |
2139 | - if (!test_bit(EV_REP, dev->evbit)) |
2140 | - return -ENOSYS; |
2141 | - if (get_user(u, ip)) |
2142 | - return -EFAULT; |
2143 | - if (get_user(v, ip + 1)) |
2144 | - return -EFAULT; |
2145 | + case EVIOCSREP: |
2146 | + if (!test_bit(EV_REP, dev->evbit)) |
2147 | + return -ENOSYS; |
2148 | + if (get_user(u, ip)) |
2149 | + return -EFAULT; |
2150 | + if (get_user(v, ip + 1)) |
2151 | + return -EFAULT; |
2152 | |
2153 | - input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u); |
2154 | - input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v); |
2155 | + input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u); |
2156 | + input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v); |
2157 | |
2158 | - return 0; |
2159 | + return 0; |
2160 | |
2161 | - case EVIOCGKEYCODE: |
2162 | - if (get_user(t, ip)) |
2163 | - return -EFAULT; |
2164 | + case EVIOCGKEYCODE: |
2165 | + if (get_user(t, ip)) |
2166 | + return -EFAULT; |
2167 | |
2168 | - error = dev->getkeycode(dev, t, &v); |
2169 | - if (error) |
2170 | - return error; |
2171 | + error = dev->getkeycode(dev, t, &v); |
2172 | + if (error) |
2173 | + return error; |
2174 | |
2175 | - if (put_user(v, ip + 1)) |
2176 | - return -EFAULT; |
2177 | + if (put_user(v, ip + 1)) |
2178 | + return -EFAULT; |
2179 | |
2180 | - return 0; |
2181 | + return 0; |
2182 | |
2183 | - case EVIOCSKEYCODE: |
2184 | - if (get_user(t, ip) || get_user(v, ip + 1)) |
2185 | - return -EFAULT; |
2186 | + case EVIOCSKEYCODE: |
2187 | + if (get_user(t, ip) || get_user(v, ip + 1)) |
2188 | + return -EFAULT; |
2189 | |
2190 | - return dev->setkeycode(dev, t, v); |
2191 | + return dev->setkeycode(dev, t, v); |
2192 | |
2193 | - case EVIOCSFF: |
2194 | - if (copy_from_user(&effect, p, sizeof(effect))) |
2195 | - return -EFAULT; |
2196 | + case EVIOCSFF: |
2197 | + if (copy_from_user(&effect, p, sizeof(effect))) |
2198 | + return -EFAULT; |
2199 | |
2200 | - error = input_ff_upload(dev, &effect, file); |
2201 | + error = input_ff_upload(dev, &effect, file); |
2202 | |
2203 | - if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) |
2204 | - return -EFAULT; |
2205 | + if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) |
2206 | + return -EFAULT; |
2207 | |
2208 | - return error; |
2209 | + return error; |
2210 | |
2211 | - case EVIOCRMFF: |
2212 | - return input_ff_erase(dev, (int)(unsigned long) p, file); |
2213 | + case EVIOCRMFF: |
2214 | + return input_ff_erase(dev, (int)(unsigned long) p, file); |
2215 | |
2216 | - case EVIOCGEFFECTS: |
2217 | - i = test_bit(EV_FF, dev->evbit) ? dev->ff->max_effects : 0; |
2218 | - if (put_user(i, ip)) |
2219 | - return -EFAULT; |
2220 | - return 0; |
2221 | - |
2222 | - case EVIOCGRAB: |
2223 | - if (p) { |
2224 | - if (evdev->grab) |
2225 | - return -EBUSY; |
2226 | - if (input_grab_device(&evdev->handle)) |
2227 | - return -EBUSY; |
2228 | - evdev->grab = client; |
2229 | - return 0; |
2230 | - } else { |
2231 | - if (evdev->grab != client) |
2232 | - return -EINVAL; |
2233 | - input_release_device(&evdev->handle); |
2234 | - evdev->grab = NULL; |
2235 | - return 0; |
2236 | - } |
2237 | + case EVIOCGEFFECTS: |
2238 | + i = test_bit(EV_FF, dev->evbit) ? |
2239 | + dev->ff->max_effects : 0; |
2240 | + if (put_user(i, ip)) |
2241 | + return -EFAULT; |
2242 | + return 0; |
2243 | |
2244 | - default: |
2245 | + case EVIOCGRAB: |
2246 | + if (p) |
2247 | + return evdev_grab(evdev, client); |
2248 | + else |
2249 | + return evdev_ungrab(evdev, client); |
2250 | |
2251 | - if (_IOC_TYPE(cmd) != 'E') |
2252 | - return -EINVAL; |
2253 | + default: |
2254 | |
2255 | - if (_IOC_DIR(cmd) == _IOC_READ) { |
2256 | + if (_IOC_TYPE(cmd) != 'E') |
2257 | + return -EINVAL; |
2258 | |
2259 | - if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0,0))) { |
2260 | + if (_IOC_DIR(cmd) == _IOC_READ) { |
2261 | |
2262 | - unsigned long *bits; |
2263 | - int len; |
2264 | + if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) { |
2265 | |
2266 | - switch (_IOC_NR(cmd) & EV_MAX) { |
2267 | - case 0: bits = dev->evbit; len = EV_MAX; break; |
2268 | - case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; |
2269 | - case EV_REL: bits = dev->relbit; len = REL_MAX; break; |
2270 | - case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; |
2271 | - case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; |
2272 | - case EV_LED: bits = dev->ledbit; len = LED_MAX; break; |
2273 | - case EV_SND: bits = dev->sndbit; len = SND_MAX; break; |
2274 | - case EV_FF: bits = dev->ffbit; len = FF_MAX; break; |
2275 | - case EV_SW: bits = dev->swbit; len = SW_MAX; break; |
2276 | - default: return -EINVAL; |
2277 | - } |
2278 | - return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); |
2279 | - } |
2280 | + unsigned long *bits; |
2281 | + int len; |
2282 | |
2283 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) |
2284 | - return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), |
2285 | - p, compat_mode); |
2286 | + switch (_IOC_NR(cmd) & EV_MAX) { |
2287 | |
2288 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) |
2289 | - return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd), |
2290 | - p, compat_mode); |
2291 | + case 0: bits = dev->evbit; len = EV_MAX; break; |
2292 | + case EV_KEY: bits = dev->keybit; len = KEY_MAX; break; |
2293 | + case EV_REL: bits = dev->relbit; len = REL_MAX; break; |
2294 | + case EV_ABS: bits = dev->absbit; len = ABS_MAX; break; |
2295 | + case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break; |
2296 | + case EV_LED: bits = dev->ledbit; len = LED_MAX; break; |
2297 | + case EV_SND: bits = dev->sndbit; len = SND_MAX; break; |
2298 | + case EV_FF: bits = dev->ffbit; len = FF_MAX; break; |
2299 | + case EV_SW: bits = dev->swbit; len = SW_MAX; break; |
2300 | + default: return -EINVAL; |
2301 | + } |
2302 | + return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); |
2303 | + } |
2304 | |
2305 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) |
2306 | - return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd), |
2307 | - p, compat_mode); |
2308 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) |
2309 | + return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), |
2310 | + p, compat_mode); |
2311 | |
2312 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0))) |
2313 | - return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd), |
2314 | - p, compat_mode); |
2315 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGLED(0))) |
2316 | + return bits_to_user(dev->led, LED_MAX, _IOC_SIZE(cmd), |
2317 | + p, compat_mode); |
2318 | |
2319 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) |
2320 | - return str_to_user(dev->name, _IOC_SIZE(cmd), p); |
2321 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSND(0))) |
2322 | + return bits_to_user(dev->snd, SND_MAX, _IOC_SIZE(cmd), |
2323 | + p, compat_mode); |
2324 | |
2325 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) |
2326 | - return str_to_user(dev->phys, _IOC_SIZE(cmd), p); |
2327 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0))) |
2328 | + return bits_to_user(dev->sw, SW_MAX, _IOC_SIZE(cmd), |
2329 | + p, compat_mode); |
2330 | |
2331 | - if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) |
2332 | - return str_to_user(dev->uniq, _IOC_SIZE(cmd), p); |
2333 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) |
2334 | + return str_to_user(dev->name, _IOC_SIZE(cmd), p); |
2335 | |
2336 | - if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { |
2337 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGPHYS(0))) |
2338 | + return str_to_user(dev->phys, _IOC_SIZE(cmd), p); |
2339 | |
2340 | - t = _IOC_NR(cmd) & ABS_MAX; |
2341 | + if (_IOC_NR(cmd) == _IOC_NR(EVIOCGUNIQ(0))) |
2342 | + return str_to_user(dev->uniq, _IOC_SIZE(cmd), p); |
2343 | |
2344 | - abs.value = dev->abs[t]; |
2345 | - abs.minimum = dev->absmin[t]; |
2346 | - abs.maximum = dev->absmax[t]; |
2347 | - abs.fuzz = dev->absfuzz[t]; |
2348 | - abs.flat = dev->absflat[t]; |
2349 | + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { |
2350 | |
2351 | - if (copy_to_user(p, &abs, sizeof(struct input_absinfo))) |
2352 | - return -EFAULT; |
2353 | + t = _IOC_NR(cmd) & ABS_MAX; |
2354 | |
2355 | - return 0; |
2356 | - } |
2357 | + abs.value = dev->abs[t]; |
2358 | + abs.minimum = dev->absmin[t]; |
2359 | + abs.maximum = dev->absmax[t]; |
2360 | + abs.fuzz = dev->absfuzz[t]; |
2361 | + abs.flat = dev->absflat[t]; |
2362 | |
2363 | + if (copy_to_user(p, &abs, sizeof(struct input_absinfo))) |
2364 | + return -EFAULT; |
2365 | + |
2366 | + return 0; |
2367 | } |
2368 | |
2369 | - if (_IOC_DIR(cmd) == _IOC_WRITE) { |
2370 | + } |
2371 | |
2372 | - if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { |
2373 | + if (_IOC_DIR(cmd) == _IOC_WRITE) { |
2374 | |
2375 | - t = _IOC_NR(cmd) & ABS_MAX; |
2376 | + if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { |
2377 | |
2378 | - if (copy_from_user(&abs, p, sizeof(struct input_absinfo))) |
2379 | - return -EFAULT; |
2380 | + t = _IOC_NR(cmd) & ABS_MAX; |
2381 | |
2382 | - dev->abs[t] = abs.value; |
2383 | - dev->absmin[t] = abs.minimum; |
2384 | - dev->absmax[t] = abs.maximum; |
2385 | - dev->absfuzz[t] = abs.fuzz; |
2386 | - dev->absflat[t] = abs.flat; |
2387 | + if (copy_from_user(&abs, p, |
2388 | + sizeof(struct input_absinfo))) |
2389 | + return -EFAULT; |
2390 | |
2391 | - return 0; |
2392 | - } |
2393 | + /* |
2394 | + * Take event lock to ensure that we are not |
2395 | + * changing device parameters in the middle |
2396 | + * of event. |
2397 | + */ |
2398 | + spin_lock_irq(&dev->event_lock); |
2399 | + |
2400 | + dev->abs[t] = abs.value; |
2401 | + dev->absmin[t] = abs.minimum; |
2402 | + dev->absmax[t] = abs.maximum; |
2403 | + dev->absfuzz[t] = abs.fuzz; |
2404 | + dev->absflat[t] = abs.flat; |
2405 | + |
2406 | + spin_unlock_irq(&dev->event_lock); |
2407 | + |
2408 | + return 0; |
2409 | } |
2410 | + } |
2411 | } |
2412 | return -EINVAL; |
2413 | } |
2414 | |
2415 | +static long evdev_ioctl_handler(struct file *file, unsigned int cmd, |
2416 | + void __user *p, int compat_mode) |
2417 | +{ |
2418 | + struct evdev_client *client = file->private_data; |
2419 | + struct evdev *evdev = client->evdev; |
2420 | + int retval; |
2421 | + |
2422 | + retval = mutex_lock_interruptible(&evdev->mutex); |
2423 | + if (retval) |
2424 | + return retval; |
2425 | + |
2426 | + if (!evdev->exist) { |
2427 | + retval = -ENODEV; |
2428 | + goto out; |
2429 | + } |
2430 | + |
2431 | + retval = evdev_do_ioctl(file, cmd, p, compat_mode); |
2432 | + |
2433 | + out: |
2434 | + mutex_unlock(&evdev->mutex); |
2435 | + return retval; |
2436 | +} |
2437 | + |
2438 | static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
2439 | { |
2440 | return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0); |
2441 | } |
2442 | |
2443 | #ifdef CONFIG_COMPAT |
2444 | -static long evdev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) |
2445 | +static long evdev_ioctl_compat(struct file *file, |
2446 | + unsigned int cmd, unsigned long arg) |
2447 | { |
2448 | return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1); |
2449 | } |
2450 | #endif |
2451 | |
2452 | static const struct file_operations evdev_fops = { |
2453 | - .owner = THIS_MODULE, |
2454 | - .read = evdev_read, |
2455 | - .write = evdev_write, |
2456 | - .poll = evdev_poll, |
2457 | - .open = evdev_open, |
2458 | - .release = evdev_release, |
2459 | - .unlocked_ioctl = evdev_ioctl, |
2460 | + .owner = THIS_MODULE, |
2461 | + .read = evdev_read, |
2462 | + .write = evdev_write, |
2463 | + .poll = evdev_poll, |
2464 | + .open = evdev_open, |
2465 | + .release = evdev_release, |
2466 | + .unlocked_ioctl = evdev_ioctl, |
2467 | #ifdef CONFIG_COMPAT |
2468 | - .compat_ioctl = evdev_ioctl_compat, |
2469 | + .compat_ioctl = evdev_ioctl_compat, |
2470 | #endif |
2471 | - .fasync = evdev_fasync, |
2472 | - .flush = evdev_flush |
2473 | + .fasync = evdev_fasync, |
2474 | + .flush = evdev_flush |
2475 | }; |
2476 | |
2477 | +static int evdev_install_chrdev(struct evdev *evdev) |
2478 | +{ |
2479 | + /* |
2480 | + * No need to do any locking here as calls to connect and |
2481 | + * disconnect are serialized by the input core |
2482 | + */ |
2483 | + evdev_table[evdev->minor] = evdev; |
2484 | + return 0; |
2485 | +} |
2486 | + |
2487 | +static void evdev_remove_chrdev(struct evdev *evdev) |
2488 | +{ |
2489 | + /* |
2490 | + * Lock evdev table to prevent race with evdev_open() |
2491 | + */ |
2492 | + mutex_lock(&evdev_table_mutex); |
2493 | + evdev_table[evdev->minor] = NULL; |
2494 | + mutex_unlock(&evdev_table_mutex); |
2495 | +} |
2496 | + |
2497 | +/* |
2498 | + * Mark device non-existent. This disables writes, ioctls and |
2499 | + * prevents new users from opening the device. Already posted |
2500 | + * blocking reads will stay, however new ones will fail. |
2501 | + */ |
2502 | +static void evdev_mark_dead(struct evdev *evdev) |
2503 | +{ |
2504 | + mutex_lock(&evdev->mutex); |
2505 | + evdev->exist = 0; |
2506 | + mutex_unlock(&evdev->mutex); |
2507 | +} |
2508 | + |
2509 | +static void evdev_cleanup(struct evdev *evdev) |
2510 | +{ |
2511 | + struct input_handle *handle = &evdev->handle; |
2512 | + |
2513 | + evdev_mark_dead(evdev); |
2514 | + evdev_hangup(evdev); |
2515 | + evdev_remove_chrdev(evdev); |
2516 | + |
2517 | + /* evdev is marked dead so no one else accesses evdev->open */ |
2518 | + if (evdev->open) { |
2519 | + input_flush_device(handle, NULL); |
2520 | + input_close_device(handle); |
2521 | + } |
2522 | +} |
2523 | + |
2524 | +/* |
2525 | + * Create new evdev device. Note that input core serializes calls |
2526 | + * to connect and disconnect so we don't need to lock evdev_table here. |
2527 | + */ |
2528 | static int evdev_connect(struct input_handler *handler, struct input_dev *dev, |
2529 | const struct input_device_id *id) |
2530 | { |
2531 | @@ -637,7 +875,10 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev, |
2532 | int minor; |
2533 | int error; |
2534 | |
2535 | - for (minor = 0; minor < EVDEV_MINORS && evdev_table[minor]; minor++); |
2536 | + for (minor = 0; minor < EVDEV_MINORS; minor++) |
2537 | + if (!evdev_table[minor]) |
2538 | + break; |
2539 | + |
2540 | if (minor == EVDEV_MINORS) { |
2541 | printk(KERN_ERR "evdev: no more free evdev devices\n"); |
2542 | return -ENFILE; |
2543 | @@ -648,38 +889,44 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev, |
2544 | return -ENOMEM; |
2545 | |
2546 | INIT_LIST_HEAD(&evdev->client_list); |
2547 | + spin_lock_init(&evdev->client_lock); |
2548 | + mutex_init(&evdev->mutex); |
2549 | init_waitqueue_head(&evdev->wait); |
2550 | |
2551 | + snprintf(evdev->name, sizeof(evdev->name), "event%d", minor); |
2552 | evdev->exist = 1; |
2553 | evdev->minor = minor; |
2554 | + |
2555 | evdev->handle.dev = dev; |
2556 | evdev->handle.name = evdev->name; |
2557 | evdev->handle.handler = handler; |
2558 | evdev->handle.private = evdev; |
2559 | - snprintf(evdev->name, sizeof(evdev->name), "event%d", minor); |
2560 | |
2561 | - snprintf(evdev->dev.bus_id, sizeof(evdev->dev.bus_id), |
2562 | - "event%d", minor); |
2563 | + strlcpy(evdev->dev.bus_id, evdev->name, sizeof(evdev->dev.bus_id)); |
2564 | + evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor); |
2565 | evdev->dev.class = &input_class; |
2566 | evdev->dev.parent = &dev->dev; |
2567 | - evdev->dev.devt = MKDEV(INPUT_MAJOR, EVDEV_MINOR_BASE + minor); |
2568 | evdev->dev.release = evdev_free; |
2569 | device_initialize(&evdev->dev); |
2570 | |
2571 | - evdev_table[minor] = evdev; |
2572 | - |
2573 | - error = device_add(&evdev->dev); |
2574 | + error = input_register_handle(&evdev->handle); |
2575 | if (error) |
2576 | goto err_free_evdev; |
2577 | |
2578 | - error = input_register_handle(&evdev->handle); |
2579 | + error = evdev_install_chrdev(evdev); |
2580 | + if (error) |
2581 | + goto err_unregister_handle; |
2582 | + |
2583 | + error = device_add(&evdev->dev); |
2584 | if (error) |
2585 | - goto err_delete_evdev; |
2586 | + goto err_cleanup_evdev; |
2587 | |
2588 | return 0; |
2589 | |
2590 | - err_delete_evdev: |
2591 | - device_del(&evdev->dev); |
2592 | + err_cleanup_evdev: |
2593 | + evdev_cleanup(evdev); |
2594 | + err_unregister_handle: |
2595 | + input_unregister_handle(&evdev->handle); |
2596 | err_free_evdev: |
2597 | put_device(&evdev->dev); |
2598 | return error; |
2599 | @@ -688,21 +935,10 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev, |
2600 | static void evdev_disconnect(struct input_handle *handle) |
2601 | { |
2602 | struct evdev *evdev = handle->private; |
2603 | - struct evdev_client *client; |
2604 | |
2605 | - input_unregister_handle(handle); |
2606 | device_del(&evdev->dev); |
2607 | - |
2608 | - evdev->exist = 0; |
2609 | - |
2610 | - if (evdev->open) { |
2611 | - input_flush_device(handle, NULL); |
2612 | - input_close_device(handle); |
2613 | - list_for_each_entry(client, &evdev->client_list, node) |
2614 | - kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
2615 | - wake_up_interruptible(&evdev->wait); |
2616 | - } |
2617 | - |
2618 | + evdev_cleanup(evdev); |
2619 | + input_unregister_handle(handle); |
2620 | put_device(&evdev->dev); |
2621 | } |
2622 | |
2623 | @@ -714,13 +950,13 @@ static const struct input_device_id evdev_ids[] = { |
2624 | MODULE_DEVICE_TABLE(input, evdev_ids); |
2625 | |
2626 | static struct input_handler evdev_handler = { |
2627 | - .event = evdev_event, |
2628 | - .connect = evdev_connect, |
2629 | - .disconnect = evdev_disconnect, |
2630 | - .fops = &evdev_fops, |
2631 | - .minor = EVDEV_MINOR_BASE, |
2632 | - .name = "evdev", |
2633 | - .id_table = evdev_ids, |
2634 | + .event = evdev_event, |
2635 | + .connect = evdev_connect, |
2636 | + .disconnect = evdev_disconnect, |
2637 | + .fops = &evdev_fops, |
2638 | + .minor = EVDEV_MINOR_BASE, |
2639 | + .name = "evdev", |
2640 | + .id_table = evdev_ids, |
2641 | }; |
2642 | |
2643 | static int __init evdev_init(void) |
2644 | diff --git a/drivers/input/input.c b/drivers/input/input.c |
2645 | index 5fe7555..c59544f 100644 |
2646 | --- a/drivers/input/input.c |
2647 | +++ b/drivers/input/input.c |
2648 | @@ -17,10 +17,10 @@ |
2649 | #include <linux/major.h> |
2650 | #include <linux/proc_fs.h> |
2651 | #include <linux/seq_file.h> |
2652 | -#include <linux/interrupt.h> |
2653 | #include <linux/poll.h> |
2654 | #include <linux/device.h> |
2655 | #include <linux/mutex.h> |
2656 | +#include <linux/rcupdate.h> |
2657 | |
2658 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); |
2659 | MODULE_DESCRIPTION("Input core"); |
2660 | @@ -31,167 +31,244 @@ MODULE_LICENSE("GPL"); |
2661 | static LIST_HEAD(input_dev_list); |
2662 | static LIST_HEAD(input_handler_list); |
2663 | |
2664 | +/* |
2665 | + * input_mutex protects access to both input_dev_list and input_handler_list. |
2666 | + * This also causes input_[un]register_device and input_[un]register_handler |
2667 | + * be mutually exclusive which simplifies locking in drivers implementing |
2668 | + * input handlers. |
2669 | + */ |
2670 | +static DEFINE_MUTEX(input_mutex); |
2671 | + |
2672 | static struct input_handler *input_table[8]; |
2673 | |
2674 | -/** |
2675 | - * input_event() - report new input event |
2676 | - * @dev: device that generated the event |
2677 | - * @type: type of the event |
2678 | - * @code: event code |
2679 | - * @value: value of the event |
2680 | - * |
2681 | - * This function should be used by drivers implementing various input devices |
2682 | - * See also input_inject_event() |
2683 | - */ |
2684 | -void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) |
2685 | +static inline int is_event_supported(unsigned int code, |
2686 | + unsigned long *bm, unsigned int max) |
2687 | { |
2688 | - struct input_handle *handle; |
2689 | + return code <= max && test_bit(code, bm); |
2690 | +} |
2691 | |
2692 | - if (type > EV_MAX || !test_bit(type, dev->evbit)) |
2693 | - return; |
2694 | +static int input_defuzz_abs_event(int value, int old_val, int fuzz) |
2695 | +{ |
2696 | + if (fuzz) { |
2697 | + if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2) |
2698 | + return old_val; |
2699 | |
2700 | - add_input_randomness(type, code, value); |
2701 | + if (value > old_val - fuzz && value < old_val + fuzz) |
2702 | + return (old_val * 3 + value) / 4; |
2703 | |
2704 | - switch (type) { |
2705 | + if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2) |
2706 | + return (old_val + value) / 2; |
2707 | + } |
2708 | |
2709 | - case EV_SYN: |
2710 | - switch (code) { |
2711 | - case SYN_CONFIG: |
2712 | - if (dev->event) |
2713 | - dev->event(dev, type, code, value); |
2714 | - break; |
2715 | - |
2716 | - case SYN_REPORT: |
2717 | - if (dev->sync) |
2718 | - return; |
2719 | - dev->sync = 1; |
2720 | - break; |
2721 | - } |
2722 | - break; |
2723 | + return value; |
2724 | +} |
2725 | |
2726 | - case EV_KEY: |
2727 | +/* |
2728 | + * Pass event through all open handles. This function is called with |
2729 | + * dev->event_lock held and interrupts disabled. Because of that we |
2730 | + * do not need to use rcu_read_lock() here although we are using RCU |
2731 | + * to access handle list. Note that because of that write-side uses |
2732 | + * synchronize_sched() instead of synchronize_rcu(). |
2733 | + */ |
2734 | +static void input_pass_event(struct input_dev *dev, |
2735 | + unsigned int type, unsigned int code, int value) |
2736 | +{ |
2737 | + struct input_handle *handle = rcu_dereference(dev->grab); |
2738 | |
2739 | - if (code > KEY_MAX || !test_bit(code, dev->keybit) || !!test_bit(code, dev->key) == value) |
2740 | - return; |
2741 | + if (handle) |
2742 | + handle->handler->event(handle, type, code, value); |
2743 | + else |
2744 | + list_for_each_entry_rcu(handle, &dev->h_list, d_node) |
2745 | + if (handle->open) |
2746 | + handle->handler->event(handle, |
2747 | + type, code, value); |
2748 | +} |
2749 | |
2750 | - if (value == 2) |
2751 | - break; |
2752 | +/* |
2753 | + * Generate software autorepeat event. Note that we take |
2754 | + * dev->event_lock here to avoid racing with input_event |
2755 | + * which may cause keys get "stuck". |
2756 | + */ |
2757 | +static void input_repeat_key(unsigned long data) |
2758 | +{ |
2759 | + struct input_dev *dev = (void *) data; |
2760 | + unsigned long flags; |
2761 | |
2762 | - change_bit(code, dev->key); |
2763 | + spin_lock_irqsave(&dev->event_lock, flags); |
2764 | |
2765 | - if (test_bit(EV_REP, dev->evbit) && dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && dev->timer.data && value) { |
2766 | - dev->repeat_key = code; |
2767 | - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); |
2768 | - } |
2769 | + if (test_bit(dev->repeat_key, dev->key) && |
2770 | + is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { |
2771 | |
2772 | - break; |
2773 | + input_pass_event(dev, EV_KEY, dev->repeat_key, 2); |
2774 | |
2775 | - case EV_SW: |
2776 | + if (dev->sync) { |
2777 | + /* |
2778 | + * Only send SYN_REPORT if we are not in a middle |
2779 | + * of driver parsing a new hardware packet. |
2780 | + * Otherwise assume that the driver will send |
2781 | + * SYN_REPORT once it's done. |
2782 | + */ |
2783 | + input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
2784 | + } |
2785 | |
2786 | - if (code > SW_MAX || !test_bit(code, dev->swbit) || !!test_bit(code, dev->sw) == value) |
2787 | - return; |
2788 | + if (dev->rep[REP_PERIOD]) |
2789 | + mod_timer(&dev->timer, jiffies + |
2790 | + msecs_to_jiffies(dev->rep[REP_PERIOD])); |
2791 | + } |
2792 | |
2793 | - change_bit(code, dev->sw); |
2794 | + spin_unlock_irqrestore(&dev->event_lock, flags); |
2795 | +} |
2796 | |
2797 | - break; |
2798 | +static void input_start_autorepeat(struct input_dev *dev, int code) |
2799 | +{ |
2800 | + if (test_bit(EV_REP, dev->evbit) && |
2801 | + dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && |
2802 | + dev->timer.data) { |
2803 | + dev->repeat_key = code; |
2804 | + mod_timer(&dev->timer, |
2805 | + jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); |
2806 | + } |
2807 | +} |
2808 | |
2809 | - case EV_ABS: |
2810 | +#define INPUT_IGNORE_EVENT 0 |
2811 | +#define INPUT_PASS_TO_HANDLERS 1 |
2812 | +#define INPUT_PASS_TO_DEVICE 2 |
2813 | +#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) |
2814 | |
2815 | - if (code > ABS_MAX || !test_bit(code, dev->absbit)) |
2816 | - return; |
2817 | +static void input_handle_event(struct input_dev *dev, |
2818 | + unsigned int type, unsigned int code, int value) |
2819 | +{ |
2820 | + int disposition = INPUT_IGNORE_EVENT; |
2821 | |
2822 | - if (dev->absfuzz[code]) { |
2823 | - if ((value > dev->abs[code] - (dev->absfuzz[code] >> 1)) && |
2824 | - (value < dev->abs[code] + (dev->absfuzz[code] >> 1))) |
2825 | - return; |
2826 | + switch (type) { |
2827 | |
2828 | - if ((value > dev->abs[code] - dev->absfuzz[code]) && |
2829 | - (value < dev->abs[code] + dev->absfuzz[code])) |
2830 | - value = (dev->abs[code] * 3 + value) >> 2; |
2831 | + case EV_SYN: |
2832 | + switch (code) { |
2833 | + case SYN_CONFIG: |
2834 | + disposition = INPUT_PASS_TO_ALL; |
2835 | + break; |
2836 | |
2837 | - if ((value > dev->abs[code] - (dev->absfuzz[code] << 1)) && |
2838 | - (value < dev->abs[code] + (dev->absfuzz[code] << 1))) |
2839 | - value = (dev->abs[code] + value) >> 1; |
2840 | + case SYN_REPORT: |
2841 | + if (!dev->sync) { |
2842 | + dev->sync = 1; |
2843 | + disposition = INPUT_PASS_TO_HANDLERS; |
2844 | } |
2845 | - |
2846 | - if (dev->abs[code] == value) |
2847 | - return; |
2848 | - |
2849 | - dev->abs[code] = value; |
2850 | break; |
2851 | + } |
2852 | + break; |
2853 | |
2854 | - case EV_REL: |
2855 | + case EV_KEY: |
2856 | + if (is_event_supported(code, dev->keybit, KEY_MAX) && |
2857 | + !!test_bit(code, dev->key) != value) { |
2858 | |
2859 | - if (code > REL_MAX || !test_bit(code, dev->relbit) || (value == 0)) |
2860 | - return; |
2861 | + if (value != 2) { |
2862 | + __change_bit(code, dev->key); |
2863 | + if (value) |
2864 | + input_start_autorepeat(dev, code); |
2865 | + } |
2866 | |
2867 | - break; |
2868 | + disposition = INPUT_PASS_TO_HANDLERS; |
2869 | + } |
2870 | + break; |
2871 | |
2872 | - case EV_MSC: |
2873 | + case EV_SW: |
2874 | + if (is_event_supported(code, dev->swbit, SW_MAX) && |
2875 | + !!test_bit(code, dev->sw) != value) { |
2876 | |
2877 | - if (code > MSC_MAX || !test_bit(code, dev->mscbit)) |
2878 | - return; |
2879 | + __change_bit(code, dev->sw); |
2880 | + disposition = INPUT_PASS_TO_HANDLERS; |
2881 | + } |
2882 | + break; |
2883 | |
2884 | - if (dev->event) |
2885 | - dev->event(dev, type, code, value); |
2886 | + case EV_ABS: |
2887 | + if (is_event_supported(code, dev->absbit, ABS_MAX)) { |
2888 | |
2889 | - break; |
2890 | + value = input_defuzz_abs_event(value, |
2891 | + dev->abs[code], dev->absfuzz[code]); |
2892 | |
2893 | - case EV_LED: |
2894 | + if (dev->abs[code] != value) { |
2895 | + dev->abs[code] = value; |
2896 | + disposition = INPUT_PASS_TO_HANDLERS; |
2897 | + } |
2898 | + } |
2899 | + break; |
2900 | |
2901 | - if (code > LED_MAX || !test_bit(code, dev->ledbit) || !!test_bit(code, dev->led) == value) |
2902 | - return; |
2903 | + case EV_REL: |
2904 | + if (is_event_supported(code, dev->relbit, REL_MAX) && value) |
2905 | + disposition = INPUT_PASS_TO_HANDLERS; |
2906 | |
2907 | - change_bit(code, dev->led); |
2908 | + break; |
2909 | |
2910 | - if (dev->event) |
2911 | - dev->event(dev, type, code, value); |
2912 | + case EV_MSC: |
2913 | + if (is_event_supported(code, dev->mscbit, MSC_MAX)) |
2914 | + disposition = INPUT_PASS_TO_ALL; |
2915 | |
2916 | - break; |
2917 | + break; |
2918 | + |
2919 | + case EV_LED: |
2920 | + if (is_event_supported(code, dev->ledbit, LED_MAX) && |
2921 | + !!test_bit(code, dev->led) != value) { |
2922 | |
2923 | - case EV_SND: |
2924 | + __change_bit(code, dev->led); |
2925 | + disposition = INPUT_PASS_TO_ALL; |
2926 | + } |
2927 | + break; |
2928 | |
2929 | - if (code > SND_MAX || !test_bit(code, dev->sndbit)) |
2930 | - return; |
2931 | + case EV_SND: |
2932 | + if (is_event_supported(code, dev->sndbit, SND_MAX)) { |
2933 | |
2934 | if (!!test_bit(code, dev->snd) != !!value) |
2935 | - change_bit(code, dev->snd); |
2936 | + __change_bit(code, dev->snd); |
2937 | + disposition = INPUT_PASS_TO_ALL; |
2938 | + } |
2939 | + break; |
2940 | |
2941 | - if (dev->event) |
2942 | - dev->event(dev, type, code, value); |
2943 | + case EV_REP: |
2944 | + if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { |
2945 | + dev->rep[code] = value; |
2946 | + disposition = INPUT_PASS_TO_ALL; |
2947 | + } |
2948 | + break; |
2949 | |
2950 | - break; |
2951 | + case EV_FF: |
2952 | + if (value >= 0) |
2953 | + disposition = INPUT_PASS_TO_ALL; |
2954 | + break; |
2955 | + } |
2956 | |
2957 | - case EV_REP: |
2958 | + if (type != EV_SYN) |
2959 | + dev->sync = 0; |
2960 | |
2961 | - if (code > REP_MAX || value < 0 || dev->rep[code] == value) |
2962 | - return; |
2963 | + if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) |
2964 | + dev->event(dev, type, code, value); |
2965 | |
2966 | - dev->rep[code] = value; |
2967 | - if (dev->event) |
2968 | - dev->event(dev, type, code, value); |
2969 | + if (disposition & INPUT_PASS_TO_HANDLERS) |
2970 | + input_pass_event(dev, type, code, value); |
2971 | +} |
2972 | |
2973 | - break; |
2974 | +/** |
2975 | + * input_event() - report new input event |
2976 | + * @dev: device that generated the event |
2977 | + * @type: type of the event |
2978 | + * @code: event code |
2979 | + * @value: value of the event |
2980 | + * |
2981 | + * This function should be used by drivers implementing various input |
2982 | + * devices. See also input_inject_event(). |
2983 | + */ |
2984 | |
2985 | - case EV_FF: |
2986 | +void input_event(struct input_dev *dev, |
2987 | + unsigned int type, unsigned int code, int value) |
2988 | +{ |
2989 | + unsigned long flags; |
2990 | |
2991 | - if (value < 0) |
2992 | - return; |
2993 | + if (is_event_supported(type, dev->evbit, EV_MAX)) { |
2994 | |
2995 | - if (dev->event) |
2996 | - dev->event(dev, type, code, value); |
2997 | - break; |
2998 | + spin_lock_irqsave(&dev->event_lock, flags); |
2999 | + add_input_randomness(type, code, value); |
3000 | + input_handle_event(dev, type, code, value); |
3001 | + spin_unlock_irqrestore(&dev->event_lock, flags); |
3002 | } |
3003 | - |
3004 | - if (type != EV_SYN) |
3005 | - dev->sync = 0; |
3006 | - |
3007 | - if (dev->grab) |
3008 | - dev->grab->handler->event(dev->grab, type, code, value); |
3009 | - else |
3010 | - list_for_each_entry(handle, &dev->h_list, d_node) |
3011 | - if (handle->open) |
3012 | - handle->handler->event(handle, type, code, value); |
3013 | } |
3014 | EXPORT_SYMBOL(input_event); |
3015 | |
3016 | @@ -202,102 +279,230 @@ EXPORT_SYMBOL(input_event); |
3017 | * @code: event code |
3018 | * @value: value of the event |
3019 | * |
3020 | - * Similar to input_event() but will ignore event if device is "grabbed" and handle |
3021 | - * injecting event is not the one that owns the device. |
3022 | + * Similar to input_event() but will ignore event if device is |
3023 | + * "grabbed" and handle injecting event is not the one that owns |
3024 | + * the device. |
3025 | */ |
3026 | -void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) |
3027 | +void input_inject_event(struct input_handle *handle, |
3028 | + unsigned int type, unsigned int code, int value) |
3029 | { |
3030 | - if (!handle->dev->grab || handle->dev->grab == handle) |
3031 | - input_event(handle->dev, type, code, value); |
3032 | -} |
3033 | -EXPORT_SYMBOL(input_inject_event); |
3034 | - |
3035 | -static void input_repeat_key(unsigned long data) |
3036 | -{ |
3037 | - struct input_dev *dev = (void *) data; |
3038 | + struct input_dev *dev = handle->dev; |
3039 | + struct input_handle *grab; |
3040 | + unsigned long flags; |
3041 | |
3042 | - if (!test_bit(dev->repeat_key, dev->key)) |
3043 | - return; |
3044 | + if (is_event_supported(type, dev->evbit, EV_MAX)) { |
3045 | + spin_lock_irqsave(&dev->event_lock, flags); |
3046 | |
3047 | - input_event(dev, EV_KEY, dev->repeat_key, 2); |
3048 | - input_sync(dev); |
3049 | + grab = rcu_dereference(dev->grab); |
3050 | + if (!grab || grab == handle) |
3051 | + input_handle_event(dev, type, code, value); |
3052 | |
3053 | - if (dev->rep[REP_PERIOD]) |
3054 | - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(dev->rep[REP_PERIOD])); |
3055 | + spin_unlock_irqrestore(&dev->event_lock, flags); |
3056 | + } |
3057 | } |
3058 | +EXPORT_SYMBOL(input_inject_event); |
3059 | |
3060 | +/** |
3061 | + * input_grab_device - grabs device for exclusive use |
3062 | + * @handle: input handle that wants to own the device |
3063 | + * |
3064 | + * When a device is grabbed by an input handle all events generated by |
3065 | + * the device are delivered only to this handle. Also events injected |
3066 | + * by other input handles are ignored while device is grabbed. |
3067 | + */ |
3068 | int input_grab_device(struct input_handle *handle) |
3069 | { |
3070 | - if (handle->dev->grab) |
3071 | - return -EBUSY; |
3072 | + struct input_dev *dev = handle->dev; |
3073 | + int retval; |
3074 | |
3075 | - handle->dev->grab = handle; |
3076 | - return 0; |
3077 | + retval = mutex_lock_interruptible(&dev->mutex); |
3078 | + if (retval) |
3079 | + return retval; |
3080 | + |
3081 | + if (dev->grab) { |
3082 | + retval = -EBUSY; |
3083 | + goto out; |
3084 | + } |
3085 | + |
3086 | + rcu_assign_pointer(dev->grab, handle); |
3087 | + /* |
3088 | + * Not using synchronize_rcu() because read-side is protected |
3089 | + * by a spinlock with interrupts off instead of rcu_read_lock(). |
3090 | + */ |
3091 | + synchronize_sched(); |
3092 | + |
3093 | + out: |
3094 | + mutex_unlock(&dev->mutex); |
3095 | + return retval; |
3096 | } |
3097 | EXPORT_SYMBOL(input_grab_device); |
3098 | |
3099 | -void input_release_device(struct input_handle *handle) |
3100 | +static void __input_release_device(struct input_handle *handle) |
3101 | { |
3102 | struct input_dev *dev = handle->dev; |
3103 | |
3104 | if (dev->grab == handle) { |
3105 | - dev->grab = NULL; |
3106 | + rcu_assign_pointer(dev->grab, NULL); |
3107 | + /* Make sure input_pass_event() notices that grab is gone */ |
3108 | + synchronize_sched(); |
3109 | |
3110 | list_for_each_entry(handle, &dev->h_list, d_node) |
3111 | - if (handle->handler->start) |
3112 | + if (handle->open && handle->handler->start) |
3113 | handle->handler->start(handle); |
3114 | } |
3115 | } |
3116 | + |
3117 | +/** |
3118 | + * input_release_device - release previously grabbed device |
3119 | + * @handle: input handle that owns the device |
3120 | + * |
3121 | + * Releases previously grabbed device so that other input handles can |
3122 | + * start receiving input events. Upon release all handlers attached |
3123 | + * to the device have their start() method called so they have a chance |
3124 | + * to synchronize device state with the rest of the system. |
3125 | + */ |
3126 | +void input_release_device(struct input_handle *handle) |
3127 | +{ |
3128 | + struct input_dev *dev = handle->dev; |
3129 | + |
3130 | + mutex_lock(&dev->mutex); |
3131 | + __input_release_device(handle); |
3132 | + mutex_unlock(&dev->mutex); |
3133 | +} |
3134 | EXPORT_SYMBOL(input_release_device); |
3135 | |
3136 | +/** |
3137 | + * input_open_device - open input device |
3138 | + * @handle: handle through which device is being accessed |
3139 | + * |
3140 | + * This function should be called by input handlers when they |
3141 | + * want to start receiving events from the given input device. |
3142 | + */ |
3143 | int input_open_device(struct input_handle *handle) |
3144 | { |
3145 | struct input_dev *dev = handle->dev; |
3146 | - int err; |
3147 | + int retval; |
3148 | |
3149 | - err = mutex_lock_interruptible(&dev->mutex); |
3150 | - if (err) |
3151 | - return err; |
3152 | + retval = mutex_lock_interruptible(&dev->mutex); |
3153 | + if (retval) |
3154 | + return retval; |
3155 | + |
3156 | + if (dev->going_away) { |
3157 | + retval = -ENODEV; |
3158 | + goto out; |
3159 | + } |
3160 | |
3161 | handle->open++; |
3162 | |
3163 | if (!dev->users++ && dev->open) |
3164 | - err = dev->open(dev); |
3165 | - |
3166 | - if (err) |
3167 | - handle->open--; |
3168 | + retval = dev->open(dev); |
3169 | + |
3170 | + if (retval) { |
3171 | + dev->users--; |
3172 | + if (!--handle->open) { |
3173 | + /* |
3174 | + * Make sure we are not delivering any more events |
3175 | + * through this handle |
3176 | + */ |
3177 | + synchronize_sched(); |
3178 | + } |
3179 | + } |
3180 | |
3181 | + out: |
3182 | mutex_unlock(&dev->mutex); |
3183 | - |
3184 | - return err; |
3185 | + return retval; |
3186 | } |
3187 | EXPORT_SYMBOL(input_open_device); |
3188 | |
3189 | -int input_flush_device(struct input_handle* handle, struct file* file) |
3190 | +int input_flush_device(struct input_handle *handle, struct file *file) |
3191 | { |
3192 | - if (handle->dev->flush) |
3193 | - return handle->dev->flush(handle->dev, file); |
3194 | + struct input_dev *dev = handle->dev; |
3195 | + int retval; |
3196 | |
3197 | - return 0; |
3198 | + retval = mutex_lock_interruptible(&dev->mutex); |
3199 | + if (retval) |
3200 | + return retval; |
3201 | + |
3202 | + if (dev->flush) |
3203 | + retval = dev->flush(dev, file); |
3204 | + |
3205 | + mutex_unlock(&dev->mutex); |
3206 | + return retval; |
3207 | } |
3208 | EXPORT_SYMBOL(input_flush_device); |
3209 | |
3210 | +/** |
3211 | + * input_close_device - close input device |
3212 | + * @handle: handle through which device is being accessed |
3213 | + * |
3214 | + * This function should be called by input handlers when they |
3215 | + * want to stop receiving events from the given input device. |
3216 | + */ |
3217 | void input_close_device(struct input_handle *handle) |
3218 | { |
3219 | struct input_dev *dev = handle->dev; |
3220 | |
3221 | - input_release_device(handle); |
3222 | - |
3223 | mutex_lock(&dev->mutex); |
3224 | |
3225 | + __input_release_device(handle); |
3226 | + |
3227 | if (!--dev->users && dev->close) |
3228 | dev->close(dev); |
3229 | - handle->open--; |
3230 | + |
3231 | + if (!--handle->open) { |
3232 | + /* |
3233 | + * synchronize_sched() makes sure that input_pass_event() |
3234 | + * completed and that no more input events are delivered |
3235 | + * through this handle |
3236 | + */ |
3237 | + synchronize_sched(); |
3238 | + } |
3239 | |
3240 | mutex_unlock(&dev->mutex); |
3241 | } |
3242 | EXPORT_SYMBOL(input_close_device); |
3243 | |
3244 | +/* |
3245 | + * Prepare device for unregistering |
3246 | + */ |
3247 | +static void input_disconnect_device(struct input_dev *dev) |
3248 | +{ |
3249 | + struct input_handle *handle; |
3250 | + int code; |
3251 | + |
3252 | + /* |
3253 | + * Mark device as going away. Note that we take dev->mutex here |
3254 | + * not to protect access to dev->going_away but rather to ensure |
3255 | + * that there are no threads in the middle of input_open_device() |
3256 | + */ |
3257 | + mutex_lock(&dev->mutex); |
3258 | + dev->going_away = 1; |
3259 | + mutex_unlock(&dev->mutex); |
3260 | + |
3261 | + spin_lock_irq(&dev->event_lock); |
3262 | + |
3263 | + /* |
3264 | + * Simulate keyup events for all pressed keys so that handlers |
3265 | + * are not left with "stuck" keys. The driver may continue to |
3266 | + * generate events even after we are done here, but they will |
3267 | + * not reach any handlers. |
3268 | + */ |
3269 | + if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) { |
3270 | + for (code = 0; code <= KEY_MAX; code++) { |
3271 | + if (is_event_supported(code, dev->keybit, KEY_MAX) && |
3272 | + test_bit(code, dev->key)) { |
3273 | + input_pass_event(dev, EV_KEY, code, 0); |
3274 | + } |
3275 | + } |
3276 | + input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
3277 | + } |
3278 | + |
3279 | + list_for_each_entry(handle, &dev->h_list, d_node) |
3280 | + handle->open = 0; |
3281 | + |
3282 | + spin_unlock_irq(&dev->event_lock); |
3283 | +} |
3284 | + |
3285 | static int input_fetch_keycode(struct input_dev *dev, int scancode) |
3286 | { |
3287 | switch (dev->keycodesize) { |
3288 | @@ -473,7 +678,8 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) |
3289 | |
3290 | static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) |
3291 | { |
3292 | - /* acquire lock here ... Yes, we do need locking, I knowi, I know... */ |
3293 | + if (mutex_lock_interruptible(&input_mutex)) |
3294 | + return NULL; |
3295 | |
3296 | return seq_list_start(&input_dev_list, *pos); |
3297 | } |
3298 | @@ -485,7 +691,7 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
3299 | |
3300 | static void input_devices_seq_stop(struct seq_file *seq, void *v) |
3301 | { |
3302 | - /* release lock here */ |
3303 | + mutex_unlock(&input_mutex); |
3304 | } |
3305 | |
3306 | static void input_seq_print_bitmap(struct seq_file *seq, const char *name, |
3307 | @@ -569,7 +775,9 @@ static const struct file_operations input_devices_fileops = { |
3308 | |
3309 | static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) |
3310 | { |
3311 | - /* acquire lock here ... Yes, we do need locking, I knowi, I know... */ |
3312 | + if (mutex_lock_interruptible(&input_mutex)) |
3313 | + return NULL; |
3314 | + |
3315 | seq->private = (void *)(unsigned long)*pos; |
3316 | return seq_list_start(&input_handler_list, *pos); |
3317 | } |
3318 | @@ -582,7 +790,7 @@ static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
3319 | |
3320 | static void input_handlers_seq_stop(struct seq_file *seq, void *v) |
3321 | { |
3322 | - /* release lock here */ |
3323 | + mutex_unlock(&input_mutex); |
3324 | } |
3325 | |
3326 | static int input_handlers_seq_show(struct seq_file *seq, void *v) |
3327 | @@ -1005,6 +1213,7 @@ struct input_dev *input_allocate_device(void) |
3328 | dev->dev.class = &input_class; |
3329 | device_initialize(&dev->dev); |
3330 | mutex_init(&dev->mutex); |
3331 | + spin_lock_init(&dev->event_lock); |
3332 | INIT_LIST_HEAD(&dev->h_list); |
3333 | INIT_LIST_HEAD(&dev->node); |
3334 | |
3335 | @@ -1022,7 +1231,7 @@ EXPORT_SYMBOL(input_allocate_device); |
3336 | * This function should only be used if input_register_device() |
3337 | * was not called yet or if it failed. Once device was registered |
3338 | * use input_unregister_device() and memory will be freed once last |
3339 | - * refrence to the device is dropped. |
3340 | + * reference to the device is dropped. |
3341 | * |
3342 | * Device should be allocated by input_allocate_device(). |
3343 | * |
3344 | @@ -1092,6 +1301,18 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int |
3345 | } |
3346 | EXPORT_SYMBOL(input_set_capability); |
3347 | |
3348 | +/** |
3349 | + * input_register_device - register device with input core |
3350 | + * @dev: device to be registered |
3351 | + * |
3352 | + * This function registers device with input core. The device must be |
3353 | + * allocated with input_allocate_device() and all it's capabilities |
3354 | + * set up before registering. |
3355 | + * If function fails the device must be freed with input_free_device(). |
3356 | + * Once device has been successfully registered it can be unregistered |
3357 | + * with input_unregister_device(); input_free_device() should not be |
3358 | + * called in this case. |
3359 | + */ |
3360 | int input_register_device(struct input_dev *dev) |
3361 | { |
3362 | static atomic_t input_no = ATOMIC_INIT(0); |
3363 | @@ -1099,7 +1320,7 @@ int input_register_device(struct input_dev *dev) |
3364 | const char *path; |
3365 | int error; |
3366 | |
3367 | - set_bit(EV_SYN, dev->evbit); |
3368 | + __set_bit(EV_SYN, dev->evbit); |
3369 | |
3370 | /* |
3371 | * If delay and period are pre-set by the driver, then autorepeating |
3372 | @@ -1120,8 +1341,6 @@ int input_register_device(struct input_dev *dev) |
3373 | if (!dev->setkeycode) |
3374 | dev->setkeycode = input_default_setkeycode; |
3375 | |
3376 | - list_add_tail(&dev->node, &input_dev_list); |
3377 | - |
3378 | snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id), |
3379 | "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1); |
3380 | |
3381 | @@ -1137,49 +1356,79 @@ int input_register_device(struct input_dev *dev) |
3382 | dev->name ? dev->name : "Unspecified device", path ? path : "N/A"); |
3383 | kfree(path); |
3384 | |
3385 | + error = mutex_lock_interruptible(&input_mutex); |
3386 | + if (error) { |
3387 | + device_del(&dev->dev); |
3388 | + return error; |
3389 | + } |
3390 | + |
3391 | + list_add_tail(&dev->node, &input_dev_list); |
3392 | + |
3393 | list_for_each_entry(handler, &input_handler_list, node) |
3394 | input_attach_handler(dev, handler); |
3395 | |
3396 | input_wakeup_procfs_readers(); |
3397 | |
3398 | + mutex_unlock(&input_mutex); |
3399 | + |
3400 | return 0; |
3401 | } |
3402 | EXPORT_SYMBOL(input_register_device); |
3403 | |
3404 | +/** |
3405 | + * input_unregister_device - unregister previously registered device |
3406 | + * @dev: device to be unregistered |
3407 | + * |
3408 | + * This function unregisters an input device. Once device is unregistered |
3409 | + * the caller should not try to access it as it may get freed at any moment. |
3410 | + */ |
3411 | void input_unregister_device(struct input_dev *dev) |
3412 | { |
3413 | struct input_handle *handle, *next; |
3414 | - int code; |
3415 | |
3416 | - for (code = 0; code <= KEY_MAX; code++) |
3417 | - if (test_bit(code, dev->key)) |
3418 | - input_report_key(dev, code, 0); |
3419 | - input_sync(dev); |
3420 | + input_disconnect_device(dev); |
3421 | |
3422 | - del_timer_sync(&dev->timer); |
3423 | + mutex_lock(&input_mutex); |
3424 | |
3425 | list_for_each_entry_safe(handle, next, &dev->h_list, d_node) |
3426 | handle->handler->disconnect(handle); |
3427 | WARN_ON(!list_empty(&dev->h_list)); |
3428 | |
3429 | + del_timer_sync(&dev->timer); |
3430 | list_del_init(&dev->node); |
3431 | |
3432 | - device_unregister(&dev->dev); |
3433 | - |
3434 | input_wakeup_procfs_readers(); |
3435 | + |
3436 | + mutex_unlock(&input_mutex); |
3437 | + |
3438 | + device_unregister(&dev->dev); |
3439 | } |
3440 | EXPORT_SYMBOL(input_unregister_device); |
3441 | |
3442 | +/** |
3443 | + * input_register_handler - register a new input handler |
3444 | + * @handler: handler to be registered |
3445 | + * |
3446 | + * This function registers a new input handler (interface) for input |
3447 | + * devices in the system and attaches it to all input devices that |
3448 | + * are compatible with the handler. |
3449 | + */ |
3450 | int input_register_handler(struct input_handler *handler) |
3451 | { |
3452 | struct input_dev *dev; |
3453 | + int retval; |
3454 | + |
3455 | + retval = mutex_lock_interruptible(&input_mutex); |
3456 | + if (retval) |
3457 | + return retval; |
3458 | |
3459 | INIT_LIST_HEAD(&handler->h_list); |
3460 | |
3461 | if (handler->fops != NULL) { |
3462 | - if (input_table[handler->minor >> 5]) |
3463 | - return -EBUSY; |
3464 | - |
3465 | + if (input_table[handler->minor >> 5]) { |
3466 | + retval = -EBUSY; |
3467 | + goto out; |
3468 | + } |
3469 | input_table[handler->minor >> 5] = handler; |
3470 | } |
3471 | |
3472 | @@ -1189,14 +1438,26 @@ int input_register_handler(struct input_handler *handler) |
3473 | input_attach_handler(dev, handler); |
3474 | |
3475 | input_wakeup_procfs_readers(); |
3476 | - return 0; |
3477 | + |
3478 | + out: |
3479 | + mutex_unlock(&input_mutex); |
3480 | + return retval; |
3481 | } |
3482 | EXPORT_SYMBOL(input_register_handler); |
3483 | |
3484 | +/** |
3485 | + * input_unregister_handler - unregisters an input handler |
3486 | + * @handler: handler to be unregistered |
3487 | + * |
3488 | + * This function disconnects a handler from its input devices and |
3489 | + * removes it from lists of known handlers. |
3490 | + */ |
3491 | void input_unregister_handler(struct input_handler *handler) |
3492 | { |
3493 | struct input_handle *handle, *next; |
3494 | |
3495 | + mutex_lock(&input_mutex); |
3496 | + |
3497 | list_for_each_entry_safe(handle, next, &handler->h_list, h_node) |
3498 | handler->disconnect(handle); |
3499 | WARN_ON(!list_empty(&handler->h_list)); |
3500 | @@ -1207,14 +1468,50 @@ void input_unregister_handler(struct input_handler *handler) |
3501 | input_table[handler->minor >> 5] = NULL; |
3502 | |
3503 | input_wakeup_procfs_readers(); |
3504 | + |
3505 | + mutex_unlock(&input_mutex); |
3506 | } |
3507 | EXPORT_SYMBOL(input_unregister_handler); |
3508 | |
3509 | +/** |
3510 | + * input_register_handle - register a new input handle |
3511 | + * @handle: handle to register |
3512 | + * |
3513 | + * This function puts a new input handle onto device's |
3514 | + * and handler's lists so that events can flow through |
3515 | + * it once it is opened using input_open_device(). |
3516 | + * |
3517 | + * This function is supposed to be called from handler's |
3518 | + * connect() method. |
3519 | + */ |
3520 | int input_register_handle(struct input_handle *handle) |
3521 | { |
3522 | struct input_handler *handler = handle->handler; |
3523 | + struct input_dev *dev = handle->dev; |
3524 | + int error; |
3525 | + |
3526 | + /* |
3527 | + * We take dev->mutex here to prevent race with |
3528 | + * input_release_device(). |
3529 | + */ |
3530 | + error = mutex_lock_interruptible(&dev->mutex); |
3531 | + if (error) |
3532 | + return error; |
3533 | + list_add_tail_rcu(&handle->d_node, &dev->h_list); |
3534 | + mutex_unlock(&dev->mutex); |
3535 | + /* |
3536 | + * We don't use synchronize_rcu() here because we rely |
3537 | + * on dev->event_lock to protect read-side critical |
3538 | + * section in input_pass_event(). |
3539 | + */ |
3540 | + synchronize_sched(); |
3541 | |
3542 | - list_add_tail(&handle->d_node, &handle->dev->h_list); |
3543 | + /* |
3544 | + * Since we are supposed to be called from ->connect() |
3545 | + * which is mutually exclusive with ->disconnect() |
3546 | + * we can't be racing with input_unregister_handle() |
3547 | + * and so separate lock is not needed here. |
3548 | + */ |
3549 | list_add_tail(&handle->h_node, &handler->h_list); |
3550 | |
3551 | if (handler->start) |
3552 | @@ -1224,10 +1521,29 @@ int input_register_handle(struct input_handle *handle) |
3553 | } |
3554 | EXPORT_SYMBOL(input_register_handle); |
3555 | |
3556 | +/** |
3557 | + * input_unregister_handle - unregister an input handle |
3558 | + * @handle: handle to unregister |
3559 | + * |
3560 | + * This function removes input handle from device's |
3561 | + * and handler's lists. |
3562 | + * |
3563 | + * This function is supposed to be called from handler's |
3564 | + * disconnect() method. |
3565 | + */ |
3566 | void input_unregister_handle(struct input_handle *handle) |
3567 | { |
3568 | + struct input_dev *dev = handle->dev; |
3569 | + |
3570 | list_del_init(&handle->h_node); |
3571 | - list_del_init(&handle->d_node); |
3572 | + |
3573 | + /* |
3574 | + * Take dev->mutex to prevent race with input_release_device(). |
3575 | + */ |
3576 | + mutex_lock(&dev->mutex); |
3577 | + list_del_rcu(&handle->d_node); |
3578 | + mutex_unlock(&dev->mutex); |
3579 | + synchronize_sched(); |
3580 | } |
3581 | EXPORT_SYMBOL(input_unregister_handle); |
3582 | |
3583 | diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c |
3584 | index a9a0180..f306c97 100644 |
3585 | --- a/drivers/input/joydev.c |
3586 | +++ b/drivers/input/joydev.c |
3587 | @@ -43,6 +43,8 @@ struct joydev { |
3588 | struct input_handle handle; |
3589 | wait_queue_head_t wait; |
3590 | struct list_head client_list; |
3591 | + spinlock_t client_lock; /* protects client_list */ |
3592 | + struct mutex mutex; |
3593 | struct device dev; |
3594 | |
3595 | struct js_corr corr[ABS_MAX + 1]; |
3596 | @@ -61,31 +63,61 @@ struct joydev_client { |
3597 | int head; |
3598 | int tail; |
3599 | int startup; |
3600 | + spinlock_t buffer_lock; /* protects access to buffer, head and tail */ |
3601 | struct fasync_struct *fasync; |
3602 | struct joydev *joydev; |
3603 | struct list_head node; |
3604 | }; |
3605 | |
3606 | static struct joydev *joydev_table[JOYDEV_MINORS]; |
3607 | +static DEFINE_MUTEX(joydev_table_mutex); |
3608 | |
3609 | static int joydev_correct(int value, struct js_corr *corr) |
3610 | { |
3611 | switch (corr->type) { |
3612 | - case JS_CORR_NONE: |
3613 | - break; |
3614 | - case JS_CORR_BROKEN: |
3615 | - value = value > corr->coef[0] ? (value < corr->coef[1] ? 0 : |
3616 | - ((corr->coef[3] * (value - corr->coef[1])) >> 14)) : |
3617 | - ((corr->coef[2] * (value - corr->coef[0])) >> 14); |
3618 | - break; |
3619 | - default: |
3620 | - return 0; |
3621 | + |
3622 | + case JS_CORR_NONE: |
3623 | + break; |
3624 | + |
3625 | + case JS_CORR_BROKEN: |
3626 | + value = value > corr->coef[0] ? (value < corr->coef[1] ? 0 : |
3627 | + ((corr->coef[3] * (value - corr->coef[1])) >> 14)) : |
3628 | + ((corr->coef[2] * (value - corr->coef[0])) >> 14); |
3629 | + break; |
3630 | + |
3631 | + default: |
3632 | + return 0; |
3633 | } |
3634 | |
3635 | return value < -32767 ? -32767 : (value > 32767 ? 32767 : value); |
3636 | } |
3637 | |
3638 | -static void joydev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) |
3639 | +static void joydev_pass_event(struct joydev_client *client, |
3640 | + struct js_event *event) |
3641 | +{ |
3642 | + struct joydev *joydev = client->joydev; |
3643 | + |
3644 | + /* |
3645 | + * IRQs already disabled, just acquire the lock |
3646 | + */ |
3647 | + spin_lock(&client->buffer_lock); |
3648 | + |
3649 | + client->buffer[client->head] = *event; |
3650 | + |
3651 | + if (client->startup == joydev->nabs + joydev->nkey) { |
3652 | + client->head++; |
3653 | + client->head &= JOYDEV_BUFFER_SIZE - 1; |
3654 | + if (client->tail == client->head) |
3655 | + client->startup = 0; |
3656 | + } |
3657 | + |
3658 | + spin_unlock(&client->buffer_lock); |
3659 | + |
3660 | + kill_fasync(&client->fasync, SIGIO, POLL_IN); |
3661 | +} |
3662 | + |
3663 | +static void joydev_event(struct input_handle *handle, |
3664 | + unsigned int type, unsigned int code, int value) |
3665 | { |
3666 | struct joydev *joydev = handle->private; |
3667 | struct joydev_client *client; |
3668 | @@ -93,39 +125,32 @@ static void joydev_event(struct input_handle *handle, unsigned int type, unsigne |
3669 | |
3670 | switch (type) { |
3671 | |
3672 | - case EV_KEY: |
3673 | - if (code < BTN_MISC || value == 2) |
3674 | - return; |
3675 | - event.type = JS_EVENT_BUTTON; |
3676 | - event.number = joydev->keymap[code - BTN_MISC]; |
3677 | - event.value = value; |
3678 | - break; |
3679 | - |
3680 | - case EV_ABS: |
3681 | - event.type = JS_EVENT_AXIS; |
3682 | - event.number = joydev->absmap[code]; |
3683 | - event.value = joydev_correct(value, joydev->corr + event.number); |
3684 | - if (event.value == joydev->abs[event.number]) |
3685 | - return; |
3686 | - joydev->abs[event.number] = event.value; |
3687 | - break; |
3688 | + case EV_KEY: |
3689 | + if (code < BTN_MISC || value == 2) |
3690 | + return; |
3691 | + event.type = JS_EVENT_BUTTON; |
3692 | + event.number = joydev->keymap[code - BTN_MISC]; |
3693 | + event.value = value; |
3694 | + break; |
3695 | |
3696 | - default: |
3697 | + case EV_ABS: |
3698 | + event.type = JS_EVENT_AXIS; |
3699 | + event.number = joydev->absmap[code]; |
3700 | + event.value = joydev_correct(value, |
3701 | + &joydev->corr[event.number]); |
3702 | + if (event.value == joydev->abs[event.number]) |
3703 | return; |
3704 | + joydev->abs[event.number] = event.value; |
3705 | + break; |
3706 | + |
3707 | + default: |
3708 | + return; |
3709 | } |
3710 | |
3711 | event.time = jiffies_to_msecs(jiffies); |
3712 | |
3713 | - list_for_each_entry(client, &joydev->client_list, node) { |
3714 | - |
3715 | - memcpy(client->buffer + client->head, &event, sizeof(struct js_event)); |
3716 | - |
3717 | - if (client->startup == joydev->nabs + joydev->nkey) |
3718 | - if (client->tail == (client->head = (client->head + 1) & (JOYDEV_BUFFER_SIZE - 1))) |
3719 | - client->startup = 0; |
3720 | - |
3721 | - kill_fasync(&client->fasync, SIGIO, POLL_IN); |
3722 | - } |
3723 | + list_for_each_entry_rcu(client, &joydev->client_list, node) |
3724 | + joydev_pass_event(client, &event); |
3725 | |
3726 | wake_up_interruptible(&joydev->wait); |
3727 | } |
3728 | @@ -144,23 +169,88 @@ static void joydev_free(struct device *dev) |
3729 | { |
3730 | struct joydev *joydev = container_of(dev, struct joydev, dev); |
3731 | |
3732 | - joydev_table[joydev->minor] = NULL; |
3733 | kfree(joydev); |
3734 | } |
3735 | |
3736 | +static void joydev_attach_client(struct joydev *joydev, |
3737 | + struct joydev_client *client) |
3738 | +{ |
3739 | + spin_lock(&joydev->client_lock); |
3740 | + list_add_tail_rcu(&client->node, &joydev->client_list); |
3741 | + spin_unlock(&joydev->client_lock); |
3742 | + /* |
3743 | + * We don't use synchronize_rcu() here because read-side |
3744 | + * critical section is protected by a spinlock (dev->event_lock) |
3745 | + * instead of rcu_read_lock(). |
3746 | + */ |
3747 | + synchronize_sched(); |
3748 | +} |
3749 | + |
3750 | +static void joydev_detach_client(struct joydev *joydev, |
3751 | + struct joydev_client *client) |
3752 | +{ |
3753 | + spin_lock(&joydev->client_lock); |
3754 | + list_del_rcu(&client->node); |
3755 | + spin_unlock(&joydev->client_lock); |
3756 | + synchronize_sched(); |
3757 | +} |
3758 | + |
3759 | +static int joydev_open_device(struct joydev *joydev) |
3760 | +{ |
3761 | + int retval; |
3762 | + |
3763 | + retval = mutex_lock_interruptible(&joydev->mutex); |
3764 | + if (retval) |
3765 | + return retval; |
3766 | + |
3767 | + if (!joydev->exist) |
3768 | + retval = -ENODEV; |
3769 | + else if (!joydev->open++) { |
3770 | + retval = input_open_device(&joydev->handle); |
3771 | + if (retval) |
3772 | + joydev->open--; |
3773 | + } |
3774 | + |
3775 | + mutex_unlock(&joydev->mutex); |
3776 | + return retval; |
3777 | +} |
3778 | + |
3779 | +static void joydev_close_device(struct joydev *joydev) |
3780 | +{ |
3781 | + mutex_lock(&joydev->mutex); |
3782 | + |
3783 | + if (joydev->exist && !--joydev->open) |
3784 | + input_close_device(&joydev->handle); |
3785 | + |
3786 | + mutex_unlock(&joydev->mutex); |
3787 | +} |
3788 | + |
3789 | +/* |
3790 | + * Wake up users waiting for IO so they can disconnect from |
3791 | + * dead device. |
3792 | + */ |
3793 | +static void joydev_hangup(struct joydev *joydev) |
3794 | +{ |
3795 | + struct joydev_client *client; |
3796 | + |
3797 | + spin_lock(&joydev->client_lock); |
3798 | + list_for_each_entry(client, &joydev->client_list, node) |
3799 | + kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
3800 | + spin_unlock(&joydev->client_lock); |
3801 | + |
3802 | + wake_up_interruptible(&joydev->wait); |
3803 | +} |
3804 | + |
3805 | static int joydev_release(struct inode *inode, struct file *file) |
3806 | { |
3807 | struct joydev_client *client = file->private_data; |
3808 | struct joydev *joydev = client->joydev; |
3809 | |
3810 | joydev_fasync(-1, file, 0); |
3811 | - |
3812 | - list_del(&client->node); |
3813 | + joydev_detach_client(joydev, client); |
3814 | kfree(client); |
3815 | |
3816 | - if (!--joydev->open && joydev->exist) |
3817 | - input_close_device(&joydev->handle); |
3818 | - |
3819 | + joydev_close_device(joydev); |
3820 | put_device(&joydev->dev); |
3821 | |
3822 | return 0; |
3823 | @@ -176,11 +266,16 @@ static int joydev_open(struct inode *inode, struct file *file) |
3824 | if (i >= JOYDEV_MINORS) |
3825 | return -ENODEV; |
3826 | |
3827 | + error = mutex_lock_interruptible(&joydev_table_mutex); |
3828 | + if (error) |
3829 | + return error; |
3830 | joydev = joydev_table[i]; |
3831 | - if (!joydev || !joydev->exist) |
3832 | - return -ENODEV; |
3833 | + if (joydev) |
3834 | + get_device(&joydev->dev); |
3835 | + mutex_unlock(&joydev_table_mutex); |
3836 | |
3837 | - get_device(&joydev->dev); |
3838 | + if (!joydev) |
3839 | + return -ENODEV; |
3840 | |
3841 | client = kzalloc(sizeof(struct joydev_client), GFP_KERNEL); |
3842 | if (!client) { |
3843 | @@ -188,37 +283,129 @@ static int joydev_open(struct inode *inode, struct file *file) |
3844 | goto err_put_joydev; |
3845 | } |
3846 | |
3847 | + spin_lock_init(&client->buffer_lock); |
3848 | client->joydev = joydev; |
3849 | - list_add_tail(&client->node, &joydev->client_list); |
3850 | + joydev_attach_client(joydev, client); |
3851 | |
3852 | - if (!joydev->open++ && joydev->exist) { |
3853 | - error = input_open_device(&joydev->handle); |
3854 | - if (error) |
3855 | - goto err_free_client; |
3856 | - } |
3857 | + error = joydev_open_device(joydev); |
3858 | + if (error) |
3859 | + goto err_free_client; |
3860 | |
3861 | file->private_data = client; |
3862 | return 0; |
3863 | |
3864 | err_free_client: |
3865 | - list_del(&client->node); |
3866 | + joydev_detach_client(joydev, client); |
3867 | kfree(client); |
3868 | err_put_joydev: |
3869 | put_device(&joydev->dev); |
3870 | return error; |
3871 | } |
3872 | |
3873 | -static ssize_t joydev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) |
3874 | +static int joydev_generate_startup_event(struct joydev_client *client, |
3875 | + struct input_dev *input, |
3876 | + struct js_event *event) |
3877 | { |
3878 | - return -EINVAL; |
3879 | + struct joydev *joydev = client->joydev; |
3880 | + int have_event; |
3881 | + |
3882 | + spin_lock_irq(&client->buffer_lock); |
3883 | + |
3884 | + have_event = client->startup < joydev->nabs + joydev->nkey; |
3885 | + |
3886 | + if (have_event) { |
3887 | + |
3888 | + event->time = jiffies_to_msecs(jiffies); |
3889 | + if (client->startup < joydev->nkey) { |
3890 | + event->type = JS_EVENT_BUTTON | JS_EVENT_INIT; |
3891 | + event->number = client->startup; |
3892 | + event->value = !!test_bit(joydev->keypam[event->number], |
3893 | + input->key); |
3894 | + } else { |
3895 | + event->type = JS_EVENT_AXIS | JS_EVENT_INIT; |
3896 | + event->number = client->startup - joydev->nkey; |
3897 | + event->value = joydev->abs[event->number]; |
3898 | + } |
3899 | + client->startup++; |
3900 | + } |
3901 | + |
3902 | + spin_unlock_irq(&client->buffer_lock); |
3903 | + |
3904 | + return have_event; |
3905 | +} |
3906 | + |
3907 | +static int joydev_fetch_next_event(struct joydev_client *client, |
3908 | + struct js_event *event) |
3909 | +{ |
3910 | + int have_event; |
3911 | + |
3912 | + spin_lock_irq(&client->buffer_lock); |
3913 | + |
3914 | + have_event = client->head != client->tail; |
3915 | + if (have_event) { |
3916 | + *event = client->buffer[client->tail++]; |
3917 | + client->tail &= JOYDEV_BUFFER_SIZE - 1; |
3918 | + } |
3919 | + |
3920 | + spin_unlock_irq(&client->buffer_lock); |
3921 | + |
3922 | + return have_event; |
3923 | +} |
3924 | + |
3925 | +/* |
3926 | + * Old joystick interface |
3927 | + */ |
3928 | +static ssize_t joydev_0x_read(struct joydev_client *client, |
3929 | + struct input_dev *input, |
3930 | + char __user *buf) |
3931 | +{ |
3932 | + struct joydev *joydev = client->joydev; |
3933 | + struct JS_DATA_TYPE data; |
3934 | + int i; |
3935 | + |
3936 | + spin_lock_irq(&input->event_lock); |
3937 | + |
3938 | + /* |
3939 | + * Get device state |
3940 | + */ |
3941 | + for (data.buttons = i = 0; i < 32 && i < joydev->nkey; i++) |
3942 | + data.buttons |= |
3943 | + test_bit(joydev->keypam[i], input->key) ? (1 << i) : 0; |
3944 | + data.x = (joydev->abs[0] / 256 + 128) >> joydev->glue.JS_CORR.x; |
3945 | + data.y = (joydev->abs[1] / 256 + 128) >> joydev->glue.JS_CORR.y; |
3946 | + |
3947 | + /* |
3948 | + * Reset reader's event queue |
3949 | + */ |
3950 | + spin_lock(&client->buffer_lock); |
3951 | + client->startup = 0; |
3952 | + client->tail = client->head; |
3953 | + spin_unlock(&client->buffer_lock); |
3954 | + |
3955 | + spin_unlock_irq(&input->event_lock); |
3956 | + |
3957 | + if (copy_to_user(buf, &data, sizeof(struct JS_DATA_TYPE))) |
3958 | + return -EFAULT; |
3959 | + |
3960 | + return sizeof(struct JS_DATA_TYPE); |
3961 | +} |
3962 | + |
3963 | +static inline int joydev_data_pending(struct joydev_client *client) |
3964 | +{ |
3965 | + struct joydev *joydev = client->joydev; |
3966 | + |
3967 | + return client->startup < joydev->nabs + joydev->nkey || |
3968 | + client->head != client->tail; |
3969 | } |
3970 | |
3971 | -static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
3972 | +static ssize_t joydev_read(struct file *file, char __user *buf, |
3973 | + size_t count, loff_t *ppos) |
3974 | { |
3975 | struct joydev_client *client = file->private_data; |
3976 | struct joydev *joydev = client->joydev; |
3977 | struct input_dev *input = joydev->handle.dev; |
3978 | - int retval = 0; |
3979 | + struct js_event event; |
3980 | + int retval; |
3981 | |
3982 | if (!joydev->exist) |
3983 | return -ENODEV; |
3984 | @@ -226,68 +413,35 @@ static ssize_t joydev_read(struct file *file, char __user *buf, size_t count, lo |
3985 | if (count < sizeof(struct js_event)) |
3986 | return -EINVAL; |
3987 | |
3988 | - if (count == sizeof(struct JS_DATA_TYPE)) { |
3989 | - |
3990 | - struct JS_DATA_TYPE data; |
3991 | - int i; |
3992 | - |
3993 | - for (data.buttons = i = 0; i < 32 && i < joydev->nkey; i++) |
3994 | - data.buttons |= test_bit(joydev->keypam[i], input->key) ? (1 << i) : 0; |
3995 | - data.x = (joydev->abs[0] / 256 + 128) >> joydev->glue.JS_CORR.x; |
3996 | - data.y = (joydev->abs[1] / 256 + 128) >> joydev->glue.JS_CORR.y; |
3997 | - |
3998 | - if (copy_to_user(buf, &data, sizeof(struct JS_DATA_TYPE))) |
3999 | - return -EFAULT; |
4000 | - |
4001 | - client->startup = 0; |
4002 | - client->tail = client->head; |
4003 | + if (count == sizeof(struct JS_DATA_TYPE)) |
4004 | + return joydev_0x_read(client, input, buf); |
4005 | |
4006 | - return sizeof(struct JS_DATA_TYPE); |
4007 | - } |
4008 | - |
4009 | - if (client->startup == joydev->nabs + joydev->nkey && |
4010 | - client->head == client->tail && (file->f_flags & O_NONBLOCK)) |
4011 | + if (!joydev_data_pending(client) && (file->f_flags & O_NONBLOCK)) |
4012 | return -EAGAIN; |
4013 | |
4014 | retval = wait_event_interruptible(joydev->wait, |
4015 | - !joydev->exist || |
4016 | - client->startup < joydev->nabs + joydev->nkey || |
4017 | - client->head != client->tail); |
4018 | + !joydev->exist || joydev_data_pending(client)); |
4019 | if (retval) |
4020 | return retval; |
4021 | |
4022 | if (!joydev->exist) |
4023 | return -ENODEV; |
4024 | |
4025 | - while (client->startup < joydev->nabs + joydev->nkey && retval + sizeof(struct js_event) <= count) { |
4026 | - |
4027 | - struct js_event event; |
4028 | - |
4029 | - event.time = jiffies_to_msecs(jiffies); |
4030 | - |
4031 | - if (client->startup < joydev->nkey) { |
4032 | - event.type = JS_EVENT_BUTTON | JS_EVENT_INIT; |
4033 | - event.number = client->startup; |
4034 | - event.value = !!test_bit(joydev->keypam[event.number], input->key); |
4035 | - } else { |
4036 | - event.type = JS_EVENT_AXIS | JS_EVENT_INIT; |
4037 | - event.number = client->startup - joydev->nkey; |
4038 | - event.value = joydev->abs[event.number]; |
4039 | - } |
4040 | + while (retval + sizeof(struct js_event) <= count && |
4041 | + joydev_generate_startup_event(client, input, &event)) { |
4042 | |
4043 | if (copy_to_user(buf + retval, &event, sizeof(struct js_event))) |
4044 | return -EFAULT; |
4045 | |
4046 | - client->startup++; |
4047 | retval += sizeof(struct js_event); |
4048 | } |
4049 | |
4050 | - while (client->head != client->tail && retval + sizeof(struct js_event) <= count) { |
4051 | + while (retval + sizeof(struct js_event) <= count && |
4052 | + joydev_fetch_next_event(client, &event)) { |
4053 | |
4054 | - if (copy_to_user(buf + retval, client->buffer + client->tail, sizeof(struct js_event))) |
4055 | + if (copy_to_user(buf + retval, &event, sizeof(struct js_event))) |
4056 | return -EFAULT; |
4057 | |
4058 | - client->tail = (client->tail + 1) & (JOYDEV_BUFFER_SIZE - 1); |
4059 | retval += sizeof(struct js_event); |
4060 | } |
4061 | |
4062 | @@ -301,126 +455,144 @@ static unsigned int joydev_poll(struct file *file, poll_table *wait) |
4063 | struct joydev *joydev = client->joydev; |
4064 | |
4065 | poll_wait(file, &joydev->wait, wait); |
4066 | - return ((client->head != client->tail || client->startup < joydev->nabs + joydev->nkey) ? |
4067 | - (POLLIN | POLLRDNORM) : 0) | (joydev->exist ? 0 : (POLLHUP | POLLERR)); |
4068 | + return (joydev_data_pending(client) ? (POLLIN | POLLRDNORM) : 0) | |
4069 | + (joydev->exist ? 0 : (POLLHUP | POLLERR)); |
4070 | } |
4071 | |
4072 | -static int joydev_ioctl_common(struct joydev *joydev, unsigned int cmd, void __user *argp) |
4073 | +static int joydev_ioctl_common(struct joydev *joydev, |
4074 | + unsigned int cmd, void __user *argp) |
4075 | { |
4076 | struct input_dev *dev = joydev->handle.dev; |
4077 | int i, j; |
4078 | |
4079 | switch (cmd) { |
4080 | |
4081 | - case JS_SET_CAL: |
4082 | - return copy_from_user(&joydev->glue.JS_CORR, argp, |
4083 | + case JS_SET_CAL: |
4084 | + return copy_from_user(&joydev->glue.JS_CORR, argp, |
4085 | sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0; |
4086 | |
4087 | - case JS_GET_CAL: |
4088 | - return copy_to_user(argp, &joydev->glue.JS_CORR, |
4089 | + case JS_GET_CAL: |
4090 | + return copy_to_user(argp, &joydev->glue.JS_CORR, |
4091 | sizeof(joydev->glue.JS_CORR)) ? -EFAULT : 0; |
4092 | |
4093 | - case JS_SET_TIMEOUT: |
4094 | - return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); |
4095 | + case JS_SET_TIMEOUT: |
4096 | + return get_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); |
4097 | |
4098 | - case JS_GET_TIMEOUT: |
4099 | - return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); |
4100 | + case JS_GET_TIMEOUT: |
4101 | + return put_user(joydev->glue.JS_TIMEOUT, (s32 __user *) argp); |
4102 | |
4103 | - case JSIOCGVERSION: |
4104 | - return put_user(JS_VERSION, (__u32 __user *) argp); |
4105 | + case JSIOCGVERSION: |
4106 | + return put_user(JS_VERSION, (__u32 __user *) argp); |
4107 | |
4108 | - case JSIOCGAXES: |
4109 | - return put_user(joydev->nabs, (__u8 __user *) argp); |
4110 | + case JSIOCGAXES: |
4111 | + return put_user(joydev->nabs, (__u8 __user *) argp); |
4112 | |
4113 | - case JSIOCGBUTTONS: |
4114 | - return put_user(joydev->nkey, (__u8 __user *) argp); |
4115 | + case JSIOCGBUTTONS: |
4116 | + return put_user(joydev->nkey, (__u8 __user *) argp); |
4117 | |
4118 | - case JSIOCSCORR: |
4119 | - if (copy_from_user(joydev->corr, argp, |
4120 | - sizeof(joydev->corr[0]) * joydev->nabs)) |
4121 | - return -EFAULT; |
4122 | - for (i = 0; i < joydev->nabs; i++) { |
4123 | - j = joydev->abspam[i]; |
4124 | - joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i); |
4125 | - } |
4126 | - return 0; |
4127 | + case JSIOCSCORR: |
4128 | + if (copy_from_user(joydev->corr, argp, |
4129 | + sizeof(joydev->corr[0]) * joydev->nabs)) |
4130 | + return -EFAULT; |
4131 | |
4132 | - case JSIOCGCORR: |
4133 | - return copy_to_user(argp, joydev->corr, |
4134 | - sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; |
4135 | + for (i = 0; i < joydev->nabs; i++) { |
4136 | + j = joydev->abspam[i]; |
4137 | + joydev->abs[i] = joydev_correct(dev->abs[j], |
4138 | + &joydev->corr[i]); |
4139 | + } |
4140 | + return 0; |
4141 | |
4142 | - case JSIOCSAXMAP: |
4143 | - if (copy_from_user(joydev->abspam, argp, sizeof(__u8) * (ABS_MAX + 1))) |
4144 | - return -EFAULT; |
4145 | - for (i = 0; i < joydev->nabs; i++) { |
4146 | - if (joydev->abspam[i] > ABS_MAX) |
4147 | - return -EINVAL; |
4148 | - joydev->absmap[joydev->abspam[i]] = i; |
4149 | - } |
4150 | - return 0; |
4151 | - |
4152 | - case JSIOCGAXMAP: |
4153 | - return copy_to_user(argp, joydev->abspam, |
4154 | - sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; |
4155 | - |
4156 | - case JSIOCSBTNMAP: |
4157 | - if (copy_from_user(joydev->keypam, argp, sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) |
4158 | + case JSIOCGCORR: |
4159 | + return copy_to_user(argp, joydev->corr, |
4160 | + sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; |
4161 | + |
4162 | + case JSIOCSAXMAP: |
4163 | + if (copy_from_user(joydev->abspam, argp, |
4164 | + sizeof(__u8) * (ABS_MAX + 1))) |
4165 | + return -EFAULT; |
4166 | + |
4167 | + for (i = 0; i < joydev->nabs; i++) { |
4168 | + if (joydev->abspam[i] > ABS_MAX) |
4169 | + return -EINVAL; |
4170 | + joydev->absmap[joydev->abspam[i]] = i; |
4171 | + } |
4172 | + return 0; |
4173 | + |
4174 | + case JSIOCGAXMAP: |
4175 | + return copy_to_user(argp, joydev->abspam, |
4176 | + sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; |
4177 | + |
4178 | + case JSIOCSBTNMAP: |
4179 | + if (copy_from_user(joydev->keypam, argp, |
4180 | + sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) |
4181 | + return -EFAULT; |
4182 | + |
4183 | + for (i = 0; i < joydev->nkey; i++) { |
4184 | + if (joydev->keypam[i] > KEY_MAX || |
4185 | + joydev->keypam[i] < BTN_MISC) |
4186 | + return -EINVAL; |
4187 | + joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; |
4188 | + } |
4189 | + |
4190 | + return 0; |
4191 | + |
4192 | + case JSIOCGBTNMAP: |
4193 | + return copy_to_user(argp, joydev->keypam, |
4194 | + sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; |
4195 | + |
4196 | + default: |
4197 | + if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) { |
4198 | + int len; |
4199 | + if (!dev->name) |
4200 | + return 0; |
4201 | + len = strlen(dev->name) + 1; |
4202 | + if (len > _IOC_SIZE(cmd)) |
4203 | + len = _IOC_SIZE(cmd); |
4204 | + if (copy_to_user(argp, dev->name, len)) |
4205 | return -EFAULT; |
4206 | - for (i = 0; i < joydev->nkey; i++) { |
4207 | - if (joydev->keypam[i] > KEY_MAX || joydev->keypam[i] < BTN_MISC) |
4208 | - return -EINVAL; |
4209 | - joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; |
4210 | - } |
4211 | - return 0; |
4212 | - |
4213 | - case JSIOCGBTNMAP: |
4214 | - return copy_to_user(argp, joydev->keypam, |
4215 | - sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; |
4216 | - |
4217 | - default: |
4218 | - if ((cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) == JSIOCGNAME(0)) { |
4219 | - int len; |
4220 | - if (!dev->name) |
4221 | - return 0; |
4222 | - len = strlen(dev->name) + 1; |
4223 | - if (len > _IOC_SIZE(cmd)) |
4224 | - len = _IOC_SIZE(cmd); |
4225 | - if (copy_to_user(argp, dev->name, len)) |
4226 | - return -EFAULT; |
4227 | - return len; |
4228 | - } |
4229 | + return len; |
4230 | + } |
4231 | } |
4232 | return -EINVAL; |
4233 | } |
4234 | |
4235 | #ifdef CONFIG_COMPAT |
4236 | -static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
4237 | +static long joydev_compat_ioctl(struct file *file, |
4238 | + unsigned int cmd, unsigned long arg) |
4239 | { |
4240 | struct joydev_client *client = file->private_data; |
4241 | struct joydev *joydev = client->joydev; |
4242 | void __user *argp = (void __user *)arg; |
4243 | s32 tmp32; |
4244 | struct JS_DATA_SAVE_TYPE_32 ds32; |
4245 | - int err; |
4246 | + int retval; |
4247 | |
4248 | - if (!joydev->exist) |
4249 | - return -ENODEV; |
4250 | + retval = mutex_lock_interruptible(&joydev->mutex); |
4251 | + if (retval) |
4252 | + return retval; |
4253 | + |
4254 | + if (!joydev->exist) { |
4255 | + retval = -ENODEV; |
4256 | + goto out; |
4257 | + } |
4258 | + |
4259 | + switch (cmd) { |
4260 | |
4261 | - switch(cmd) { |
4262 | case JS_SET_TIMELIMIT: |
4263 | - err = get_user(tmp32, (s32 __user *) arg); |
4264 | - if (err == 0) |
4265 | + retval = get_user(tmp32, (s32 __user *) arg); |
4266 | + if (retval == 0) |
4267 | joydev->glue.JS_TIMELIMIT = tmp32; |
4268 | break; |
4269 | + |
4270 | case JS_GET_TIMELIMIT: |
4271 | tmp32 = joydev->glue.JS_TIMELIMIT; |
4272 | - err = put_user(tmp32, (s32 __user *) arg); |
4273 | + retval = put_user(tmp32, (s32 __user *) arg); |
4274 | break; |
4275 | |
4276 | case JS_SET_ALL: |
4277 | - err = copy_from_user(&ds32, argp, |
4278 | - sizeof(ds32)) ? -EFAULT : 0; |
4279 | - if (err == 0) { |
4280 | + retval = copy_from_user(&ds32, argp, |
4281 | + sizeof(ds32)) ? -EFAULT : 0; |
4282 | + if (retval == 0) { |
4283 | joydev->glue.JS_TIMEOUT = ds32.JS_TIMEOUT; |
4284 | joydev->glue.BUSY = ds32.BUSY; |
4285 | joydev->glue.JS_EXPIRETIME = ds32.JS_EXPIRETIME; |
4286 | @@ -438,55 +610,119 @@ static long joydev_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo |
4287 | ds32.JS_SAVE = joydev->glue.JS_SAVE; |
4288 | ds32.JS_CORR = joydev->glue.JS_CORR; |
4289 | |
4290 | - err = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0; |
4291 | + retval = copy_to_user(argp, &ds32, sizeof(ds32)) ? -EFAULT : 0; |
4292 | break; |
4293 | |
4294 | default: |
4295 | - err = joydev_ioctl_common(joydev, cmd, argp); |
4296 | + retval = joydev_ioctl_common(joydev, cmd, argp); |
4297 | + break; |
4298 | } |
4299 | - return err; |
4300 | + |
4301 | + out: |
4302 | + mutex_unlock(&joydev->mutex); |
4303 | + return retval; |
4304 | } |
4305 | #endif /* CONFIG_COMPAT */ |
4306 | |
4307 | -static int joydev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) |
4308 | +static long joydev_ioctl(struct file *file, |
4309 | + unsigned int cmd, unsigned long arg) |
4310 | { |
4311 | struct joydev_client *client = file->private_data; |
4312 | struct joydev *joydev = client->joydev; |
4313 | void __user *argp = (void __user *)arg; |
4314 | + int retval; |
4315 | |
4316 | - if (!joydev->exist) |
4317 | - return -ENODEV; |
4318 | + retval = mutex_lock_interruptible(&joydev->mutex); |
4319 | + if (retval) |
4320 | + return retval; |
4321 | + |
4322 | + if (!joydev->exist) { |
4323 | + retval = -ENODEV; |
4324 | + goto out; |
4325 | + } |
4326 | + |
4327 | + switch (cmd) { |
4328 | + |
4329 | + case JS_SET_TIMELIMIT: |
4330 | + retval = get_user(joydev->glue.JS_TIMELIMIT, |
4331 | + (long __user *) arg); |
4332 | + break; |
4333 | + |
4334 | + case JS_GET_TIMELIMIT: |
4335 | + retval = put_user(joydev->glue.JS_TIMELIMIT, |
4336 | + (long __user *) arg); |
4337 | + break; |
4338 | + |
4339 | + case JS_SET_ALL: |
4340 | + retval = copy_from_user(&joydev->glue, argp, |
4341 | + sizeof(joydev->glue)) ? -EFAULT: 0; |
4342 | + break; |
4343 | + |
4344 | + case JS_GET_ALL: |
4345 | + retval = copy_to_user(argp, &joydev->glue, |
4346 | + sizeof(joydev->glue)) ? -EFAULT : 0; |
4347 | + break; |
4348 | |
4349 | - switch(cmd) { |
4350 | - case JS_SET_TIMELIMIT: |
4351 | - return get_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg); |
4352 | - case JS_GET_TIMELIMIT: |
4353 | - return put_user(joydev->glue.JS_TIMELIMIT, (long __user *) arg); |
4354 | - case JS_SET_ALL: |
4355 | - return copy_from_user(&joydev->glue, argp, |
4356 | - sizeof(joydev->glue)) ? -EFAULT : 0; |
4357 | - case JS_GET_ALL: |
4358 | - return copy_to_user(argp, &joydev->glue, |
4359 | - sizeof(joydev->glue)) ? -EFAULT : 0; |
4360 | - default: |
4361 | - return joydev_ioctl_common(joydev, cmd, argp); |
4362 | + default: |
4363 | + retval = joydev_ioctl_common(joydev, cmd, argp); |
4364 | + break; |
4365 | } |
4366 | + out: |
4367 | + mutex_unlock(&joydev->mutex); |
4368 | + return retval; |
4369 | } |
4370 | |
4371 | static const struct file_operations joydev_fops = { |
4372 | - .owner = THIS_MODULE, |
4373 | - .read = joydev_read, |
4374 | - .write = joydev_write, |
4375 | - .poll = joydev_poll, |
4376 | - .open = joydev_open, |
4377 | - .release = joydev_release, |
4378 | - .ioctl = joydev_ioctl, |
4379 | + .owner = THIS_MODULE, |
4380 | + .read = joydev_read, |
4381 | + .poll = joydev_poll, |
4382 | + .open = joydev_open, |
4383 | + .release = joydev_release, |
4384 | + .unlocked_ioctl = joydev_ioctl, |
4385 | #ifdef CONFIG_COMPAT |
4386 | - .compat_ioctl = joydev_compat_ioctl, |
4387 | + .compat_ioctl = joydev_compat_ioctl, |
4388 | #endif |
4389 | - .fasync = joydev_fasync, |
4390 | + .fasync = joydev_fasync, |
4391 | }; |
4392 | |
4393 | +static int joydev_install_chrdev(struct joydev *joydev) |
4394 | +{ |
4395 | + joydev_table[joydev->minor] = joydev; |
4396 | + return 0; |
4397 | +} |
4398 | + |
4399 | +static void joydev_remove_chrdev(struct joydev *joydev) |
4400 | +{ |
4401 | + mutex_lock(&joydev_table_mutex); |
4402 | + joydev_table[joydev->minor] = NULL; |
4403 | + mutex_unlock(&joydev_table_mutex); |
4404 | +} |
4405 | + |
4406 | +/* |
4407 | + * Mark device non-existant. This disables writes, ioctls and |
4408 | + * prevents new users from opening the device. Already posted |
4409 | + * blocking reads will stay, however new ones will fail. |
4410 | + */ |
4411 | +static void joydev_mark_dead(struct joydev *joydev) |
4412 | +{ |
4413 | + mutex_lock(&joydev->mutex); |
4414 | + joydev->exist = 0; |
4415 | + mutex_unlock(&joydev->mutex); |
4416 | +} |
4417 | + |
4418 | +static void joydev_cleanup(struct joydev *joydev) |
4419 | +{ |
4420 | + struct input_handle *handle = &joydev->handle; |
4421 | + |
4422 | + joydev_mark_dead(joydev); |
4423 | + joydev_hangup(joydev); |
4424 | + joydev_remove_chrdev(joydev); |
4425 | + |
4426 | + /* joydev is marked dead so noone else accesses joydev->open */ |
4427 | + if (joydev->open) |
4428 | + input_close_device(handle); |
4429 | +} |
4430 | + |
4431 | static int joydev_connect(struct input_handler *handler, struct input_dev *dev, |
4432 | const struct input_device_id *id) |
4433 | { |
4434 | @@ -494,7 +730,10 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, |
4435 | int i, j, t, minor; |
4436 | int error; |
4437 | |
4438 | - for (minor = 0; minor < JOYDEV_MINORS && joydev_table[minor]; minor++); |
4439 | + for (minor = 0; minor < JOYDEV_MINORS; minor++) |
4440 | + if (!joydev_table[minor]) |
4441 | + break; |
4442 | + |
4443 | if (minor == JOYDEV_MINORS) { |
4444 | printk(KERN_ERR "joydev: no more free joydev devices\n"); |
4445 | return -ENFILE; |
4446 | @@ -505,15 +744,19 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, |
4447 | return -ENOMEM; |
4448 | |
4449 | INIT_LIST_HEAD(&joydev->client_list); |
4450 | + spin_lock_init(&joydev->client_lock); |
4451 | + mutex_init(&joydev->mutex); |
4452 | init_waitqueue_head(&joydev->wait); |
4453 | |
4454 | + snprintf(joydev->name, sizeof(joydev->name), "js%d", minor); |
4455 | + joydev->exist = 1; |
4456 | joydev->minor = minor; |
4457 | + |
4458 | joydev->exist = 1; |
4459 | joydev->handle.dev = dev; |
4460 | joydev->handle.name = joydev->name; |
4461 | joydev->handle.handler = handler; |
4462 | joydev->handle.private = joydev; |
4463 | - snprintf(joydev->name, sizeof(joydev->name), "js%d", minor); |
4464 | |
4465 | for (i = 0; i < ABS_MAX + 1; i++) |
4466 | if (test_bit(i, dev->absbit)) { |
4467 | @@ -545,67 +788,65 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev, |
4468 | } |
4469 | joydev->corr[i].type = JS_CORR_BROKEN; |
4470 | joydev->corr[i].prec = dev->absfuzz[j]; |
4471 | - joydev->corr[i].coef[0] = (dev->absmax[j] + dev->absmin[j]) / 2 - dev->absflat[j]; |
4472 | - joydev->corr[i].coef[1] = (dev->absmax[j] + dev->absmin[j]) / 2 + dev->absflat[j]; |
4473 | - if (!(t = ((dev->absmax[j] - dev->absmin[j]) / 2 - 2 * dev->absflat[j]))) |
4474 | - continue; |
4475 | - joydev->corr[i].coef[2] = (1 << 29) / t; |
4476 | - joydev->corr[i].coef[3] = (1 << 29) / t; |
4477 | - |
4478 | - joydev->abs[i] = joydev_correct(dev->abs[j], joydev->corr + i); |
4479 | + joydev->corr[i].coef[0] = |
4480 | + (dev->absmax[j] + dev->absmin[j]) / 2 - dev->absflat[j]; |
4481 | + joydev->corr[i].coef[1] = |
4482 | + (dev->absmax[j] + dev->absmin[j]) / 2 + dev->absflat[j]; |
4483 | + |
4484 | + t = (dev->absmax[j] - dev->absmin[j]) / 2 - 2 * dev->absflat[j]; |
4485 | + if (t) { |
4486 | + joydev->corr[i].coef[2] = (1 << 29) / t; |
4487 | + joydev->corr[i].coef[3] = (1 << 29) / t; |
4488 | + |
4489 | + joydev->abs[i] = joydev_correct(dev->abs[j], |
4490 | + joydev->corr + i); |
4491 | + } |
4492 | } |
4493 | |
4494 | - snprintf(joydev->dev.bus_id, sizeof(joydev->dev.bus_id), |
4495 | - "js%d", minor); |
4496 | + strlcpy(joydev->dev.bus_id, joydev->name, sizeof(joydev->dev.bus_id)); |
4497 | + joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor); |
4498 | joydev->dev.class = &input_class; |
4499 | joydev->dev.parent = &dev->dev; |
4500 | - joydev->dev.devt = MKDEV(INPUT_MAJOR, JOYDEV_MINOR_BASE + minor); |
4501 | joydev->dev.release = joydev_free; |
4502 | device_initialize(&joydev->dev); |
4503 | |
4504 | - joydev_table[minor] = joydev; |
4505 | - |
4506 | - error = device_add(&joydev->dev); |
4507 | + error = input_register_handle(&joydev->handle); |
4508 | if (error) |
4509 | goto err_free_joydev; |
4510 | |
4511 | - error = input_register_handle(&joydev->handle); |
4512 | + error = joydev_install_chrdev(joydev); |
4513 | if (error) |
4514 | - goto err_delete_joydev; |
4515 | + goto err_unregister_handle; |
4516 | + |
4517 | + error = device_add(&joydev->dev); |
4518 | + if (error) |
4519 | + goto err_cleanup_joydev; |
4520 | |
4521 | return 0; |
4522 | |
4523 | - err_delete_joydev: |
4524 | - device_del(&joydev->dev); |
4525 | + err_cleanup_joydev: |
4526 | + joydev_cleanup(joydev); |
4527 | + err_unregister_handle: |
4528 | + input_unregister_handle(&joydev->handle); |
4529 | err_free_joydev: |
4530 | put_device(&joydev->dev); |
4531 | return error; |
4532 | } |
4533 | |
4534 | - |
4535 | static void joydev_disconnect(struct input_handle *handle) |
4536 | { |
4537 | struct joydev *joydev = handle->private; |
4538 | - struct joydev_client *client; |
4539 | |
4540 | - input_unregister_handle(handle); |
4541 | device_del(&joydev->dev); |
4542 | - |
4543 | - joydev->exist = 0; |
4544 | - |
4545 | - if (joydev->open) { |
4546 | - input_close_device(handle); |
4547 | - list_for_each_entry(client, &joydev->client_list, node) |
4548 | - kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
4549 | - wake_up_interruptible(&joydev->wait); |
4550 | - } |
4551 | - |
4552 | + joydev_cleanup(joydev); |
4553 | + input_unregister_handle(handle); |
4554 | put_device(&joydev->dev); |
4555 | } |
4556 | |
4557 | static const struct input_device_id joydev_blacklist[] = { |
4558 | { |
4559 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, |
4560 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
4561 | + INPUT_DEVICE_ID_MATCH_KEYBIT, |
4562 | .evbit = { BIT(EV_KEY) }, |
4563 | .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) }, |
4564 | }, /* Avoid itouchpads, touchscreens and tablets */ |
4565 | @@ -614,17 +855,20 @@ static const struct input_device_id joydev_blacklist[] = { |
4566 | |
4567 | static const struct input_device_id joydev_ids[] = { |
4568 | { |
4569 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, |
4570 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
4571 | + INPUT_DEVICE_ID_MATCH_ABSBIT, |
4572 | .evbit = { BIT(EV_ABS) }, |
4573 | .absbit = { BIT(ABS_X) }, |
4574 | }, |
4575 | { |
4576 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, |
4577 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
4578 | + INPUT_DEVICE_ID_MATCH_ABSBIT, |
4579 | .evbit = { BIT(EV_ABS) }, |
4580 | .absbit = { BIT(ABS_WHEEL) }, |
4581 | }, |
4582 | { |
4583 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, |
4584 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
4585 | + INPUT_DEVICE_ID_MATCH_ABSBIT, |
4586 | .evbit = { BIT(EV_ABS) }, |
4587 | .absbit = { BIT(ABS_THROTTLE) }, |
4588 | }, |
4589 | @@ -634,14 +878,14 @@ static const struct input_device_id joydev_ids[] = { |
4590 | MODULE_DEVICE_TABLE(input, joydev_ids); |
4591 | |
4592 | static struct input_handler joydev_handler = { |
4593 | - .event = joydev_event, |
4594 | - .connect = joydev_connect, |
4595 | - .disconnect = joydev_disconnect, |
4596 | - .fops = &joydev_fops, |
4597 | - .minor = JOYDEV_MINOR_BASE, |
4598 | - .name = "joydev", |
4599 | - .id_table = joydev_ids, |
4600 | - .blacklist = joydev_blacklist, |
4601 | + .event = joydev_event, |
4602 | + .connect = joydev_connect, |
4603 | + .disconnect = joydev_disconnect, |
4604 | + .fops = &joydev_fops, |
4605 | + .minor = JOYDEV_MINOR_BASE, |
4606 | + .name = "joydev", |
4607 | + .id_table = joydev_ids, |
4608 | + .blacklist = joydev_blacklist, |
4609 | }; |
4610 | |
4611 | static int __init joydev_init(void) |
4612 | diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c |
4613 | index 9173916..cc36edb 100644 |
4614 | --- a/drivers/input/mousedev.c |
4615 | +++ b/drivers/input/mousedev.c |
4616 | @@ -61,9 +61,11 @@ struct mousedev { |
4617 | int open; |
4618 | int minor; |
4619 | char name[16]; |
4620 | + struct input_handle handle; |
4621 | wait_queue_head_t wait; |
4622 | struct list_head client_list; |
4623 | - struct input_handle handle; |
4624 | + spinlock_t client_lock; /* protects client_list */ |
4625 | + struct mutex mutex; |
4626 | struct device dev; |
4627 | |
4628 | struct list_head mixdev_node; |
4629 | @@ -113,108 +115,137 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; |
4630 | static struct input_handler mousedev_handler; |
4631 | |
4632 | static struct mousedev *mousedev_table[MOUSEDEV_MINORS]; |
4633 | +static DEFINE_MUTEX(mousedev_table_mutex); |
4634 | static struct mousedev *mousedev_mix; |
4635 | static LIST_HEAD(mousedev_mix_list); |
4636 | |
4637 | +static void mixdev_open_devices(void); |
4638 | +static void mixdev_close_devices(void); |
4639 | + |
4640 | #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) |
4641 | #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) |
4642 | |
4643 | -static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) |
4644 | +static void mousedev_touchpad_event(struct input_dev *dev, |
4645 | + struct mousedev *mousedev, |
4646 | + unsigned int code, int value) |
4647 | { |
4648 | int size, tmp; |
4649 | enum { FRACTION_DENOM = 128 }; |
4650 | |
4651 | switch (code) { |
4652 | - case ABS_X: |
4653 | - fx(0) = value; |
4654 | - if (mousedev->touch && mousedev->pkt_count >= 2) { |
4655 | - size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4656 | - if (size == 0) |
4657 | - size = 256 * 2; |
4658 | - tmp = ((value - fx(2)) * (256 * FRACTION_DENOM)) / size; |
4659 | - tmp += mousedev->frac_dx; |
4660 | - mousedev->packet.dx = tmp / FRACTION_DENOM; |
4661 | - mousedev->frac_dx = tmp - mousedev->packet.dx * FRACTION_DENOM; |
4662 | - } |
4663 | - break; |
4664 | |
4665 | - case ABS_Y: |
4666 | - fy(0) = value; |
4667 | - if (mousedev->touch && mousedev->pkt_count >= 2) { |
4668 | - /* use X size to keep the same scale */ |
4669 | - size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4670 | - if (size == 0) |
4671 | - size = 256 * 2; |
4672 | - tmp = -((value - fy(2)) * (256 * FRACTION_DENOM)) / size; |
4673 | - tmp += mousedev->frac_dy; |
4674 | - mousedev->packet.dy = tmp / FRACTION_DENOM; |
4675 | - mousedev->frac_dy = tmp - mousedev->packet.dy * FRACTION_DENOM; |
4676 | - } |
4677 | - break; |
4678 | + case ABS_X: |
4679 | + fx(0) = value; |
4680 | + if (mousedev->touch && mousedev->pkt_count >= 2) { |
4681 | + size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4682 | + if (size == 0) |
4683 | + size = 256 * 2; |
4684 | + tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size; |
4685 | + tmp += mousedev->frac_dx; |
4686 | + mousedev->packet.dx = tmp / FRACTION_DENOM; |
4687 | + mousedev->frac_dx = |
4688 | + tmp - mousedev->packet.dx * FRACTION_DENOM; |
4689 | + } |
4690 | + break; |
4691 | + |
4692 | + case ABS_Y: |
4693 | + fy(0) = value; |
4694 | + if (mousedev->touch && mousedev->pkt_count >= 2) { |
4695 | + /* use X size to keep the same scale */ |
4696 | + size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4697 | + if (size == 0) |
4698 | + size = 256 * 2; |
4699 | + tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size; |
4700 | + tmp += mousedev->frac_dy; |
4701 | + mousedev->packet.dy = tmp / FRACTION_DENOM; |
4702 | + mousedev->frac_dy = tmp - |
4703 | + mousedev->packet.dy * FRACTION_DENOM; |
4704 | + } |
4705 | + break; |
4706 | } |
4707 | } |
4708 | |
4709 | -static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) |
4710 | +static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, |
4711 | + unsigned int code, int value) |
4712 | { |
4713 | int size; |
4714 | |
4715 | switch (code) { |
4716 | - case ABS_X: |
4717 | - size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4718 | - if (size == 0) |
4719 | - size = xres ? : 1; |
4720 | - if (value > dev->absmax[ABS_X]) |
4721 | - value = dev->absmax[ABS_X]; |
4722 | - if (value < dev->absmin[ABS_X]) |
4723 | - value = dev->absmin[ABS_X]; |
4724 | - mousedev->packet.x = ((value - dev->absmin[ABS_X]) * xres) / size; |
4725 | - mousedev->packet.abs_event = 1; |
4726 | - break; |
4727 | |
4728 | - case ABS_Y: |
4729 | - size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y]; |
4730 | - if (size == 0) |
4731 | - size = yres ? : 1; |
4732 | - if (value > dev->absmax[ABS_Y]) |
4733 | - value = dev->absmax[ABS_Y]; |
4734 | - if (value < dev->absmin[ABS_Y]) |
4735 | - value = dev->absmin[ABS_Y]; |
4736 | - mousedev->packet.y = yres - ((value - dev->absmin[ABS_Y]) * yres) / size; |
4737 | - mousedev->packet.abs_event = 1; |
4738 | - break; |
4739 | + case ABS_X: |
4740 | + size = dev->absmax[ABS_X] - dev->absmin[ABS_X]; |
4741 | + if (size == 0) |
4742 | + size = xres ? : 1; |
4743 | + if (value > dev->absmax[ABS_X]) |
4744 | + value = dev->absmax[ABS_X]; |
4745 | + if (value < dev->absmin[ABS_X]) |
4746 | + value = dev->absmin[ABS_X]; |
4747 | + mousedev->packet.x = |
4748 | + ((value - dev->absmin[ABS_X]) * xres) / size; |
4749 | + mousedev->packet.abs_event = 1; |
4750 | + break; |
4751 | + |
4752 | + case ABS_Y: |
4753 | + size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y]; |
4754 | + if (size == 0) |
4755 | + size = yres ? : 1; |
4756 | + if (value > dev->absmax[ABS_Y]) |
4757 | + value = dev->absmax[ABS_Y]; |
4758 | + if (value < dev->absmin[ABS_Y]) |
4759 | + value = dev->absmin[ABS_Y]; |
4760 | + mousedev->packet.y = yres - |
4761 | + ((value - dev->absmin[ABS_Y]) * yres) / size; |
4762 | + mousedev->packet.abs_event = 1; |
4763 | + break; |
4764 | } |
4765 | } |
4766 | |
4767 | -static void mousedev_rel_event(struct mousedev *mousedev, unsigned int code, int value) |
4768 | +static void mousedev_rel_event(struct mousedev *mousedev, |
4769 | + unsigned int code, int value) |
4770 | { |
4771 | switch (code) { |
4772 | - case REL_X: mousedev->packet.dx += value; break; |
4773 | - case REL_Y: mousedev->packet.dy -= value; break; |
4774 | - case REL_WHEEL: mousedev->packet.dz -= value; break; |
4775 | + case REL_X: |
4776 | + mousedev->packet.dx += value; |
4777 | + break; |
4778 | + |
4779 | + case REL_Y: |
4780 | + mousedev->packet.dy -= value; |
4781 | + break; |
4782 | + |
4783 | + case REL_WHEEL: |
4784 | + mousedev->packet.dz -= value; |
4785 | + break; |
4786 | } |
4787 | } |
4788 | |
4789 | -static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int value) |
4790 | +static void mousedev_key_event(struct mousedev *mousedev, |
4791 | + unsigned int code, int value) |
4792 | { |
4793 | int index; |
4794 | |
4795 | switch (code) { |
4796 | - case BTN_TOUCH: |
4797 | - case BTN_0: |
4798 | - case BTN_LEFT: index = 0; break; |
4799 | - case BTN_STYLUS: |
4800 | - case BTN_1: |
4801 | - case BTN_RIGHT: index = 1; break; |
4802 | - case BTN_2: |
4803 | - case BTN_FORWARD: |
4804 | - case BTN_STYLUS2: |
4805 | - case BTN_MIDDLE: index = 2; break; |
4806 | - case BTN_3: |
4807 | - case BTN_BACK: |
4808 | - case BTN_SIDE: index = 3; break; |
4809 | - case BTN_4: |
4810 | - case BTN_EXTRA: index = 4; break; |
4811 | - default: return; |
4812 | + |
4813 | + case BTN_TOUCH: |
4814 | + case BTN_0: |
4815 | + case BTN_LEFT: index = 0; break; |
4816 | + |
4817 | + case BTN_STYLUS: |
4818 | + case BTN_1: |
4819 | + case BTN_RIGHT: index = 1; break; |
4820 | + |
4821 | + case BTN_2: |
4822 | + case BTN_FORWARD: |
4823 | + case BTN_STYLUS2: |
4824 | + case BTN_MIDDLE: index = 2; break; |
4825 | + |
4826 | + case BTN_3: |
4827 | + case BTN_BACK: |
4828 | + case BTN_SIDE: index = 3; break; |
4829 | + |
4830 | + case BTN_4: |
4831 | + case BTN_EXTRA: index = 4; break; |
4832 | + |
4833 | + default: return; |
4834 | } |
4835 | |
4836 | if (value) { |
4837 | @@ -226,19 +257,22 @@ static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int |
4838 | } |
4839 | } |
4840 | |
4841 | -static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_hw_data *packet) |
4842 | +static void mousedev_notify_readers(struct mousedev *mousedev, |
4843 | + struct mousedev_hw_data *packet) |
4844 | { |
4845 | struct mousedev_client *client; |
4846 | struct mousedev_motion *p; |
4847 | - unsigned long flags; |
4848 | + unsigned int new_head; |
4849 | int wake_readers = 0; |
4850 | |
4851 | - list_for_each_entry(client, &mousedev->client_list, node) { |
4852 | - spin_lock_irqsave(&client->packet_lock, flags); |
4853 | + list_for_each_entry_rcu(client, &mousedev->client_list, node) { |
4854 | + |
4855 | + /* Just acquire the lock, interrupts already disabled */ |
4856 | + spin_lock(&client->packet_lock); |
4857 | |
4858 | p = &client->packets[client->head]; |
4859 | if (client->ready && p->buttons != mousedev->packet.buttons) { |
4860 | - unsigned int new_head = (client->head + 1) % PACKET_QUEUE_LEN; |
4861 | + new_head = (client->head + 1) % PACKET_QUEUE_LEN; |
4862 | if (new_head != client->tail) { |
4863 | p = &client->packets[client->head = new_head]; |
4864 | memset(p, 0, sizeof(struct mousedev_motion)); |
4865 | @@ -253,19 +287,22 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h |
4866 | } |
4867 | |
4868 | client->pos_x += packet->dx; |
4869 | - client->pos_x = client->pos_x < 0 ? 0 : (client->pos_x >= xres ? xres : client->pos_x); |
4870 | + client->pos_x = client->pos_x < 0 ? |
4871 | + 0 : (client->pos_x >= xres ? xres : client->pos_x); |
4872 | client->pos_y += packet->dy; |
4873 | - client->pos_y = client->pos_y < 0 ? 0 : (client->pos_y >= yres ? yres : client->pos_y); |
4874 | + client->pos_y = client->pos_y < 0 ? |
4875 | + 0 : (client->pos_y >= yres ? yres : client->pos_y); |
4876 | |
4877 | p->dx += packet->dx; |
4878 | p->dy += packet->dy; |
4879 | p->dz += packet->dz; |
4880 | p->buttons = mousedev->packet.buttons; |
4881 | |
4882 | - if (p->dx || p->dy || p->dz || p->buttons != client->last_buttons) |
4883 | + if (p->dx || p->dy || p->dz || |
4884 | + p->buttons != client->last_buttons) |
4885 | client->ready = 1; |
4886 | |
4887 | - spin_unlock_irqrestore(&client->packet_lock, flags); |
4888 | + spin_unlock(&client->packet_lock); |
4889 | |
4890 | if (client->ready) { |
4891 | kill_fasync(&client->fasync, SIGIO, POLL_IN); |
4892 | @@ -281,7 +318,8 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) |
4893 | { |
4894 | if (!value) { |
4895 | if (mousedev->touch && |
4896 | - time_before(jiffies, mousedev->touch + msecs_to_jiffies(tap_time))) { |
4897 | + time_before(jiffies, |
4898 | + mousedev->touch + msecs_to_jiffies(tap_time))) { |
4899 | /* |
4900 | * Toggle left button to emulate tap. |
4901 | * We rely on the fact that mousedev_mix always has 0 |
4902 | @@ -290,7 +328,8 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) |
4903 | set_bit(0, &mousedev->packet.buttons); |
4904 | set_bit(0, &mousedev_mix->packet.buttons); |
4905 | mousedev_notify_readers(mousedev, &mousedev_mix->packet); |
4906 | - mousedev_notify_readers(mousedev_mix, &mousedev_mix->packet); |
4907 | + mousedev_notify_readers(mousedev_mix, |
4908 | + &mousedev_mix->packet); |
4909 | clear_bit(0, &mousedev->packet.buttons); |
4910 | clear_bit(0, &mousedev_mix->packet.buttons); |
4911 | } |
4912 | @@ -302,54 +341,61 @@ static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) |
4913 | mousedev->touch = jiffies; |
4914 | } |
4915 | |
4916 | -static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) |
4917 | +static void mousedev_event(struct input_handle *handle, |
4918 | + unsigned int type, unsigned int code, int value) |
4919 | { |
4920 | struct mousedev *mousedev = handle->private; |
4921 | |
4922 | switch (type) { |
4923 | - case EV_ABS: |
4924 | - /* Ignore joysticks */ |
4925 | - if (test_bit(BTN_TRIGGER, handle->dev->keybit)) |
4926 | - return; |
4927 | |
4928 | - if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) |
4929 | - mousedev_touchpad_event(handle->dev, mousedev, code, value); |
4930 | - else |
4931 | - mousedev_abs_event(handle->dev, mousedev, code, value); |
4932 | + case EV_ABS: |
4933 | + /* Ignore joysticks */ |
4934 | + if (test_bit(BTN_TRIGGER, handle->dev->keybit)) |
4935 | + return; |
4936 | |
4937 | - break; |
4938 | + if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) |
4939 | + mousedev_touchpad_event(handle->dev, |
4940 | + mousedev, code, value); |
4941 | + else |
4942 | + mousedev_abs_event(handle->dev, mousedev, code, value); |
4943 | |
4944 | - case EV_REL: |
4945 | - mousedev_rel_event(mousedev, code, value); |
4946 | - break; |
4947 | + break; |
4948 | |
4949 | - case EV_KEY: |
4950 | - if (value != 2) { |
4951 | - if (code == BTN_TOUCH && test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) |
4952 | - mousedev_touchpad_touch(mousedev, value); |
4953 | - else |
4954 | - mousedev_key_event(mousedev, code, value); |
4955 | - } |
4956 | - break; |
4957 | + case EV_REL: |
4958 | + mousedev_rel_event(mousedev, code, value); |
4959 | + break; |
4960 | |
4961 | - case EV_SYN: |
4962 | - if (code == SYN_REPORT) { |
4963 | - if (mousedev->touch) { |
4964 | - mousedev->pkt_count++; |
4965 | - /* Input system eats duplicate events, but we need all of them |
4966 | - * to do correct averaging so apply present one forward |
4967 | - */ |
4968 | - fx(0) = fx(1); |
4969 | - fy(0) = fy(1); |
4970 | - } |
4971 | - |
4972 | - mousedev_notify_readers(mousedev, &mousedev->packet); |
4973 | - mousedev_notify_readers(mousedev_mix, &mousedev->packet); |
4974 | - |
4975 | - mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0; |
4976 | - mousedev->packet.abs_event = 0; |
4977 | + case EV_KEY: |
4978 | + if (value != 2) { |
4979 | + if (code == BTN_TOUCH && |
4980 | + test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) |
4981 | + mousedev_touchpad_touch(mousedev, value); |
4982 | + else |
4983 | + mousedev_key_event(mousedev, code, value); |
4984 | + } |
4985 | + break; |
4986 | + |
4987 | + case EV_SYN: |
4988 | + if (code == SYN_REPORT) { |
4989 | + if (mousedev->touch) { |
4990 | + mousedev->pkt_count++; |
4991 | + /* |
4992 | + * Input system eats duplicate events, |
4993 | + * but we need all of them to do correct |
4994 | + * averaging so apply present one forward |
4995 | + */ |
4996 | + fx(0) = fx(1); |
4997 | + fy(0) = fy(1); |
4998 | } |
4999 | - break; |
5000 | + |
5001 | + mousedev_notify_readers(mousedev, &mousedev->packet); |
5002 | + mousedev_notify_readers(mousedev_mix, &mousedev->packet); |
5003 | + |
5004 | + mousedev->packet.dx = mousedev->packet.dy = |
5005 | + mousedev->packet.dz = 0; |
5006 | + mousedev->packet.abs_event = 0; |
5007 | + } |
5008 | + break; |
5009 | } |
5010 | } |
5011 | |
5012 | @@ -367,41 +413,48 @@ static void mousedev_free(struct device *dev) |
5013 | { |
5014 | struct mousedev *mousedev = container_of(dev, struct mousedev, dev); |
5015 | |
5016 | - mousedev_table[mousedev->minor] = NULL; |
5017 | kfree(mousedev); |
5018 | } |
5019 | |
5020 | -static int mixdev_add_device(struct mousedev *mousedev) |
5021 | +static int mousedev_open_device(struct mousedev *mousedev) |
5022 | { |
5023 | - int error; |
5024 | + int retval; |
5025 | |
5026 | - if (mousedev_mix->open) { |
5027 | - error = input_open_device(&mousedev->handle); |
5028 | - if (error) |
5029 | - return error; |
5030 | + retval = mutex_lock_interruptible(&mousedev->mutex); |
5031 | + if (retval) |
5032 | + return retval; |
5033 | |
5034 | - mousedev->open++; |
5035 | - mousedev->mixdev_open = 1; |
5036 | + if (mousedev->minor == MOUSEDEV_MIX) |
5037 | + mixdev_open_devices(); |
5038 | + else if (!mousedev->exist) |
5039 | + retval = -ENODEV; |
5040 | + else if (!mousedev->open++) { |
5041 | + retval = input_open_device(&mousedev->handle); |
5042 | + if (retval) |
5043 | + mousedev->open--; |
5044 | } |
5045 | |
5046 | - get_device(&mousedev->dev); |
5047 | - list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list); |
5048 | - |
5049 | - return 0; |
5050 | + mutex_unlock(&mousedev->mutex); |
5051 | + return retval; |
5052 | } |
5053 | |
5054 | -static void mixdev_remove_device(struct mousedev *mousedev) |
5055 | +static void mousedev_close_device(struct mousedev *mousedev) |
5056 | { |
5057 | - if (mousedev->mixdev_open) { |
5058 | - mousedev->mixdev_open = 0; |
5059 | - if (!--mousedev->open && mousedev->exist) |
5060 | - input_close_device(&mousedev->handle); |
5061 | - } |
5062 | + mutex_lock(&mousedev->mutex); |
5063 | |
5064 | - list_del_init(&mousedev->mixdev_node); |
5065 | - put_device(&mousedev->dev); |
5066 | + if (mousedev->minor == MOUSEDEV_MIX) |
5067 | + mixdev_close_devices(); |
5068 | + else if (mousedev->exist && !--mousedev->open) |
5069 | + input_close_device(&mousedev->handle); |
5070 | + |
5071 | + mutex_unlock(&mousedev->mutex); |
5072 | } |
5073 | |
5074 | +/* |
5075 | + * Open all available devices so they can all be multiplexed in one
5076 | + * stream. Note that this function is called with mousedev_mix->mutex |
5077 | + * held. |
5078 | + */ |
5079 | static void mixdev_open_devices(void) |
5080 | { |
5081 | struct mousedev *mousedev; |
5082 | @@ -411,16 +464,19 @@ static void mixdev_open_devices(void) |
5083 | |
5084 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
5085 | if (!mousedev->mixdev_open) { |
5086 | - if (!mousedev->open && mousedev->exist) |
5087 | - if (input_open_device(&mousedev->handle)) |
5088 | - continue; |
5089 | + if (mousedev_open_device(mousedev)) |
5090 | + continue; |
5091 | |
5092 | - mousedev->open++; |
5093 | mousedev->mixdev_open = 1; |
5094 | } |
5095 | } |
5096 | } |
5097 | |
5098 | +/* |
5099 | + * Close all devices that were opened as part of multiplexed |
5100 | + * device. Note that this function is called with mousedev_mix->mutex |
5101 | + * held. |
5102 | + */ |
5103 | static void mixdev_close_devices(void) |
5104 | { |
5105 | struct mousedev *mousedev; |
5106 | @@ -431,33 +487,50 @@ static void mixdev_close_devices(void) |
5107 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
5108 | if (mousedev->mixdev_open) { |
5109 | mousedev->mixdev_open = 0; |
5110 | - if (!--mousedev->open && mousedev->exist) |
5111 | - input_close_device(&mousedev->handle); |
5112 | + mousedev_close_device(mousedev); |
5113 | } |
5114 | } |
5115 | } |
5116 | |
5117 | + |
5118 | +static void mousedev_attach_client(struct mousedev *mousedev, |
5119 | + struct mousedev_client *client) |
5120 | +{ |
5121 | + spin_lock(&mousedev->client_lock); |
5122 | + list_add_tail_rcu(&client->node, &mousedev->client_list); |
5123 | + spin_unlock(&mousedev->client_lock); |
5124 | + /* |
5125 | + * We don't use synchronize_rcu() here because read-side |
5126 | + * critical section is protected by a spinlock (dev->event_lock) |
5127 | + * instead of rcu_read_lock(). |
5128 | + */ |
5129 | + synchronize_sched(); |
5130 | +} |
5131 | + |
5132 | +static void mousedev_detach_client(struct mousedev *mousedev, |
5133 | + struct mousedev_client *client) |
5134 | +{ |
5135 | + spin_lock(&mousedev->client_lock); |
5136 | + list_del_rcu(&client->node); |
5137 | + spin_unlock(&mousedev->client_lock); |
5138 | + synchronize_sched(); |
5139 | +} |
5140 | + |
5141 | static int mousedev_release(struct inode *inode, struct file *file) |
5142 | { |
5143 | struct mousedev_client *client = file->private_data; |
5144 | struct mousedev *mousedev = client->mousedev; |
5145 | |
5146 | mousedev_fasync(-1, file, 0); |
5147 | - |
5148 | - list_del(&client->node); |
5149 | + mousedev_detach_client(mousedev, client); |
5150 | kfree(client); |
5151 | |
5152 | - if (mousedev->minor == MOUSEDEV_MIX) |
5153 | - mixdev_close_devices(); |
5154 | - else if (!--mousedev->open && mousedev->exist) |
5155 | - input_close_device(&mousedev->handle); |
5156 | - |
5157 | + mousedev_close_device(mousedev); |
5158 | put_device(&mousedev->dev); |
5159 | |
5160 | return 0; |
5161 | } |
5162 | |
5163 | - |
5164 | static int mousedev_open(struct inode *inode, struct file *file) |
5165 | { |
5166 | struct mousedev_client *client; |
5167 | @@ -475,12 +548,17 @@ static int mousedev_open(struct inode *inode, struct file *file) |
5168 | if (i >= MOUSEDEV_MINORS) |
5169 | return -ENODEV; |
5170 | |
5171 | + error = mutex_lock_interruptible(&mousedev_table_mutex); |
5172 | + if (error) |
5173 | + return error; |
5174 | mousedev = mousedev_table[i]; |
5175 | + if (mousedev) |
5176 | + get_device(&mousedev->dev); |
5177 | + mutex_unlock(&mousedev_table_mutex); |
5178 | + |
5179 | if (!mousedev) |
5180 | return -ENODEV; |
5181 | |
5182 | - get_device(&mousedev->dev); |
5183 | - |
5184 | client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); |
5185 | if (!client) { |
5186 | error = -ENOMEM; |
5187 | @@ -491,21 +569,17 @@ static int mousedev_open(struct inode *inode, struct file *file) |
5188 | client->pos_x = xres / 2; |
5189 | client->pos_y = yres / 2; |
5190 | client->mousedev = mousedev; |
5191 | - list_add_tail(&client->node, &mousedev->client_list); |
5192 | + mousedev_attach_client(mousedev, client); |
5193 | |
5194 | - if (mousedev->minor == MOUSEDEV_MIX) |
5195 | - mixdev_open_devices(); |
5196 | - else if (!mousedev->open++ && mousedev->exist) { |
5197 | - error = input_open_device(&mousedev->handle); |
5198 | - if (error) |
5199 | - goto err_free_client; |
5200 | - } |
5201 | + error = mousedev_open_device(mousedev); |
5202 | + if (error) |
5203 | + goto err_free_client; |
5204 | |
5205 | file->private_data = client; |
5206 | return 0; |
5207 | |
5208 | err_free_client: |
5209 | - list_del(&client->node); |
5210 | + mousedev_detach_client(mousedev, client); |
5211 | kfree(client); |
5212 | err_put_mousedev: |
5213 | put_device(&mousedev->dev); |
5214 | @@ -517,41 +591,41 @@ static inline int mousedev_limit_delta(int delta, int limit) |
5215 | return delta > limit ? limit : (delta < -limit ? -limit : delta); |
5216 | } |
5217 | |
5218 | -static void mousedev_packet(struct mousedev_client *client, signed char *ps2_data) |
5219 | +static void mousedev_packet(struct mousedev_client *client, |
5220 | + signed char *ps2_data) |
5221 | { |
5222 | - struct mousedev_motion *p; |
5223 | - unsigned long flags; |
5224 | - |
5225 | - spin_lock_irqsave(&client->packet_lock, flags); |
5226 | - p = &client->packets[client->tail]; |
5227 | + struct mousedev_motion *p = &client->packets[client->tail]; |
5228 | |
5229 | - ps2_data[0] = 0x08 | ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); |
5230 | + ps2_data[0] = 0x08 | |
5231 | + ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); |
5232 | ps2_data[1] = mousedev_limit_delta(p->dx, 127); |
5233 | ps2_data[2] = mousedev_limit_delta(p->dy, 127); |
5234 | p->dx -= ps2_data[1]; |
5235 | p->dy -= ps2_data[2]; |
5236 | |
5237 | switch (client->mode) { |
5238 | - case MOUSEDEV_EMUL_EXPS: |
5239 | - ps2_data[3] = mousedev_limit_delta(p->dz, 7); |
5240 | - p->dz -= ps2_data[3]; |
5241 | - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); |
5242 | - client->bufsiz = 4; |
5243 | - break; |
5244 | - |
5245 | - case MOUSEDEV_EMUL_IMPS: |
5246 | - ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); |
5247 | - ps2_data[3] = mousedev_limit_delta(p->dz, 127); |
5248 | - p->dz -= ps2_data[3]; |
5249 | - client->bufsiz = 4; |
5250 | - break; |
5251 | - |
5252 | - case MOUSEDEV_EMUL_PS2: |
5253 | - default: |
5254 | - ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); |
5255 | - p->dz = 0; |
5256 | - client->bufsiz = 3; |
5257 | - break; |
5258 | + case MOUSEDEV_EMUL_EXPS: |
5259 | + ps2_data[3] = mousedev_limit_delta(p->dz, 7); |
5260 | + p->dz -= ps2_data[3]; |
5261 | + ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); |
5262 | + client->bufsiz = 4; |
5263 | + break; |
5264 | + |
5265 | + case MOUSEDEV_EMUL_IMPS: |
5266 | + ps2_data[0] |= |
5267 | + ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); |
5268 | + ps2_data[3] = mousedev_limit_delta(p->dz, 127); |
5269 | + p->dz -= ps2_data[3]; |
5270 | + client->bufsiz = 4; |
5271 | + break; |
5272 | + |
5273 | + case MOUSEDEV_EMUL_PS2: |
5274 | + default: |
5275 | + ps2_data[0] |= |
5276 | + ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); |
5277 | + p->dz = 0; |
5278 | + client->bufsiz = 3; |
5279 | + break; |
5280 | } |
5281 | |
5282 | if (!p->dx && !p->dy && !p->dz) { |
5283 | @@ -561,12 +635,56 @@ static void mousedev_packet(struct mousedev_client *client, signed char *ps2_dat |
5284 | } else |
5285 | client->tail = (client->tail + 1) % PACKET_QUEUE_LEN; |
5286 | } |
5287 | - |
5288 | - spin_unlock_irqrestore(&client->packet_lock, flags); |
5289 | } |
5290 | |
5291 | +static void mousedev_generate_response(struct mousedev_client *client, |
5292 | + int command) |
5293 | +{ |
5294 | + client->ps2[0] = 0xfa; /* ACK */ |
5295 | + |
5296 | + switch (command) { |
5297 | |
5298 | -static ssize_t mousedev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) |
5299 | + case 0xeb: /* Poll */ |
5300 | + mousedev_packet(client, &client->ps2[1]); |
5301 | + client->bufsiz++; /* account for leading ACK */ |
5302 | + break; |
5303 | + |
5304 | + case 0xf2: /* Get ID */ |
5305 | + switch (client->mode) { |
5306 | + case MOUSEDEV_EMUL_PS2: |
5307 | + client->ps2[1] = 0; |
5308 | + break; |
5309 | + case MOUSEDEV_EMUL_IMPS: |
5310 | + client->ps2[1] = 3; |
5311 | + break; |
5312 | + case MOUSEDEV_EMUL_EXPS: |
5313 | + client->ps2[1] = 4; |
5314 | + break; |
5315 | + } |
5316 | + client->bufsiz = 2; |
5317 | + break; |
5318 | + |
5319 | + case 0xe9: /* Get info */ |
5320 | + client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200; |
5321 | + client->bufsiz = 4; |
5322 | + break; |
5323 | + |
5324 | + case 0xff: /* Reset */ |
5325 | + client->impsseq = client->imexseq = 0; |
5326 | + client->mode = MOUSEDEV_EMUL_PS2; |
5327 | + client->ps2[1] = 0xaa; client->ps2[2] = 0x00; |
5328 | + client->bufsiz = 3; |
5329 | + break; |
5330 | + |
5331 | + default: |
5332 | + client->bufsiz = 1; |
5333 | + break; |
5334 | + } |
5335 | + client->buffer = client->bufsiz; |
5336 | +} |
5337 | + |
5338 | +static ssize_t mousedev_write(struct file *file, const char __user *buffer, |
5339 | + size_t count, loff_t *ppos) |
5340 | { |
5341 | struct mousedev_client *client = file->private_data; |
5342 | unsigned char c; |
5343 | @@ -577,6 +695,8 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer, size |
5344 | if (get_user(c, buffer + i)) |
5345 | return -EFAULT; |
5346 | |
5347 | + spin_lock_irq(&client->packet_lock); |
5348 | + |
5349 | if (c == mousedev_imex_seq[client->imexseq]) { |
5350 | if (++client->imexseq == MOUSEDEV_SEQ_LEN) { |
5351 | client->imexseq = 0; |
5352 | @@ -593,68 +713,39 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer, size |
5353 | } else |
5354 | client->impsseq = 0; |
5355 | |
5356 | - client->ps2[0] = 0xfa; |
5357 | - |
5358 | - switch (c) { |
5359 | - |
5360 | - case 0xeb: /* Poll */ |
5361 | - mousedev_packet(client, &client->ps2[1]); |
5362 | - client->bufsiz++; /* account for leading ACK */ |
5363 | - break; |
5364 | - |
5365 | - case 0xf2: /* Get ID */ |
5366 | - switch (client->mode) { |
5367 | - case MOUSEDEV_EMUL_PS2: client->ps2[1] = 0; break; |
5368 | - case MOUSEDEV_EMUL_IMPS: client->ps2[1] = 3; break; |
5369 | - case MOUSEDEV_EMUL_EXPS: client->ps2[1] = 4; break; |
5370 | - } |
5371 | - client->bufsiz = 2; |
5372 | - break; |
5373 | - |
5374 | - case 0xe9: /* Get info */ |
5375 | - client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200; |
5376 | - client->bufsiz = 4; |
5377 | - break; |
5378 | - |
5379 | - case 0xff: /* Reset */ |
5380 | - client->impsseq = client->imexseq = 0; |
5381 | - client->mode = MOUSEDEV_EMUL_PS2; |
5382 | - client->ps2[1] = 0xaa; client->ps2[2] = 0x00; |
5383 | - client->bufsiz = 3; |
5384 | - break; |
5385 | - |
5386 | - default: |
5387 | - client->bufsiz = 1; |
5388 | - break; |
5389 | - } |
5390 | + mousedev_generate_response(client, c); |
5391 | |
5392 | - client->buffer = client->bufsiz; |
5393 | + spin_unlock_irq(&client->packet_lock); |
5394 | } |
5395 | |
5396 | kill_fasync(&client->fasync, SIGIO, POLL_IN); |
5397 | - |
5398 | wake_up_interruptible(&client->mousedev->wait); |
5399 | |
5400 | return count; |
5401 | } |
5402 | |
5403 | -static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) |
5404 | +static ssize_t mousedev_read(struct file *file, char __user *buffer, |
5405 | + size_t count, loff_t *ppos) |
5406 | { |
5407 | struct mousedev_client *client = file->private_data; |
5408 | + struct mousedev *mousedev = client->mousedev; |
5409 | + signed char data[sizeof(client->ps2)]; |
5410 | int retval = 0; |
5411 | |
5412 | - if (!client->ready && !client->buffer && (file->f_flags & O_NONBLOCK)) |
5413 | + if (!client->ready && !client->buffer && mousedev->exist && |
5414 | + (file->f_flags & O_NONBLOCK)) |
5415 | return -EAGAIN; |
5416 | |
5417 | - retval = wait_event_interruptible(client->mousedev->wait, |
5418 | - !client->mousedev->exist || client->ready || client->buffer); |
5419 | - |
5420 | + retval = wait_event_interruptible(mousedev->wait, |
5421 | + !mousedev->exist || client->ready || client->buffer); |
5422 | if (retval) |
5423 | return retval; |
5424 | |
5425 | - if (!client->mousedev->exist) |
5426 | + if (!mousedev->exist) |
5427 | return -ENODEV; |
5428 | |
5429 | + spin_lock_irq(&client->packet_lock); |
5430 | + |
5431 | if (!client->buffer && client->ready) { |
5432 | mousedev_packet(client, client->ps2); |
5433 | client->buffer = client->bufsiz; |
5434 | @@ -663,9 +754,12 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t coun |
5435 | if (count > client->buffer) |
5436 | count = client->buffer; |
5437 | |
5438 | + memcpy(data, client->ps2 + client->bufsiz - client->buffer, count); |
5439 | client->buffer -= count; |
5440 | |
5441 | - if (copy_to_user(buffer, client->ps2 + client->bufsiz - client->buffer - count, count)) |
5442 | + spin_unlock_irq(&client->packet_lock); |
5443 | + |
5444 | + if (copy_to_user(buffer, data, count)) |
5445 | return -EFAULT; |
5446 | |
5447 | return count; |
5448 | @@ -692,6 +786,60 @@ static const struct file_operations mousedev_fops = { |
5449 | .fasync = mousedev_fasync, |
5450 | }; |
5451 | |
5452 | +static int mousedev_install_chrdev(struct mousedev *mousedev) |
5453 | +{ |
5454 | + mousedev_table[mousedev->minor] = mousedev; |
5455 | + return 0; |
5456 | +} |
5457 | + |
5458 | +static void mousedev_remove_chrdev(struct mousedev *mousedev) |
5459 | +{ |
5460 | + mutex_lock(&mousedev_table_mutex); |
5461 | + mousedev_table[mousedev->minor] = NULL; |
5462 | + mutex_unlock(&mousedev_table_mutex); |
5463 | +} |
5464 | + |
5465 | +/* |
5466 | + * Mark device non-existent. This disables writes, ioctls and |
5467 | + * prevents new users from opening the device. Already posted |
5468 | + * blocking reads will stay, however new ones will fail. |
5469 | + */ |
5470 | +static void mousedev_mark_dead(struct mousedev *mousedev) |
5471 | +{ |
5472 | + mutex_lock(&mousedev->mutex); |
5473 | + mousedev->exist = 0; |
5474 | + mutex_unlock(&mousedev->mutex); |
5475 | +} |
5476 | + |
5477 | +/* |
5478 | + * Wake up users waiting for IO so they can disconnect from |
5479 | + * dead device. |
5480 | + */ |
5481 | +static void mousedev_hangup(struct mousedev *mousedev) |
5482 | +{ |
5483 | + struct mousedev_client *client; |
5484 | + |
5485 | + spin_lock(&mousedev->client_lock); |
5486 | + list_for_each_entry(client, &mousedev->client_list, node) |
5487 | + kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
5488 | + spin_unlock(&mousedev->client_lock); |
5489 | + |
5490 | + wake_up_interruptible(&mousedev->wait); |
5491 | +} |
5492 | + |
5493 | +static void mousedev_cleanup(struct mousedev *mousedev) |
5494 | +{ |
5495 | + struct input_handle *handle = &mousedev->handle; |
5496 | + |
5497 | + mousedev_mark_dead(mousedev); |
5498 | + mousedev_hangup(mousedev); |
5499 | + mousedev_remove_chrdev(mousedev); |
5500 | + |
5501 | + /* mousedev is marked dead so no one else accesses mousedev->open */ |
5502 | + if (mousedev->open) |
5503 | + input_close_device(handle); |
5504 | +} |
5505 | + |
5506 | static struct mousedev *mousedev_create(struct input_dev *dev, |
5507 | struct input_handler *handler, |
5508 | int minor) |
5509 | @@ -707,6 +855,10 @@ static struct mousedev *mousedev_create(struct input_dev *dev, |
5510 | |
5511 | INIT_LIST_HEAD(&mousedev->client_list); |
5512 | INIT_LIST_HEAD(&mousedev->mixdev_node); |
5513 | + spin_lock_init(&mousedev->client_lock); |
5514 | + mutex_init(&mousedev->mutex); |
5515 | + lockdep_set_subclass(&mousedev->mutex, |
5516 | + minor == MOUSEDEV_MIX ? MOUSEDEV_MIX : 0); |
5517 | init_waitqueue_head(&mousedev->wait); |
5518 | |
5519 | if (minor == MOUSEDEV_MIX) |
5520 | @@ -731,14 +883,27 @@ static struct mousedev *mousedev_create(struct input_dev *dev, |
5521 | mousedev->dev.release = mousedev_free; |
5522 | device_initialize(&mousedev->dev); |
5523 | |
5524 | - mousedev_table[minor] = mousedev; |
5525 | + if (minor != MOUSEDEV_MIX) { |
5526 | + error = input_register_handle(&mousedev->handle); |
5527 | + if (error) |
5528 | + goto err_free_mousedev; |
5529 | + } |
5530 | + |
5531 | + error = mousedev_install_chrdev(mousedev); |
5532 | + if (error) |
5533 | + goto err_unregister_handle; |
5534 | |
5535 | error = device_add(&mousedev->dev); |
5536 | if (error) |
5537 | - goto err_free_mousedev; |
5538 | + goto err_cleanup_mousedev; |
5539 | |
5540 | return mousedev; |
5541 | |
5542 | + err_cleanup_mousedev: |
5543 | + mousedev_cleanup(mousedev); |
5544 | + err_unregister_handle: |
5545 | + if (minor != MOUSEDEV_MIX) |
5546 | + input_unregister_handle(&mousedev->handle); |
5547 | err_free_mousedev: |
5548 | put_device(&mousedev->dev); |
5549 | err_out: |
5550 | @@ -747,29 +912,64 @@ static struct mousedev *mousedev_create(struct input_dev *dev, |
5551 | |
5552 | static void mousedev_destroy(struct mousedev *mousedev) |
5553 | { |
5554 | - struct mousedev_client *client; |
5555 | - |
5556 | device_del(&mousedev->dev); |
5557 | - mousedev->exist = 0; |
5558 | + mousedev_cleanup(mousedev); |
5559 | + if (mousedev->minor != MOUSEDEV_MIX) |
5560 | + input_unregister_handle(&mousedev->handle); |
5561 | + put_device(&mousedev->dev); |
5562 | +} |
5563 | |
5564 | - if (mousedev->open) { |
5565 | - input_close_device(&mousedev->handle); |
5566 | - list_for_each_entry(client, &mousedev->client_list, node) |
5567 | - kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
5568 | - wake_up_interruptible(&mousedev->wait); |
5569 | +static int mixdev_add_device(struct mousedev *mousedev) |
5570 | +{ |
5571 | + int retval; |
5572 | + |
5573 | + retval = mutex_lock_interruptible(&mousedev_mix->mutex); |
5574 | + if (retval) |
5575 | + return retval; |
5576 | + |
5577 | + if (mousedev_mix->open) { |
5578 | + retval = mousedev_open_device(mousedev); |
5579 | + if (retval) |
5580 | + goto out; |
5581 | + |
5582 | + mousedev->mixdev_open = 1; |
5583 | } |
5584 | |
5585 | + get_device(&mousedev->dev); |
5586 | + list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list); |
5587 | + |
5588 | + out: |
5589 | + mutex_unlock(&mousedev_mix->mutex); |
5590 | + return retval; |
5591 | +} |
5592 | + |
5593 | +static void mixdev_remove_device(struct mousedev *mousedev) |
5594 | +{ |
5595 | + mutex_lock(&mousedev_mix->mutex); |
5596 | + |
5597 | + if (mousedev->mixdev_open) { |
5598 | + mousedev->mixdev_open = 0; |
5599 | + mousedev_close_device(mousedev); |
5600 | + } |
5601 | + |
5602 | + list_del_init(&mousedev->mixdev_node); |
5603 | + mutex_unlock(&mousedev_mix->mutex); |
5604 | + |
5605 | put_device(&mousedev->dev); |
5606 | } |
5607 | |
5608 | -static int mousedev_connect(struct input_handler *handler, struct input_dev *dev, |
5609 | +static int mousedev_connect(struct input_handler *handler, |
5610 | + struct input_dev *dev, |
5611 | const struct input_device_id *id) |
5612 | { |
5613 | struct mousedev *mousedev; |
5614 | int minor; |
5615 | int error; |
5616 | |
5617 | - for (minor = 0; minor < MOUSEDEV_MINORS && mousedev_table[minor]; minor++); |
5618 | + for (minor = 0; minor < MOUSEDEV_MINORS; minor++) |
5619 | + if (!mousedev_table[minor]) |
5620 | + break; |
5621 | + |
5622 | if (minor == MOUSEDEV_MINORS) { |
5623 | printk(KERN_ERR "mousedev: no more free mousedev devices\n"); |
5624 | return -ENFILE; |
5625 | @@ -779,21 +979,13 @@ static int mousedev_connect(struct input_handler *handler, struct input_dev *dev |
5626 | if (IS_ERR(mousedev)) |
5627 | return PTR_ERR(mousedev); |
5628 | |
5629 | - error = input_register_handle(&mousedev->handle); |
5630 | - if (error) |
5631 | - goto err_delete_mousedev; |
5632 | - |
5633 | error = mixdev_add_device(mousedev); |
5634 | - if (error) |
5635 | - goto err_unregister_handle; |
5636 | + if (error) { |
5637 | + mousedev_destroy(mousedev); |
5638 | + return error; |
5639 | + } |
5640 | |
5641 | return 0; |
5642 | - |
5643 | - err_unregister_handle: |
5644 | - input_unregister_handle(&mousedev->handle); |
5645 | - err_delete_mousedev: |
5646 | - device_unregister(&mousedev->dev); |
5647 | - return error; |
5648 | } |
5649 | |
5650 | static void mousedev_disconnect(struct input_handle *handle) |
5651 | @@ -801,33 +993,42 @@ static void mousedev_disconnect(struct input_handle *handle) |
5652 | struct mousedev *mousedev = handle->private; |
5653 | |
5654 | mixdev_remove_device(mousedev); |
5655 | - input_unregister_handle(handle); |
5656 | mousedev_destroy(mousedev); |
5657 | } |
5658 | |
5659 | static const struct input_device_id mousedev_ids[] = { |
5660 | { |
5661 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_RELBIT, |
5662 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
5663 | + INPUT_DEVICE_ID_MATCH_KEYBIT | |
5664 | + INPUT_DEVICE_ID_MATCH_RELBIT, |
5665 | .evbit = { BIT(EV_KEY) | BIT(EV_REL) }, |
5666 | .keybit = { [LONG(BTN_LEFT)] = BIT(BTN_LEFT) }, |
5667 | .relbit = { BIT(REL_X) | BIT(REL_Y) }, |
5668 | - }, /* A mouse like device, at least one button, two relative axes */ |
5669 | + }, /* A mouse like device, at least one button, |
5670 | + two relative axes */ |
5671 | { |
5672 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_RELBIT, |
5673 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
5674 | + INPUT_DEVICE_ID_MATCH_RELBIT, |
5675 | .evbit = { BIT(EV_KEY) | BIT(EV_REL) }, |
5676 | .relbit = { BIT(REL_WHEEL) }, |
5677 | }, /* A separate scrollwheel */ |
5678 | { |
5679 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, |
5680 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
5681 | + INPUT_DEVICE_ID_MATCH_KEYBIT | |
5682 | + INPUT_DEVICE_ID_MATCH_ABSBIT, |
5683 | .evbit = { BIT(EV_KEY) | BIT(EV_ABS) }, |
5684 | .keybit = { [LONG(BTN_TOUCH)] = BIT(BTN_TOUCH) }, |
5685 | .absbit = { BIT(ABS_X) | BIT(ABS_Y) }, |
5686 | - }, /* A tablet like device, at least touch detection, two absolute axes */ |
5687 | + }, /* A tablet like device, at least touch detection, |
5688 | + two absolute axes */ |
5689 | { |
5690 | - .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, |
5691 | + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | |
5692 | + INPUT_DEVICE_ID_MATCH_KEYBIT | |
5693 | + INPUT_DEVICE_ID_MATCH_ABSBIT, |
5694 | .evbit = { BIT(EV_KEY) | BIT(EV_ABS) }, |
5695 | .keybit = { [LONG(BTN_TOOL_FINGER)] = BIT(BTN_TOOL_FINGER) }, |
5696 | - .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) | BIT(ABS_TOOL_WIDTH) }, |
5697 | + .absbit = { BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE) | |
5698 | + BIT(ABS_TOOL_WIDTH) }, |
5699 | }, /* A touchpad */ |
5700 | |
5701 | { }, /* Terminating entry */ |
5702 | diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c |
5703 | index d2f882e..1202334 100644 |
5704 | --- a/drivers/input/tsdev.c |
5705 | +++ b/drivers/input/tsdev.c |
5706 | @@ -112,6 +112,8 @@ struct tsdev { |
5707 | struct input_handle handle; |
5708 | wait_queue_head_t wait; |
5709 | struct list_head client_list; |
5710 | + spinlock_t client_lock; /* protects client_list */ |
5711 | + struct mutex mutex; |
5712 | struct device dev; |
5713 | |
5714 | int x, y, pressure; |
5715 | @@ -122,8 +124,9 @@ struct tsdev_client { |
5716 | struct fasync_struct *fasync; |
5717 | struct list_head node; |
5718 | struct tsdev *tsdev; |
5719 | + struct ts_event buffer[TSDEV_BUFFER_SIZE]; |
5720 | int head, tail; |
5721 | - struct ts_event event[TSDEV_BUFFER_SIZE]; |
5722 | + spinlock_t buffer_lock; /* protects access to buffer, head and tail */ |
5723 | int raw; |
5724 | }; |
5725 | |
5726 | @@ -137,6 +140,7 @@ struct tsdev_client { |
5727 | #define TS_SET_CAL _IOW(IOC_H3600_TS_MAGIC, 11, struct ts_calibration) |
5728 | |
5729 | static struct tsdev *tsdev_table[TSDEV_MINORS/2]; |
5730 | +static DEFINE_MUTEX(tsdev_table_mutex); |
5731 | |
5732 | static int tsdev_fasync(int fd, struct file *file, int on) |
5733 | { |
5734 | @@ -144,9 +148,94 @@ static int tsdev_fasync(int fd, struct file *file, int on) |
5735 | int retval; |
5736 | |
5737 | retval = fasync_helper(fd, file, on, &client->fasync); |
5738 | + |
5739 | return retval < 0 ? retval : 0; |
5740 | } |
5741 | |
5742 | +static void tsdev_free(struct device *dev) |
5743 | +{ |
5744 | + struct tsdev *tsdev = container_of(dev, struct tsdev, dev); |
5745 | + |
5746 | + kfree(tsdev); |
5747 | +} |
5748 | + |
5749 | +static void tsdev_attach_client(struct tsdev *tsdev, struct tsdev_client *client) |
5750 | +{ |
5751 | + spin_lock(&tsdev->client_lock); |
5752 | + list_add_tail_rcu(&client->node, &tsdev->client_list); |
5753 | + spin_unlock(&tsdev->client_lock); |
5754 | + synchronize_sched(); |
5755 | +} |
5756 | + |
5757 | +static void tsdev_detach_client(struct tsdev *tsdev, struct tsdev_client *client) |
5758 | +{ |
5759 | + spin_lock(&tsdev->client_lock); |
5760 | + list_del_rcu(&client->node); |
5761 | + spin_unlock(&tsdev->client_lock); |
5762 | + synchronize_sched(); |
5763 | +} |
5764 | + |
5765 | +static int tsdev_open_device(struct tsdev *tsdev) |
5766 | +{ |
5767 | + int retval; |
5768 | + |
5769 | + retval = mutex_lock_interruptible(&tsdev->mutex); |
5770 | + if (retval) |
5771 | + return retval; |
5772 | + |
5773 | + if (!tsdev->exist) |
5774 | + retval = -ENODEV; |
5775 | + else if (!tsdev->open++) { |
5776 | + retval = input_open_device(&tsdev->handle); |
5777 | + if (retval) |
5778 | + tsdev->open--; |
5779 | + } |
5780 | + |
5781 | + mutex_unlock(&tsdev->mutex); |
5782 | + return retval; |
5783 | +} |
5784 | + |
5785 | +static void tsdev_close_device(struct tsdev *tsdev) |
5786 | +{ |
5787 | + mutex_lock(&tsdev->mutex); |
5788 | + |
5789 | + if (tsdev->exist && !--tsdev->open) |
5790 | + input_close_device(&tsdev->handle); |
5791 | + |
5792 | + mutex_unlock(&tsdev->mutex); |
5793 | +} |
5794 | + |
5795 | +/* |
5796 | + * Wake up users waiting for IO so they can disconnect from |
5797 | + * dead device. |
5798 | + */ |
5799 | +static void tsdev_hangup(struct tsdev *tsdev) |
5800 | +{ |
5801 | + struct tsdev_client *client; |
5802 | + |
5803 | + spin_lock(&tsdev->client_lock); |
5804 | + list_for_each_entry(client, &tsdev->client_list, node) |
5805 | + kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
5806 | + spin_unlock(&tsdev->client_lock); |
5807 | + |
5808 | + wake_up_interruptible(&tsdev->wait); |
5809 | +} |
5810 | + |
5811 | +static int tsdev_release(struct inode *inode, struct file *file) |
5812 | +{ |
5813 | + struct tsdev_client *client = file->private_data; |
5814 | + struct tsdev *tsdev = client->tsdev; |
5815 | + |
5816 | + tsdev_fasync(-1, file, 0); |
5817 | + tsdev_detach_client(tsdev, client); |
5818 | + kfree(client); |
5819 | + |
5820 | + tsdev_close_device(tsdev); |
5821 | + put_device(&tsdev->dev); |
5822 | + |
5823 | + return 0; |
5824 | +} |
5825 | + |
5826 | static int tsdev_open(struct inode *inode, struct file *file) |
5827 | { |
5828 | int i = iminor(inode) - TSDEV_MINOR_BASE; |
5829 | @@ -161,11 +250,16 @@ static int tsdev_open(struct inode *inode, struct file *file) |
5830 | if (i >= TSDEV_MINORS) |
5831 | return -ENODEV; |
5832 | |
5833 | + error = mutex_lock_interruptible(&tsdev_table_mutex); |
5834 | + if (error) |
5835 | + return error; |
5836 | tsdev = tsdev_table[i & TSDEV_MINOR_MASK]; |
5837 | - if (!tsdev || !tsdev->exist) |
5838 | - return -ENODEV; |
5839 | + if (tsdev) |
5840 | + get_device(&tsdev->dev); |
5841 | + mutex_unlock(&tsdev_table_mutex); |
5842 | |
5843 | - get_device(&tsdev->dev); |
5844 | + if (!tsdev) |
5845 | + return -ENODEV; |
5846 | |
5847 | client = kzalloc(sizeof(struct tsdev_client), GFP_KERNEL); |
5848 | if (!client) { |
5849 | @@ -173,51 +267,42 @@ static int tsdev_open(struct inode *inode, struct file *file) |
5850 | goto err_put_tsdev; |
5851 | } |
5852 | |
5853 | + spin_lock_init(&client->buffer_lock); |
5854 | client->tsdev = tsdev; |
5855 | - client->raw = (i >= TSDEV_MINORS / 2) ? 1 : 0; |
5856 | - list_add_tail(&client->node, &tsdev->client_list); |
5857 | + client->raw = i >= TSDEV_MINORS / 2; |
5858 | + tsdev_attach_client(tsdev, client); |
5859 | |
5860 | - if (!tsdev->open++ && tsdev->exist) { |
5861 | - error = input_open_device(&tsdev->handle); |
5862 | - if (error) |
5863 | - goto err_free_client; |
5864 | - } |
5865 | + error = tsdev_open_device(tsdev); |
5866 | + if (error) |
5867 | + goto err_free_client; |
5868 | |
5869 | file->private_data = client; |
5870 | return 0; |
5871 | |
5872 | err_free_client: |
5873 | - list_del(&client->node); |
5874 | + tsdev_detach_client(tsdev, client); |
5875 | kfree(client); |
5876 | err_put_tsdev: |
5877 | put_device(&tsdev->dev); |
5878 | return error; |
5879 | } |
5880 | |
5881 | -static void tsdev_free(struct device *dev) |
5882 | -{ |
5883 | - struct tsdev *tsdev = container_of(dev, struct tsdev, dev); |
5884 | - |
5885 | - tsdev_table[tsdev->minor] = NULL; |
5886 | - kfree(tsdev); |
5887 | -} |
5888 | - |
5889 | -static int tsdev_release(struct inode *inode, struct file *file) |
5890 | +static int tsdev_fetch_next_event(struct tsdev_client *client, |
5891 | + struct ts_event *event) |
5892 | { |
5893 | - struct tsdev_client *client = file->private_data; |
5894 | - struct tsdev *tsdev = client->tsdev; |
5895 | + int have_event; |
5896 | |
5897 | - tsdev_fasync(-1, file, 0); |
5898 | - |
5899 | - list_del(&client->node); |
5900 | - kfree(client); |
5901 | + spin_lock_irq(&client->buffer_lock); |
5902 | |
5903 | - if (!--tsdev->open && tsdev->exist) |
5904 | - input_close_device(&tsdev->handle); |
5905 | + have_event = client->head != client->tail; |
5906 | + if (have_event) { |
5907 | + *event = client->buffer[client->tail++]; |
5908 | + client->tail &= TSDEV_BUFFER_SIZE - 1; |
5909 | + } |
5910 | |
5911 | - put_device(&tsdev->dev); |
5912 | + spin_unlock_irq(&client->buffer_lock); |
5913 | |
5914 | - return 0; |
5915 | + return have_event; |
5916 | } |
5917 | |
5918 | static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count, |
5919 | @@ -225,9 +310,11 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count, |
5920 | { |
5921 | struct tsdev_client *client = file->private_data; |
5922 | struct tsdev *tsdev = client->tsdev; |
5923 | - int retval = 0; |
5924 | + struct ts_event event; |
5925 | + int retval; |
5926 | |
5927 | - if (client->head == client->tail && tsdev->exist && (file->f_flags & O_NONBLOCK)) |
5928 | + if (client->head == client->tail && tsdev->exist && |
5929 | + (file->f_flags & O_NONBLOCK)) |
5930 | return -EAGAIN; |
5931 | |
5932 | retval = wait_event_interruptible(tsdev->wait, |
5933 | @@ -238,13 +325,14 @@ static ssize_t tsdev_read(struct file *file, char __user *buffer, size_t count, |
5934 | if (!tsdev->exist) |
5935 | return -ENODEV; |
5936 | |
5937 | - while (client->head != client->tail && |
5938 | - retval + sizeof (struct ts_event) <= count) { |
5939 | - if (copy_to_user (buffer + retval, client->event + client->tail, |
5940 | - sizeof (struct ts_event))) |
5941 | + while (retval + sizeof(struct ts_event) <= count && |
5942 | + tsdev_fetch_next_event(client, &event)) { |
5943 | + |
5944 | + if (copy_to_user(buffer + retval, &event, |
5945 | + sizeof(struct ts_event))) |
5946 | return -EFAULT; |
5947 | - client->tail = (client->tail + 1) & (TSDEV_BUFFER_SIZE - 1); |
5948 | - retval += sizeof (struct ts_event); |
5949 | + |
5950 | + retval += sizeof(struct ts_event); |
5951 | } |
5952 | |
5953 | return retval; |
5954 | @@ -261,14 +349,23 @@ static unsigned int tsdev_poll(struct file *file, poll_table *wait) |
5955 | (tsdev->exist ? 0 : (POLLHUP | POLLERR)); |
5956 | } |
5957 | |
5958 | -static int tsdev_ioctl(struct inode *inode, struct file *file, |
5959 | - unsigned int cmd, unsigned long arg) |
5960 | +static long tsdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
5961 | { |
5962 | struct tsdev_client *client = file->private_data; |
5963 | struct tsdev *tsdev = client->tsdev; |
5964 | int retval = 0; |
5965 | |
5966 | + retval = mutex_lock_interruptible(&tsdev->mutex); |
5967 | + if (retval) |
5968 | + return retval; |
5969 | + |
5970 | + if (!tsdev->exist) { |
5971 | + retval = -ENODEV; |
5972 | + goto out; |
5973 | + } |
5974 | + |
5975 | switch (cmd) { |
5976 | + |
5977 | case TS_GET_CAL: |
5978 | if (copy_to_user((void __user *)arg, &tsdev->cal, |
5979 | sizeof (struct ts_calibration))) |
5980 | @@ -277,7 +374,7 @@ static int tsdev_ioctl(struct inode *inode, struct file *file, |
5981 | |
5982 | case TS_SET_CAL: |
5983 | if (copy_from_user(&tsdev->cal, (void __user *)arg, |
5984 | - sizeof (struct ts_calibration))) |
5985 | + sizeof(struct ts_calibration))) |
5986 | retval = -EFAULT; |
5987 | break; |
5988 | |
5989 | @@ -286,29 +383,79 @@ static int tsdev_ioctl(struct inode *inode, struct file *file, |
5990 | break; |
5991 | } |
5992 | |
5993 | + out: |
5994 | + mutex_unlock(&tsdev->mutex); |
5995 | return retval; |
5996 | } |
5997 | |
5998 | static const struct file_operations tsdev_fops = { |
5999 | - .owner = THIS_MODULE, |
6000 | - .open = tsdev_open, |
6001 | - .release = tsdev_release, |
6002 | - .read = tsdev_read, |
6003 | - .poll = tsdev_poll, |
6004 | - .fasync = tsdev_fasync, |
6005 | - .ioctl = tsdev_ioctl, |
6006 | + .owner = THIS_MODULE, |
6007 | + .open = tsdev_open, |
6008 | + .release = tsdev_release, |
6009 | + .read = tsdev_read, |
6010 | + .poll = tsdev_poll, |
6011 | + .fasync = tsdev_fasync, |
6012 | + .unlocked_ioctl = tsdev_ioctl, |
6013 | }; |
6014 | |
6015 | +static void tsdev_pass_event(struct tsdev *tsdev, struct tsdev_client *client, |
6016 | + int x, int y, int pressure, int millisecs) |
6017 | +{ |
6018 | + struct ts_event *event; |
6019 | + int tmp; |
6020 | + |
6021 | + /* Interrupts are already disabled, just acquire the lock */ |
6022 | + spin_lock(&client->buffer_lock); |
6023 | + |
6024 | + event = &client->buffer[client->head++]; |
6025 | + client->head &= TSDEV_BUFFER_SIZE - 1; |
6026 | + |
6027 | + /* Calibration */ |
6028 | + if (!client->raw) { |
6029 | + x = ((x * tsdev->cal.xscale) >> 8) + tsdev->cal.xtrans; |
6030 | + y = ((y * tsdev->cal.yscale) >> 8) + tsdev->cal.ytrans; |
6031 | + if (tsdev->cal.xyswap) { |
6032 | + tmp = x; x = y; y = tmp; |
6033 | + } |
6034 | + } |
6035 | + |
6036 | + event->millisecs = millisecs; |
6037 | + event->x = x; |
6038 | + event->y = y; |
6039 | + event->pressure = pressure; |
6040 | + |
6041 | + spin_unlock(&client->buffer_lock); |
6042 | + |
6043 | + kill_fasync(&client->fasync, SIGIO, POLL_IN); |
6044 | +} |
6045 | + |
6046 | +static void tsdev_distribute_event(struct tsdev *tsdev) |
6047 | +{ |
6048 | + struct tsdev_client *client; |
6049 | + struct timeval time; |
6050 | + int millisecs; |
6051 | + |
6052 | + do_gettimeofday(&time); |
6053 | + millisecs = time.tv_usec / 1000; |
6054 | + |
6055 | + list_for_each_entry_rcu(client, &tsdev->client_list, node) |
6056 | + tsdev_pass_event(tsdev, client, |
6057 | + tsdev->x, tsdev->y, |
6058 | + tsdev->pressure, millisecs); |
6059 | +} |
6060 | + |
6061 | static void tsdev_event(struct input_handle *handle, unsigned int type, |
6062 | unsigned int code, int value) |
6063 | { |
6064 | struct tsdev *tsdev = handle->private; |
6065 | - struct tsdev_client *client; |
6066 | - struct timeval time; |
6067 | + struct input_dev *dev = handle->dev; |
6068 | + int wake_up_readers = 0; |
6069 | |
6070 | switch (type) { |
6071 | + |
6072 | case EV_ABS: |
6073 | switch (code) { |
6074 | + |
6075 | case ABS_X: |
6076 | tsdev->x = value; |
6077 | break; |
6078 | @@ -318,9 +465,9 @@ static void tsdev_event(struct input_handle *handle, unsigned int type, |
6079 | break; |
6080 | |
6081 | case ABS_PRESSURE: |
6082 | - if (value > handle->dev->absmax[ABS_PRESSURE]) |
6083 | - value = handle->dev->absmax[ABS_PRESSURE]; |
6084 | - value -= handle->dev->absmin[ABS_PRESSURE]; |
6085 | + if (value > dev->absmax[ABS_PRESSURE]) |
6086 | + value = dev->absmax[ABS_PRESSURE]; |
6087 | + value -= dev->absmin[ABS_PRESSURE]; |
6088 | if (value < 0) |
6089 | value = 0; |
6090 | tsdev->pressure = value; |
6091 | @@ -330,6 +477,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type, |
6092 | |
6093 | case EV_REL: |
6094 | switch (code) { |
6095 | + |
6096 | case REL_X: |
6097 | tsdev->x += value; |
6098 | if (tsdev->x < 0) |
6099 | @@ -351,6 +499,7 @@ static void tsdev_event(struct input_handle *handle, unsigned int type, |
6100 | case EV_KEY: |
6101 | if (code == BTN_TOUCH || code == BTN_MOUSE) { |
6102 | switch (value) { |
6103 | + |
6104 | case 0: |
6105 | tsdev->pressure = 0; |
6106 | break; |
6107 | @@ -362,49 +511,71 @@ static void tsdev_event(struct input_handle *handle, unsigned int type, |
6108 | } |
6109 | } |
6110 | break; |
6111 | + |
6112 | + case EV_SYN: |
6113 | + if (code == SYN_REPORT) { |
6114 | + tsdev_distribute_event(tsdev); |
6115 | + wake_up_readers = 1; |
6116 | + } |
6117 | + break; |
6118 | } |
6119 | |
6120 | - if (type != EV_SYN || code != SYN_REPORT) |
6121 | - return; |
6122 | + if (wake_up_readers) |
6123 | + wake_up_interruptible(&tsdev->wait); |
6124 | +} |
6125 | + |
6126 | +static int tsdev_install_chrdev(struct tsdev *tsdev) |
6127 | +{ |
6128 | + tsdev_table[tsdev->minor] = tsdev; |
6129 | + return 0; |
6130 | +} |
6131 | |
6132 | - list_for_each_entry(client, &tsdev->client_list, node) { |
6133 | - int x, y, tmp; |
6134 | +static void tsdev_remove_chrdev(struct tsdev *tsdev) |
6135 | +{ |
6136 | + mutex_lock(&tsdev_table_mutex); |
6137 | + tsdev_table[tsdev->minor] = NULL; |
6138 | + mutex_unlock(&tsdev_table_mutex); |
6139 | +} |
6140 | |
6141 | - do_gettimeofday(&time); |
6142 | - client->event[client->head].millisecs = time.tv_usec / 1000; |
6143 | - client->event[client->head].pressure = tsdev->pressure; |
6144 | +/* |
6145 | + * Mark device non-existant. This disables writes, ioctls and |
6146 | + * prevents new users from opening the device. Already posted |
6147 | + * blocking reads will stay, however new ones will fail. |
6148 | + */ |
6149 | +static void tsdev_mark_dead(struct tsdev *tsdev) |
6150 | +{ |
6151 | + mutex_lock(&tsdev->mutex); |
6152 | + tsdev->exist = 0; |
6153 | + mutex_unlock(&tsdev->mutex); |
6154 | +} |
6155 | |
6156 | - x = tsdev->x; |
6157 | - y = tsdev->y; |
6158 | +static void tsdev_cleanup(struct tsdev *tsdev) |
6159 | +{ |
6160 | + struct input_handle *handle = &tsdev->handle; |
6161 | |
6162 | - /* Calibration */ |
6163 | - if (!client->raw) { |
6164 | - x = ((x * tsdev->cal.xscale) >> 8) + tsdev->cal.xtrans; |
6165 | - y = ((y * tsdev->cal.yscale) >> 8) + tsdev->cal.ytrans; |
6166 | - if (tsdev->cal.xyswap) { |
6167 | - tmp = x; x = y; y = tmp; |
6168 | - } |
6169 | - } |
6170 | + tsdev_mark_dead(tsdev); |
6171 | + tsdev_hangup(tsdev); |
6172 | + tsdev_remove_chrdev(tsdev); |
6173 | |
6174 | - client->event[client->head].x = x; |
6175 | - client->event[client->head].y = y; |
6176 | - client->head = (client->head + 1) & (TSDEV_BUFFER_SIZE - 1); |
6177 | - kill_fasync(&client->fasync, SIGIO, POLL_IN); |
6178 | - } |
6179 | - wake_up_interruptible(&tsdev->wait); |
6180 | + /* tsdev is marked dead so no one else accesses tsdev->open */ |
6181 | + if (tsdev->open) |
6182 | + input_close_device(handle); |
6183 | } |
6184 | |
6185 | static int tsdev_connect(struct input_handler *handler, struct input_dev *dev, |
6186 | const struct input_device_id *id) |
6187 | { |
6188 | struct tsdev *tsdev; |
6189 | - int minor, delta; |
6190 | + int delta; |
6191 | + int minor; |
6192 | int error; |
6193 | |
6194 | - for (minor = 0; minor < TSDEV_MINORS / 2 && tsdev_table[minor]; minor++); |
6195 | - if (minor >= TSDEV_MINORS / 2) { |
6196 | - printk(KERN_ERR |
6197 | - "tsdev: You have way too many touchscreens\n"); |
6198 | + for (minor = 0; minor < TSDEV_MINORS / 2; minor++) |
6199 | + if (!tsdev_table[minor]) |
6200 | + break; |
6201 | + |
6202 | + if (minor == TSDEV_MINORS) { |
6203 | + printk(KERN_ERR "tsdev: no more free tsdev devices\n"); |
6204 | return -ENFILE; |
6205 | } |
6206 | |
6207 | @@ -413,15 +584,18 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev, |
6208 | return -ENOMEM; |
6209 | |
6210 | INIT_LIST_HEAD(&tsdev->client_list); |
6211 | + spin_lock_init(&tsdev->client_lock); |
6212 | + mutex_init(&tsdev->mutex); |
6213 | init_waitqueue_head(&tsdev->wait); |
6214 | |
6215 | + snprintf(tsdev->name, sizeof(tsdev->name), "ts%d", minor); |
6216 | tsdev->exist = 1; |
6217 | tsdev->minor = minor; |
6218 | + |
6219 | tsdev->handle.dev = dev; |
6220 | tsdev->handle.name = tsdev->name; |
6221 | tsdev->handle.handler = handler; |
6222 | tsdev->handle.private = tsdev; |
6223 | - snprintf(tsdev->name, sizeof(tsdev->name), "ts%d", minor); |
6224 | |
6225 | /* Precompute the rough calibration matrix */ |
6226 | delta = dev->absmax [ABS_X] - dev->absmin [ABS_X] + 1; |
6227 | @@ -436,28 +610,31 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev, |
6228 | tsdev->cal.yscale = (yres << 8) / delta; |
6229 | tsdev->cal.ytrans = - ((dev->absmin [ABS_Y] * tsdev->cal.yscale) >> 8); |
6230 | |
6231 | - snprintf(tsdev->dev.bus_id, sizeof(tsdev->dev.bus_id), |
6232 | - "ts%d", minor); |
6233 | + strlcpy(tsdev->dev.bus_id, tsdev->name, sizeof(tsdev->dev.bus_id)); |
6234 | + tsdev->dev.devt = MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor); |
6235 | tsdev->dev.class = &input_class; |
6236 | tsdev->dev.parent = &dev->dev; |
6237 | - tsdev->dev.devt = MKDEV(INPUT_MAJOR, TSDEV_MINOR_BASE + minor); |
6238 | tsdev->dev.release = tsdev_free; |
6239 | device_initialize(&tsdev->dev); |
6240 | |
6241 | - tsdev_table[minor] = tsdev; |
6242 | - |
6243 | - error = device_add(&tsdev->dev); |
6244 | + error = input_register_handle(&tsdev->handle); |
6245 | if (error) |
6246 | goto err_free_tsdev; |
6247 | |
6248 | - error = input_register_handle(&tsdev->handle); |
6249 | + error = tsdev_install_chrdev(tsdev); |
6250 | if (error) |
6251 | - goto err_delete_tsdev; |
6252 | + goto err_unregister_handle; |
6253 | + |
6254 | + error = device_add(&tsdev->dev); |
6255 | + if (error) |
6256 | + goto err_cleanup_tsdev; |
6257 | |
6258 | return 0; |
6259 | |
6260 | - err_delete_tsdev: |
6261 | - device_del(&tsdev->dev); |
6262 | + err_cleanup_tsdev: |
6263 | + tsdev_cleanup(tsdev); |
6264 | + err_unregister_handle: |
6265 | + input_unregister_handle(&tsdev->handle); |
6266 | err_free_tsdev: |
6267 | put_device(&tsdev->dev); |
6268 | return error; |
6269 | @@ -466,20 +643,10 @@ static int tsdev_connect(struct input_handler *handler, struct input_dev *dev, |
6270 | static void tsdev_disconnect(struct input_handle *handle) |
6271 | { |
6272 | struct tsdev *tsdev = handle->private; |
6273 | - struct tsdev_client *client; |
6274 | |
6275 | - input_unregister_handle(handle); |
6276 | device_del(&tsdev->dev); |
6277 | - |
6278 | - tsdev->exist = 0; |
6279 | - |
6280 | - if (tsdev->open) { |
6281 | - input_close_device(handle); |
6282 | - list_for_each_entry(client, &tsdev->client_list, node) |
6283 | - kill_fasync(&client->fasync, SIGIO, POLL_HUP); |
6284 | - wake_up_interruptible(&tsdev->wait); |
6285 | - } |
6286 | - |
6287 | + tsdev_cleanup(tsdev); |
6288 | + input_unregister_handle(handle); |
6289 | put_device(&tsdev->dev); |
6290 | } |
6291 | |
6292 | @@ -510,13 +677,13 @@ static const struct input_device_id tsdev_ids[] = { |
6293 | MODULE_DEVICE_TABLE(input, tsdev_ids); |
6294 | |
6295 | static struct input_handler tsdev_handler = { |
6296 | - .event = tsdev_event, |
6297 | - .connect = tsdev_connect, |
6298 | - .disconnect = tsdev_disconnect, |
6299 | - .fops = &tsdev_fops, |
6300 | - .minor = TSDEV_MINOR_BASE, |
6301 | - .name = "tsdev", |
6302 | - .id_table = tsdev_ids, |
6303 | + .event = tsdev_event, |
6304 | + .connect = tsdev_connect, |
6305 | + .disconnect = tsdev_disconnect, |
6306 | + .fops = &tsdev_fops, |
6307 | + .minor = TSDEV_MINOR_BASE, |
6308 | + .name = "tsdev", |
6309 | + .id_table = tsdev_ids, |
6310 | }; |
6311 | |
6312 | static int __init tsdev_init(void) |
6313 | diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c |
6314 | index 23b6f7b..f65b7f9 100644 |
6315 | --- a/drivers/isdn/capi/capidrv.c |
6316 | +++ b/drivers/isdn/capi/capidrv.c |
6317 | @@ -2306,13 +2306,14 @@ static int __init capidrv_init(void) |
6318 | |
6319 | static void __exit capidrv_exit(void) |
6320 | { |
6321 | - char rev[10]; |
6322 | + char rev[32]; |
6323 | char *p; |
6324 | |
6325 | if ((p = strchr(revision, ':')) != 0) { |
6326 | - strcpy(rev, p + 1); |
6327 | - p = strchr(rev, '$'); |
6328 | - *p = 0; |
6329 | + strncpy(rev, p + 1, sizeof(rev)); |
6330 | + rev[sizeof(rev)-1] = 0; |
6331 | + if ((p = strchr(rev, '$')) != 0) |
6332 | + *p = 0; |
6333 | } else { |
6334 | strcpy(rev, " ??? "); |
6335 | } |
6336 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
6337 | index bdc52d6..ba2e135 100644 |
6338 | --- a/drivers/md/dm-crypt.c |
6339 | +++ b/drivers/md/dm-crypt.c |
6340 | @@ -399,7 +399,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) |
6341 | struct bio *clone; |
6342 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
6343 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
6344 | - unsigned int i; |
6345 | + unsigned i, len; |
6346 | + struct page *page; |
6347 | |
6348 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
6349 | if (!clone) |
6350 | @@ -408,10 +409,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) |
6351 | clone_init(io, clone); |
6352 | |
6353 | for (i = 0; i < nr_iovecs; i++) { |
6354 | - struct bio_vec *bv = bio_iovec_idx(clone, i); |
6355 | - |
6356 | - bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); |
6357 | - if (!bv->bv_page) |
6358 | + page = mempool_alloc(cc->page_pool, gfp_mask); |
6359 | + if (!page) |
6360 | break; |
6361 | |
6362 | /* |
6363 | @@ -422,15 +421,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) |
6364 | if (i == (MIN_BIO_PAGES - 1)) |
6365 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
6366 | |
6367 | - bv->bv_offset = 0; |
6368 | - if (size > PAGE_SIZE) |
6369 | - bv->bv_len = PAGE_SIZE; |
6370 | - else |
6371 | - bv->bv_len = size; |
6372 | + len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
6373 | + |
6374 | + if (!bio_add_page(clone, page, len, 0)) { |
6375 | + mempool_free(page, cc->page_pool); |
6376 | + break; |
6377 | + } |
6378 | |
6379 | - clone->bi_size += bv->bv_len; |
6380 | - clone->bi_vcnt++; |
6381 | - size -= bv->bv_len; |
6382 | + size -= len; |
6383 | } |
6384 | |
6385 | if (!clone->bi_size) { |
6386 | @@ -515,6 +513,9 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error) |
6387 | struct crypt_config *cc = io->target->private; |
6388 | unsigned read_io = bio_data_dir(clone) == READ; |
6389 | |
6390 | + if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
6391 | + error = -EIO; |
6392 | + |
6393 | /* |
6394 | * free the processed pages, even if |
6395 | * it's only a partially completed write |
6396 | @@ -529,10 +530,8 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error) |
6397 | if (!read_io) |
6398 | goto out; |
6399 | |
6400 | - if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { |
6401 | - error = -EIO; |
6402 | + if (unlikely(error)) |
6403 | goto out; |
6404 | - } |
6405 | |
6406 | bio_put(clone); |
6407 | io->post_process = 1; |
6408 | diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c |
6409 | index b441d82..8a4f63b 100644 |
6410 | --- a/drivers/md/dm-ioctl.c |
6411 | +++ b/drivers/md/dm-ioctl.c |
6412 | @@ -1250,21 +1250,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size) |
6413 | if (!table) |
6414 | goto out_argv; |
6415 | |
6416 | - if (tmsg->sector >= dm_table_get_size(table)) { |
6417 | + ti = dm_table_find_target(table, tmsg->sector); |
6418 | + if (!dm_target_is_valid(ti)) { |
6419 | DMWARN("Target message sector outside device."); |
6420 | r = -EINVAL; |
6421 | - goto out_table; |
6422 | - } |
6423 | - |
6424 | - ti = dm_table_find_target(table, tmsg->sector); |
6425 | - if (ti->type->message) |
6426 | + } else if (ti->type->message) |
6427 | r = ti->type->message(ti, argc, argv); |
6428 | else { |
6429 | DMWARN("Target type does not support messages"); |
6430 | r = -EINVAL; |
6431 | } |
6432 | |
6433 | - out_table: |
6434 | dm_table_put(table); |
6435 | out_argv: |
6436 | kfree(argv); |
6437 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
6438 | index 2bcde57..72d2250 100644 |
6439 | --- a/drivers/md/dm-table.c |
6440 | +++ b/drivers/md/dm-table.c |
6441 | @@ -187,8 +187,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num) |
6442 | |
6443 | /* |
6444 | * Allocate both the target array and offset array at once. |
6445 | + * Append an empty entry to catch sectors beyond the end of |
6446 | + * the device. |
6447 | */ |
6448 | - n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + |
6449 | + n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + |
6450 | sizeof(sector_t)); |
6451 | if (!n_highs) |
6452 | return -ENOMEM; |
6453 | @@ -862,6 +864,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) |
6454 | |
6455 | /* |
6456 | * Search the btree for the correct target. |
6457 | + * |
6458 | + * Caller should check returned pointer with dm_target_is_valid() |
6459 | + * to trap I/O beyond end of device. |
6460 | */ |
6461 | struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) |
6462 | { |
6463 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
6464 | index 998d450..fac09d5 100644 |
6465 | --- a/drivers/md/dm.c |
6466 | +++ b/drivers/md/dm.c |
6467 | @@ -663,13 +663,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, |
6468 | return clone; |
6469 | } |
6470 | |
6471 | -static void __clone_and_map(struct clone_info *ci) |
6472 | +static int __clone_and_map(struct clone_info *ci) |
6473 | { |
6474 | struct bio *clone, *bio = ci->bio; |
6475 | - struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); |
6476 | - sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); |
6477 | + struct dm_target *ti; |
6478 | + sector_t len = 0, max; |
6479 | struct dm_target_io *tio; |
6480 | |
6481 | + ti = dm_table_find_target(ci->map, ci->sector); |
6482 | + if (!dm_target_is_valid(ti)) |
6483 | + return -EIO; |
6484 | + |
6485 | + max = max_io_len(ci->md, ci->sector, ti); |
6486 | + |
6487 | /* |
6488 | * Allocate a target io object. |
6489 | */ |
6490 | @@ -727,6 +733,9 @@ static void __clone_and_map(struct clone_info *ci) |
6491 | do { |
6492 | if (offset) { |
6493 | ti = dm_table_find_target(ci->map, ci->sector); |
6494 | + if (!dm_target_is_valid(ti)) |
6495 | + return -EIO; |
6496 | + |
6497 | max = max_io_len(ci->md, ci->sector, ti); |
6498 | |
6499 | tio = alloc_tio(ci->md); |
6500 | @@ -750,6 +759,8 @@ static void __clone_and_map(struct clone_info *ci) |
6501 | |
6502 | ci->idx++; |
6503 | } |
6504 | + |
6505 | + return 0; |
6506 | } |
6507 | |
6508 | /* |
6509 | @@ -758,6 +769,7 @@ static void __clone_and_map(struct clone_info *ci) |
6510 | static void __split_bio(struct mapped_device *md, struct bio *bio) |
6511 | { |
6512 | struct clone_info ci; |
6513 | + int error = 0; |
6514 | |
6515 | ci.map = dm_get_table(md); |
6516 | if (!ci.map) { |
6517 | @@ -777,11 +789,11 @@ static void __split_bio(struct mapped_device *md, struct bio *bio) |
6518 | ci.idx = bio->bi_idx; |
6519 | |
6520 | start_io_acct(ci.io); |
6521 | - while (ci.sector_count) |
6522 | - __clone_and_map(&ci); |
6523 | + while (ci.sector_count && !error) |
6524 | + error = __clone_and_map(&ci); |
6525 | |
6526 | /* drop the extra reference count */ |
6527 | - dec_pending(ci.io, 0); |
6528 | + dec_pending(ci.io, error); |
6529 | dm_table_put(ci.map); |
6530 | } |
6531 | /*----------------------------------------------------------------- |
6532 | diff --git a/drivers/md/dm.h b/drivers/md/dm.h |
6533 | index 462ee65..07298a3 100644 |
6534 | --- a/drivers/md/dm.h |
6535 | +++ b/drivers/md/dm.h |
6536 | @@ -113,6 +113,11 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); |
6537 | void dm_table_unplug_all(struct dm_table *t); |
6538 | int dm_table_flush_all(struct dm_table *t); |
6539 | |
6540 | +/* |
6541 | + * To check the return value from dm_table_find_target(). |
6542 | + */ |
6543 | +#define dm_target_is_valid(t) ((t)->table) |
6544 | + |
6545 | /*----------------------------------------------------------------- |
6546 | * A registry of target types. |
6547 | *---------------------------------------------------------------*/ |
6548 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
6549 | index e86cacb..3085228 100644 |
6550 | --- a/drivers/md/raid5.c |
6551 | +++ b/drivers/md/raid5.c |
6552 | @@ -2875,7 +2875,8 @@ static void handle_stripe5(struct stripe_head *sh) |
6553 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
6554 | } |
6555 | |
6556 | - if (s.expanding && s.locked == 0) |
6557 | + if (s.expanding && s.locked == 0 && |
6558 | + !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) |
6559 | handle_stripe_expansion(conf, sh, NULL); |
6560 | |
6561 | if (sh->ops.count) |
6562 | @@ -3077,7 +3078,8 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) |
6563 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
6564 | } |
6565 | |
6566 | - if (s.expanding && s.locked == 0) |
6567 | + if (s.expanding && s.locked == 0 && |
6568 | + !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) |
6569 | handle_stripe_expansion(conf, sh, &r6s); |
6570 | |
6571 | spin_unlock(&sh->lock); |
6572 | diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c |
6573 | index 0222bba..91047c7 100644 |
6574 | --- a/drivers/misc/thinkpad_acpi.c |
6575 | +++ b/drivers/misc/thinkpad_acpi.c |
6576 | @@ -968,9 +968,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) |
6577 | KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */ |
6578 | KEY_UNKNOWN, /* 0x0D: FN+INSERT */ |
6579 | KEY_UNKNOWN, /* 0x0E: FN+DELETE */ |
6580 | - KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */ |
6581 | + KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */ |
6582 | /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */ |
6583 | - KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */ |
6584 | + KEY_RESERVED, /* 0x10: FN+END (brightness down) */ |
6585 | KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */ |
6586 | KEY_UNKNOWN, /* 0x12: FN+PGDOWN */ |
6587 | KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */ |
6588 | diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c |
6589 | index f23e13c..d2d4730 100644 |
6590 | --- a/drivers/net/atl1/atl1_main.c |
6591 | +++ b/drivers/net/atl1/atl1_main.c |
6592 | @@ -121,7 +121,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) |
6593 | struct atl1_hw *hw = &adapter->hw; |
6594 | struct net_device *netdev = adapter->netdev; |
6595 | |
6596 | - hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
6597 | + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
6598 | hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
6599 | |
6600 | adapter->wol = 0; |
6601 | @@ -689,7 +689,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) |
6602 | { |
6603 | struct atl1_adapter *adapter = netdev_priv(netdev); |
6604 | int old_mtu = netdev->mtu; |
6605 | - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
6606 | + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
6607 | |
6608 | if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || |
6609 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { |
6610 | @@ -854,8 +854,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter) |
6611 | /* set Interrupt Clear Timer */ |
6612 | iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); |
6613 | |
6614 | - /* set MTU, 4 : VLAN */ |
6615 | - iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU); |
6616 | + /* set max frame size hw will accept */ |
6617 | + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); |
6618 | |
6619 | /* jumbo size & rrd retirement timer */ |
6620 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) |
6621 | diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c |
6622 | index f6e4030..0883112 100644 |
6623 | --- a/drivers/net/cassini.c |
6624 | +++ b/drivers/net/cassini.c |
6625 | @@ -336,30 +336,6 @@ static inline void cas_mask_intr(struct cas *cp) |
6626 | cas_disable_irq(cp, i); |
6627 | } |
6628 | |
6629 | -static inline void cas_buffer_init(cas_page_t *cp) |
6630 | -{ |
6631 | - struct page *page = cp->buffer; |
6632 | - atomic_set((atomic_t *)&page->lru.next, 1); |
6633 | -} |
6634 | - |
6635 | -static inline int cas_buffer_count(cas_page_t *cp) |
6636 | -{ |
6637 | - struct page *page = cp->buffer; |
6638 | - return atomic_read((atomic_t *)&page->lru.next); |
6639 | -} |
6640 | - |
6641 | -static inline void cas_buffer_inc(cas_page_t *cp) |
6642 | -{ |
6643 | - struct page *page = cp->buffer; |
6644 | - atomic_inc((atomic_t *)&page->lru.next); |
6645 | -} |
6646 | - |
6647 | -static inline void cas_buffer_dec(cas_page_t *cp) |
6648 | -{ |
6649 | - struct page *page = cp->buffer; |
6650 | - atomic_dec((atomic_t *)&page->lru.next); |
6651 | -} |
6652 | - |
6653 | static void cas_enable_irq(struct cas *cp, const int ring) |
6654 | { |
6655 | if (ring == 0) { /* all but TX_DONE */ |
6656 | @@ -497,7 +473,6 @@ static int cas_page_free(struct cas *cp, cas_page_t *page) |
6657 | { |
6658 | pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, |
6659 | PCI_DMA_FROMDEVICE); |
6660 | - cas_buffer_dec(page); |
6661 | __free_pages(page->buffer, cp->page_order); |
6662 | kfree(page); |
6663 | return 0; |
6664 | @@ -527,7 +502,6 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) |
6665 | page->buffer = alloc_pages(flags, cp->page_order); |
6666 | if (!page->buffer) |
6667 | goto page_err; |
6668 | - cas_buffer_init(page); |
6669 | page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, |
6670 | cp->page_size, PCI_DMA_FROMDEVICE); |
6671 | return page; |
6672 | @@ -606,7 +580,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) |
6673 | list_for_each_safe(elem, tmp, &list) { |
6674 | cas_page_t *page = list_entry(elem, cas_page_t, list); |
6675 | |
6676 | - if (cas_buffer_count(page) > 1) |
6677 | + if (page_count(page->buffer) > 1) |
6678 | continue; |
6679 | |
6680 | list_del(elem); |
6681 | @@ -1374,7 +1348,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) |
6682 | cas_page_t *page = cp->rx_pages[1][index]; |
6683 | cas_page_t *new; |
6684 | |
6685 | - if (cas_buffer_count(page) == 1) |
6686 | + if (page_count(page->buffer) == 1) |
6687 | return page; |
6688 | |
6689 | new = cas_page_dequeue(cp); |
6690 | @@ -1394,7 +1368,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring, |
6691 | cas_page_t **page1 = cp->rx_pages[1]; |
6692 | |
6693 | /* swap if buffer is in use */ |
6694 | - if (cas_buffer_count(page0[index]) > 1) { |
6695 | + if (page_count(page0[index]->buffer) > 1) { |
6696 | cas_page_t *new = cas_page_spare(cp, index); |
6697 | if (new) { |
6698 | page1[index] = page0[index]; |
6699 | @@ -1979,6 +1953,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, |
6700 | struct cas_page *page; |
6701 | struct sk_buff *skb; |
6702 | void *addr, *crcaddr; |
6703 | + __sum16 csum; |
6704 | char *p; |
6705 | |
6706 | hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); |
6707 | @@ -2062,10 +2037,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, |
6708 | |
6709 | skb_shinfo(skb)->nr_frags++; |
6710 | skb->data_len += hlen - swivel; |
6711 | + skb->truesize += hlen - swivel; |
6712 | skb->len += hlen - swivel; |
6713 | |
6714 | get_page(page->buffer); |
6715 | - cas_buffer_inc(page); |
6716 | frag->page = page->buffer; |
6717 | frag->page_offset = off; |
6718 | frag->size = hlen - swivel; |
6719 | @@ -2090,7 +2065,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, |
6720 | frag++; |
6721 | |
6722 | get_page(page->buffer); |
6723 | - cas_buffer_inc(page); |
6724 | frag->page = page->buffer; |
6725 | frag->page_offset = 0; |
6726 | frag->size = hlen; |
6727 | @@ -2158,14 +2132,15 @@ end_copy_pkt: |
6728 | skb_put(skb, alloclen); |
6729 | } |
6730 | |
6731 | - i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); |
6732 | + csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); |
6733 | if (cp->crc_size) { |
6734 | /* checksum includes FCS. strip it out. */ |
6735 | - i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); |
6736 | + csum = csum_fold(csum_partial(crcaddr, cp->crc_size, |
6737 | + csum_unfold(csum))); |
6738 | if (addr) |
6739 | cas_page_unmap(addr); |
6740 | } |
6741 | - skb->csum = ntohs(i ^ 0xffff); |
6742 | + skb->csum = csum_unfold(~csum); |
6743 | skb->ip_summed = CHECKSUM_COMPLETE; |
6744 | skb->protocol = eth_type_trans(skb, cp->dev); |
6745 | return len; |
6746 | @@ -2253,7 +2228,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) |
6747 | released = 0; |
6748 | while (entry != last) { |
6749 | /* make a new buffer if it's still in use */ |
6750 | - if (cas_buffer_count(page[entry]) > 1) { |
6751 | + if (page_count(page[entry]->buffer) > 1) { |
6752 | cas_page_t *new = cas_page_dequeue(cp); |
6753 | if (!new) { |
6754 | /* let the timer know that we need to |
6755 | diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h |
6756 | index a970804..a201431 100644 |
6757 | --- a/drivers/net/cassini.h |
6758 | +++ b/drivers/net/cassini.h |
6759 | @@ -4122,8 +4122,8 @@ cas_saturn_patch_t cas_saturn_patch[] = { |
6760 | inserted into |
6761 | outgoing frame. */ |
6762 | struct cas_tx_desc { |
6763 | - u64 control; |
6764 | - u64 buffer; |
6765 | + __le64 control; |
6766 | + __le64 buffer; |
6767 | }; |
6768 | |
6769 | /* descriptor ring for free buffers contains page-sized buffers. the index |
6770 | @@ -4131,8 +4131,8 @@ struct cas_tx_desc { |
6771 | * the completion ring. |
6772 | */ |
6773 | struct cas_rx_desc { |
6774 | - u64 index; |
6775 | - u64 buffer; |
6776 | + __le64 index; |
6777 | + __le64 buffer; |
6778 | }; |
6779 | |
6780 | /* received packets are put on the completion ring. */ |
6781 | @@ -4210,10 +4210,10 @@ struct cas_rx_desc { |
6782 | #define RX_INDEX_RELEASE 0x0000000000002000ULL |
6783 | |
6784 | struct cas_rx_comp { |
6785 | - u64 word1; |
6786 | - u64 word2; |
6787 | - u64 word3; |
6788 | - u64 word4; |
6789 | + __le64 word1; |
6790 | + __le64 word2; |
6791 | + __le64 word3; |
6792 | + __le64 word4; |
6793 | }; |
6794 | |
6795 | enum link_state { |
6796 | @@ -4252,7 +4252,7 @@ struct cas_init_block { |
6797 | struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; |
6798 | struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; |
6799 | struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; |
6800 | - u64 tx_compwb; |
6801 | + __le64 tx_compwb; |
6802 | }; |
6803 | |
6804 | /* tiny buffers to deal with target abort issue. we allocate a bit |
6805 | diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c |
6806 | index 231ce43..a82a1fa 100644 |
6807 | --- a/drivers/net/chelsio/cxgb2.c |
6808 | +++ b/drivers/net/chelsio/cxgb2.c |
6809 | @@ -370,6 +370,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = { |
6810 | "TxInternalMACXmitError", |
6811 | "TxFramesWithExcessiveDeferral", |
6812 | "TxFCSErrors", |
6813 | + "TxJumboFramesOk", |
6814 | + "TxJumboOctetsOk", |
6815 | |
6816 | "RxOctetsOK", |
6817 | "RxOctetsBad", |
6818 | @@ -388,15 +390,16 @@ static char stats_strings[][ETH_GSTRING_LEN] = { |
6819 | "RxInRangeLengthErrors", |
6820 | "RxOutOfRangeLengthField", |
6821 | "RxFrameTooLongErrors", |
6822 | + "RxJumboFramesOk", |
6823 | + "RxJumboOctetsOk", |
6824 | |
6825 | /* Port stats */ |
6826 | - "RxPackets", |
6827 | "RxCsumGood", |
6828 | - "TxPackets", |
6829 | "TxCsumOffload", |
6830 | "TxTso", |
6831 | "RxVlan", |
6832 | "TxVlan", |
6833 | + "TxNeedHeadroom", |
6834 | |
6835 | /* Interrupt stats */ |
6836 | "rx drops", |
6837 | @@ -454,23 +457,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, |
6838 | const struct cmac_statistics *s; |
6839 | const struct sge_intr_counts *t; |
6840 | struct sge_port_stats ss; |
6841 | - unsigned int len; |
6842 | |
6843 | s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); |
6844 | - |
6845 | - len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK); |
6846 | - memcpy(data, &s->TxOctetsOK, len); |
6847 | - data += len; |
6848 | - |
6849 | - len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK); |
6850 | - memcpy(data, &s->RxOctetsOK, len); |
6851 | - data += len; |
6852 | - |
6853 | + t = t1_sge_get_intr_counts(adapter->sge); |
6854 | t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); |
6855 | - memcpy(data, &ss, sizeof(ss)); |
6856 | - data += sizeof(ss); |
6857 | |
6858 | - t = t1_sge_get_intr_counts(adapter->sge); |
6859 | + *data++ = s->TxOctetsOK; |
6860 | + *data++ = s->TxOctetsBad; |
6861 | + *data++ = s->TxUnicastFramesOK; |
6862 | + *data++ = s->TxMulticastFramesOK; |
6863 | + *data++ = s->TxBroadcastFramesOK; |
6864 | + *data++ = s->TxPauseFrames; |
6865 | + *data++ = s->TxFramesWithDeferredXmissions; |
6866 | + *data++ = s->TxLateCollisions; |
6867 | + *data++ = s->TxTotalCollisions; |
6868 | + *data++ = s->TxFramesAbortedDueToXSCollisions; |
6869 | + *data++ = s->TxUnderrun; |
6870 | + *data++ = s->TxLengthErrors; |
6871 | + *data++ = s->TxInternalMACXmitError; |
6872 | + *data++ = s->TxFramesWithExcessiveDeferral; |
6873 | + *data++ = s->TxFCSErrors; |
6874 | + *data++ = s->TxJumboFramesOK; |
6875 | + *data++ = s->TxJumboOctetsOK; |
6876 | + |
6877 | + *data++ = s->RxOctetsOK; |
6878 | + *data++ = s->RxOctetsBad; |
6879 | + *data++ = s->RxUnicastFramesOK; |
6880 | + *data++ = s->RxMulticastFramesOK; |
6881 | + *data++ = s->RxBroadcastFramesOK; |
6882 | + *data++ = s->RxPauseFrames; |
6883 | + *data++ = s->RxFCSErrors; |
6884 | + *data++ = s->RxAlignErrors; |
6885 | + *data++ = s->RxSymbolErrors; |
6886 | + *data++ = s->RxDataErrors; |
6887 | + *data++ = s->RxSequenceErrors; |
6888 | + *data++ = s->RxRuntErrors; |
6889 | + *data++ = s->RxJabberErrors; |
6890 | + *data++ = s->RxInternalMACRcvError; |
6891 | + *data++ = s->RxInRangeLengthErrors; |
6892 | + *data++ = s->RxOutOfRangeLengthField; |
6893 | + *data++ = s->RxFrameTooLongErrors; |
6894 | + *data++ = s->RxJumboFramesOK; |
6895 | + *data++ = s->RxJumboOctetsOK; |
6896 | + |
6897 | + *data++ = ss.rx_cso_good; |
6898 | + *data++ = ss.tx_cso; |
6899 | + *data++ = ss.tx_tso; |
6900 | + *data++ = ss.vlan_xtract; |
6901 | + *data++ = ss.vlan_insert; |
6902 | + *data++ = ss.tx_need_hdrroom; |
6903 | + |
6904 | *data++ = t->rx_drops; |
6905 | *data++ = t->pure_rsps; |
6906 | *data++ = t->unhandled_irqs; |
6907 | diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c |
6908 | index 678778a..2117c4f 100644 |
6909 | --- a/drivers/net/chelsio/pm3393.c |
6910 | +++ b/drivers/net/chelsio/pm3393.c |
6911 | @@ -45,7 +45,7 @@ |
6912 | |
6913 | #include <linux/crc32.h> |
6914 | |
6915 | -#define OFFSET(REG_ADDR) (REG_ADDR << 2) |
6916 | +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) |
6917 | |
6918 | /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */ |
6919 | #define MAX_FRAME_SIZE 9600 |
6920 | @@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, |
6921 | return 0; |
6922 | } |
6923 | |
6924 | -static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val, |
6925 | - int over) |
6926 | -{ |
6927 | - u32 val0, val1, val2; |
6928 | - |
6929 | - t1_tpi_read(adapter, offs, &val0); |
6930 | - t1_tpi_read(adapter, offs + 4, &val1); |
6931 | - t1_tpi_read(adapter, offs + 8, &val2); |
6932 | - |
6933 | - *val &= ~0ull << 40; |
6934 | - *val |= val0 & 0xffff; |
6935 | - *val |= (val1 & 0xffff) << 16; |
6936 | - *val |= (u64)(val2 & 0xff) << 32; |
6937 | - |
6938 | - if (over) |
6939 | - *val += 1ull << 40; |
6940 | +#define RMON_UPDATE(mac, name, stat_name) \ |
6941 | +{ \ |
6942 | + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ |
6943 | + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ |
6944 | + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ |
6945 | + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ |
6946 | + ((u64)(val1 & 0xffff) << 16) | \ |
6947 | + ((u64)(val2 & 0xff) << 32) | \ |
6948 | + ((mac)->stats.stat_name & \ |
6949 | + 0xffffff0000000000ULL); \ |
6950 | + if (ro & \ |
6951 | + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ |
6952 | + (mac)->stats.stat_name += 1ULL << 40; \ |
6953 | } |
6954 | |
6955 | static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, |
6956 | int flag) |
6957 | { |
6958 | - static struct { |
6959 | - unsigned int reg; |
6960 | - unsigned int offset; |
6961 | - } hw_stats [] = { |
6962 | - |
6963 | -#define HW_STAT(name, stat_name) \ |
6964 | - { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } |
6965 | - |
6966 | - /* Rx stats */ |
6967 | - HW_STAT(RxOctetsReceivedOK, RxOctetsOK), |
6968 | - HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK), |
6969 | - HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK), |
6970 | - HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK), |
6971 | - HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames), |
6972 | - HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors), |
6973 | - HW_STAT(RxFramesLostDueToInternalMACErrors, |
6974 | - RxInternalMACRcvError), |
6975 | - HW_STAT(RxSymbolErrors, RxSymbolErrors), |
6976 | - HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors), |
6977 | - HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors), |
6978 | - HW_STAT(RxJabbers, RxJabberErrors), |
6979 | - HW_STAT(RxFragments, RxRuntErrors), |
6980 | - HW_STAT(RxUndersizedFrames, RxRuntErrors), |
6981 | - HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK), |
6982 | - HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK), |
6983 | - |
6984 | - /* Tx stats */ |
6985 | - HW_STAT(TxOctetsTransmittedOK, TxOctetsOK), |
6986 | - HW_STAT(TxFramesLostDueToInternalMACTransmissionError, |
6987 | - TxInternalMACXmitError), |
6988 | - HW_STAT(TxTransmitSystemError, TxFCSErrors), |
6989 | - HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK), |
6990 | - HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK), |
6991 | - HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK), |
6992 | - HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames), |
6993 | - HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK), |
6994 | - HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK) |
6995 | - }, *p = hw_stats; |
6996 | - u64 ro; |
6997 | - u32 val0, val1, val2, val3; |
6998 | - u64 *stats = (u64 *) &mac->stats; |
6999 | - unsigned int i; |
7000 | + u64 ro; |
7001 | + u32 val0, val1, val2, val3; |
7002 | |
7003 | /* Snap the counters */ |
7004 | pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, |
7005 | @@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, |
7006 | ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | |
7007 | (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); |
7008 | |
7009 | - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) { |
7010 | - unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW; |
7011 | - |
7012 | - pm3393_rmon_update((mac)->adapter, OFFSET(p->reg), |
7013 | - stats + p->offset, ro & (reg >> 2)); |
7014 | - } |
7015 | - |
7016 | - |
7017 | + /* Rx stats */ |
7018 | + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); |
7019 | + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); |
7020 | + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); |
7021 | + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); |
7022 | + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); |
7023 | + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); |
7024 | + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, |
7025 | + RxInternalMACRcvError); |
7026 | + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); |
7027 | + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); |
7028 | + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); |
7029 | + RMON_UPDATE(mac, RxJabbers, RxJabberErrors); |
7030 | + RMON_UPDATE(mac, RxFragments, RxRuntErrors); |
7031 | + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); |
7032 | + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); |
7033 | + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); |
7034 | + |
7035 | + /* Tx stats */ |
7036 | + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); |
7037 | + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, |
7038 | + TxInternalMACXmitError); |
7039 | + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); |
7040 | + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); |
7041 | + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); |
7042 | + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); |
7043 | + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); |
7044 | + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); |
7045 | + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); |
7046 | |
7047 | return &mac->stats; |
7048 | } |
7049 | diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c |
7050 | index e4f874a..d77f1eb 100644 |
7051 | --- a/drivers/net/chelsio/sge.c |
7052 | +++ b/drivers/net/chelsio/sge.c |
7053 | @@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port, |
7054 | for_each_possible_cpu(cpu) { |
7055 | struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); |
7056 | |
7057 | - ss->rx_packets += st->rx_packets; |
7058 | ss->rx_cso_good += st->rx_cso_good; |
7059 | - ss->tx_packets += st->tx_packets; |
7060 | ss->tx_cso += st->tx_cso; |
7061 | ss->tx_tso += st->tx_tso; |
7062 | + ss->tx_need_hdrroom += st->tx_need_hdrroom; |
7063 | ss->vlan_xtract += st->vlan_xtract; |
7064 | ss->vlan_insert += st->vlan_insert; |
7065 | } |
7066 | @@ -1379,11 +1378,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) |
7067 | } |
7068 | __skb_pull(skb, sizeof(*p)); |
7069 | |
7070 | - skb->dev->last_rx = jiffies; |
7071 | st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); |
7072 | - st->rx_packets++; |
7073 | |
7074 | skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); |
7075 | + skb->dev->last_rx = jiffies; |
7076 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && |
7077 | skb->protocol == htons(ETH_P_IP) && |
7078 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { |
7079 | @@ -1851,7 +1849,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
7080 | { |
7081 | struct adapter *adapter = dev->priv; |
7082 | struct sge *sge = adapter->sge; |
7083 | - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); |
7084 | + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], |
7085 | + smp_processor_id()); |
7086 | struct cpl_tx_pkt *cpl; |
7087 | struct sk_buff *orig_skb = skb; |
7088 | int ret; |
7089 | @@ -1859,6 +1858,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
7090 | if (skb->protocol == htons(ETH_P_CPL5)) |
7091 | goto send; |
7092 | |
7093 | + /* |
7094 | + * We are using a non-standard hard_header_len. |
7095 | + * Allocate more header room in the rare cases it is not big enough. |
7096 | + */ |
7097 | + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { |
7098 | + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); |
7099 | + ++st->tx_need_hdrroom; |
7100 | + dev_kfree_skb_any(orig_skb); |
7101 | + if (!skb) |
7102 | + return NETDEV_TX_OK; |
7103 | + } |
7104 | + |
7105 | if (skb_shinfo(skb)->gso_size) { |
7106 | int eth_type; |
7107 | struct cpl_tx_pkt_lso *hdr; |
7108 | @@ -1892,24 +1903,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
7109 | return NETDEV_TX_OK; |
7110 | } |
7111 | |
7112 | - /* |
7113 | - * We are using a non-standard hard_header_len and some kernel |
7114 | - * components, such as pktgen, do not handle it right. |
7115 | - * Complain when this happens but try to fix things up. |
7116 | - */ |
7117 | - if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { |
7118 | - pr_debug("%s: headroom %d header_len %d\n", dev->name, |
7119 | - skb_headroom(skb), dev->hard_header_len); |
7120 | - |
7121 | - if (net_ratelimit()) |
7122 | - printk(KERN_ERR "%s: inadequate headroom in " |
7123 | - "Tx packet\n", dev->name); |
7124 | - skb = skb_realloc_headroom(skb, sizeof(*cpl)); |
7125 | - dev_kfree_skb_any(orig_skb); |
7126 | - if (!skb) |
7127 | - return NETDEV_TX_OK; |
7128 | - } |
7129 | - |
7130 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && |
7131 | skb->ip_summed == CHECKSUM_PARTIAL && |
7132 | ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7133 | @@ -1955,7 +1948,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) |
7134 | cpl->vlan_valid = 0; |
7135 | |
7136 | send: |
7137 | - st->tx_packets++; |
7138 | dev->trans_start = jiffies; |
7139 | ret = t1_sge_tx(skb, adapter, 0, dev); |
7140 | |
7141 | diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h |
7142 | index d132a0e..80165f9 100644 |
7143 | --- a/drivers/net/chelsio/sge.h |
7144 | +++ b/drivers/net/chelsio/sge.h |
7145 | @@ -57,13 +57,12 @@ struct sge_intr_counts { |
7146 | }; |
7147 | |
7148 | struct sge_port_stats { |
7149 | - u64 rx_packets; /* # of Ethernet packets received */ |
7150 | u64 rx_cso_good; /* # of successful RX csum offloads */ |
7151 | - u64 tx_packets; /* # of TX packets */ |
7152 | u64 tx_cso; /* # of TX checksum offloads */ |
7153 | u64 tx_tso; /* # of TSO requests */ |
7154 | u64 vlan_xtract; /* # of VLAN tag extractions */ |
7155 | u64 vlan_insert; /* # of VLAN tag insertions */ |
7156 | + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */ |
7157 | }; |
7158 | |
7159 | struct sk_buff; |
7160 | diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c |
7161 | index fcbe508..cbcdf14 100644 |
7162 | --- a/drivers/net/forcedeth.c |
7163 | +++ b/drivers/net/forcedeth.c |
7164 | @@ -5564,35 +5564,35 @@ static struct pci_device_id pci_tbl[] = { |
7165 | }, |
7166 | { /* MCP77 Ethernet Controller */ |
7167 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), |
7168 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7169 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7170 | }, |
7171 | { /* MCP77 Ethernet Controller */ |
7172 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), |
7173 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7174 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7175 | }, |
7176 | { /* MCP77 Ethernet Controller */ |
7177 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), |
7178 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7179 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7180 | }, |
7181 | { /* MCP77 Ethernet Controller */ |
7182 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), |
7183 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7184 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7185 | }, |
7186 | { /* MCP79 Ethernet Controller */ |
7187 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), |
7188 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7189 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7190 | }, |
7191 | { /* MCP79 Ethernet Controller */ |
7192 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), |
7193 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7194 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7195 | }, |
7196 | { /* MCP79 Ethernet Controller */ |
7197 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), |
7198 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7199 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7200 | }, |
7201 | { /* MCP79 Ethernet Controller */ |
7202 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), |
7203 | - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, |
7204 | + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
7205 | }, |
7206 | {0,}, |
7207 | }; |
7208 | diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c |
7209 | index 2575077..3ed45a3 100644 |
7210 | --- a/drivers/net/sky2.c |
7211 | +++ b/drivers/net/sky2.c |
7212 | @@ -812,8 +812,13 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) |
7213 | |
7214 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); |
7215 | |
7216 | - /* Flush Rx MAC FIFO on any flow control or error */ |
7217 | - sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
7218 | + if (hw->chip_id == CHIP_ID_YUKON_XL) { |
7219 | + /* Hardware errata - clear flush mask */ |
7220 | + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); |
7221 | + } else { |
7222 | + /* Flush Rx MAC FIFO on any flow control or error */ |
7223 | + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); |
7224 | + } |
7225 | |
7226 | /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ |
7227 | reg = RX_GMF_FL_THR_DEF + 1; |
7228 | @@ -1307,15 +1312,11 @@ static int sky2_up(struct net_device *dev) |
7229 | */ |
7230 | if (otherdev && netif_running(otherdev) && |
7231 | (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { |
7232 | - struct sky2_port *osky2 = netdev_priv(otherdev); |
7233 | u16 cmd; |
7234 | |
7235 | cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); |
7236 | cmd &= ~PCI_X_CMD_MAX_SPLIT; |
7237 | sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); |
7238 | - |
7239 | - sky2->rx_csum = 0; |
7240 | - osky2->rx_csum = 0; |
7241 | } |
7242 | |
7243 | if (netif_msg_ifup(sky2)) |
7244 | @@ -4017,7 +4018,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, |
7245 | sky2->duplex = -1; |
7246 | sky2->speed = -1; |
7247 | sky2->advertising = sky2_supported_modes(hw); |
7248 | - sky2->rx_csum = 1; |
7249 | + sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); |
7250 | sky2->wol = wol; |
7251 | |
7252 | spin_lock_init(&sky2->phy_lock); |
7253 | diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c |
7254 | index 524dc5f..9057d71 100644 |
7255 | --- a/drivers/net/usb/kaweth.c |
7256 | +++ b/drivers/net/usb/kaweth.c |
7257 | @@ -70,7 +70,7 @@ |
7258 | #define KAWETH_TX_TIMEOUT (5 * HZ) |
7259 | #define KAWETH_SCRATCH_SIZE 32 |
7260 | #define KAWETH_FIRMWARE_BUF_SIZE 4096 |
7261 | -#define KAWETH_CONTROL_TIMEOUT (30 * HZ) |
7262 | +#define KAWETH_CONTROL_TIMEOUT (30000) |
7263 | |
7264 | #define KAWETH_STATUS_BROKEN 0x0000001 |
7265 | #define KAWETH_STATUS_CLOSING 0x0000002 |
7266 | diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c |
7267 | index 6240b97..3bbc5c4 100644 |
7268 | --- a/drivers/net/usb/mcs7830.c |
7269 | +++ b/drivers/net/usb/mcs7830.c |
7270 | @@ -94,7 +94,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) |
7271 | |
7272 | ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, |
7273 | MCS7830_RD_BMREQ, 0x0000, index, data, |
7274 | - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT)); |
7275 | + size, MCS7830_CTRL_TIMEOUT); |
7276 | return ret; |
7277 | } |
7278 | |
7279 | @@ -105,7 +105,7 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data) |
7280 | |
7281 | ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, |
7282 | MCS7830_WR_BMREQ, 0x0000, index, data, |
7283 | - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT)); |
7284 | + size, MCS7830_CTRL_TIMEOUT); |
7285 | return ret; |
7286 | } |
7287 | |
7288 | diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c |
7289 | index 027f686..02a09d5 100644 |
7290 | --- a/drivers/pci/hotplug/fakephp.c |
7291 | +++ b/drivers/pci/hotplug/fakephp.c |
7292 | @@ -39,6 +39,7 @@ |
7293 | #include <linux/init.h> |
7294 | #include <linux/string.h> |
7295 | #include <linux/slab.h> |
7296 | +#include <linux/workqueue.h> |
7297 | #include "../pci.h" |
7298 | |
7299 | #if !defined(MODULE) |
7300 | @@ -63,10 +64,16 @@ struct dummy_slot { |
7301 | struct list_head node; |
7302 | struct hotplug_slot *slot; |
7303 | struct pci_dev *dev; |
7304 | + struct work_struct remove_work; |
7305 | + unsigned long removed; |
7306 | }; |
7307 | |
7308 | static int debug; |
7309 | static LIST_HEAD(slot_list); |
7310 | +static struct workqueue_struct *dummyphp_wq; |
7311 | + |
7312 | +static void pci_rescan_worker(struct work_struct *work); |
7313 | +static DECLARE_WORK(pci_rescan_work, pci_rescan_worker); |
7314 | |
7315 | static int enable_slot (struct hotplug_slot *slot); |
7316 | static int disable_slot (struct hotplug_slot *slot); |
7317 | @@ -109,7 +116,7 @@ static int add_slot(struct pci_dev *dev) |
7318 | slot->name = &dev->dev.bus_id[0]; |
7319 | dbg("slot->name = %s\n", slot->name); |
7320 | |
7321 | - dslot = kmalloc(sizeof(struct dummy_slot), GFP_KERNEL); |
7322 | + dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); |
7323 | if (!dslot) |
7324 | goto error_info; |
7325 | |
7326 | @@ -164,6 +171,14 @@ static void remove_slot(struct dummy_slot *dslot) |
7327 | err("Problem unregistering a slot %s\n", dslot->slot->name); |
7328 | } |
7329 | |
7330 | +/* called from the single-threaded workqueue handler to remove a slot */ |
7331 | +static void remove_slot_worker(struct work_struct *work) |
7332 | +{ |
7333 | + struct dummy_slot *dslot = |
7334 | + container_of(work, struct dummy_slot, remove_work); |
7335 | + remove_slot(dslot); |
7336 | +} |
7337 | + |
7338 | /** |
7339 | * Rescan slot. |
7340 | * Tries hard not to re-enable already existing devices |
7341 | @@ -267,11 +282,17 @@ static inline void pci_rescan(void) { |
7342 | pci_rescan_buses(&pci_root_buses); |
7343 | } |
7344 | |
7345 | +/* called from the single-threaded workqueue handler to rescan all pci buses */ |
7346 | +static void pci_rescan_worker(struct work_struct *work) |
7347 | +{ |
7348 | + pci_rescan(); |
7349 | +} |
7350 | |
7351 | static int enable_slot(struct hotplug_slot *hotplug_slot) |
7352 | { |
7353 | /* mis-use enable_slot for rescanning of the pci bus */ |
7354 | - pci_rescan(); |
7355 | + cancel_work_sync(&pci_rescan_work); |
7356 | + queue_work(dummyphp_wq, &pci_rescan_work); |
7357 | return -ENODEV; |
7358 | } |
7359 | |
7360 | @@ -306,6 +327,10 @@ static int disable_slot(struct hotplug_slot *slot) |
7361 | err("Can't remove PCI devices with other PCI devices behind it yet.\n"); |
7362 | return -ENODEV; |
7363 | } |
7364 | + if (test_and_set_bit(0, &dslot->removed)) { |
7365 | + dbg("Slot already scheduled for removal\n"); |
7366 | + return -ENODEV; |
7367 | + } |
7368 | /* search for subfunctions and disable them first */ |
7369 | if (!(dslot->dev->devfn & 7)) { |
7370 | for (func = 1; func < 8; func++) { |
7371 | @@ -328,8 +353,9 @@ static int disable_slot(struct hotplug_slot *slot) |
7372 | /* remove the device from the pci core */ |
7373 | pci_remove_bus_device(dslot->dev); |
7374 | |
7375 | - /* blow away this sysfs entry and other parts. */ |
7376 | - remove_slot(dslot); |
7377 | + /* queue work item to blow away this sysfs entry and other parts. */ |
7378 | + INIT_WORK(&dslot->remove_work, remove_slot_worker); |
7379 | + queue_work(dummyphp_wq, &dslot->remove_work); |
7380 | |
7381 | return 0; |
7382 | } |
7383 | @@ -340,6 +366,7 @@ static void cleanup_slots (void) |
7384 | struct list_head *next; |
7385 | struct dummy_slot *dslot; |
7386 | |
7387 | + destroy_workqueue(dummyphp_wq); |
7388 | list_for_each_safe (tmp, next, &slot_list) { |
7389 | dslot = list_entry (tmp, struct dummy_slot, node); |
7390 | remove_slot(dslot); |
7391 | @@ -351,6 +378,10 @@ static int __init dummyphp_init(void) |
7392 | { |
7393 | info(DRIVER_DESC "\n"); |
7394 | |
7395 | + dummyphp_wq = create_singlethread_workqueue(MY_NAME); |
7396 | + if (!dummyphp_wq) |
7397 | + return -ENOMEM; |
7398 | + |
7399 | return pci_scan_buses(); |
7400 | } |
7401 | |
7402 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
7403 | index 50f2dd9..75831c8 100644 |
7404 | --- a/drivers/pci/quirks.c |
7405 | +++ b/drivers/pci/quirks.c |
7406 | @@ -465,6 +465,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk |
7407 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi ); |
7408 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi ); |
7409 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi ); |
7410 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi ); |
7411 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi ); |
7412 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi ); |
7413 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi ); |
7414 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi ); |
7415 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi ); |
7416 | |
7417 | /* |
7418 | * VIA ACPI: One IO region pointed to by longword at |
7419 | diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c |
7420 | index 6b357cd..5a827ea 100644 |
7421 | --- a/drivers/spi/omap2_mcspi.c |
7422 | +++ b/drivers/spi/omap2_mcspi.c |
7423 | @@ -350,6 +350,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7424 | tx = xfer->tx_buf; |
7425 | |
7426 | do { |
7427 | + c -= 1; |
7428 | if (tx != NULL) { |
7429 | if (mcspi_wait_for_reg_bit(chstat_reg, |
7430 | OMAP2_MCSPI_CHSTAT_TXS) < 0) { |
7431 | @@ -380,7 +381,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7432 | word_len, *(rx - 1)); |
7433 | #endif |
7434 | } |
7435 | - c -= 1; |
7436 | } while (c); |
7437 | } else if (word_len <= 16) { |
7438 | u16 *rx; |
7439 | @@ -389,6 +389,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7440 | rx = xfer->rx_buf; |
7441 | tx = xfer->tx_buf; |
7442 | do { |
7443 | + c -= 2; |
7444 | if (tx != NULL) { |
7445 | if (mcspi_wait_for_reg_bit(chstat_reg, |
7446 | OMAP2_MCSPI_CHSTAT_TXS) < 0) { |
7447 | @@ -419,7 +420,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7448 | word_len, *(rx - 1)); |
7449 | #endif |
7450 | } |
7451 | - c -= 2; |
7452 | } while (c); |
7453 | } else if (word_len <= 32) { |
7454 | u32 *rx; |
7455 | @@ -428,6 +428,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7456 | rx = xfer->rx_buf; |
7457 | tx = xfer->tx_buf; |
7458 | do { |
7459 | + c -= 4; |
7460 | if (tx != NULL) { |
7461 | if (mcspi_wait_for_reg_bit(chstat_reg, |
7462 | OMAP2_MCSPI_CHSTAT_TXS) < 0) { |
7463 | @@ -458,7 +459,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
7464 | word_len, *(rx - 1)); |
7465 | #endif |
7466 | } |
7467 | - c -= 4; |
7468 | } while (c); |
7469 | } |
7470 | |
7471 | diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c |
7472 | index 0bb8de4..7cf21d7 100644 |
7473 | --- a/drivers/usb/serial/sierra.c |
7474 | +++ b/drivers/usb/serial/sierra.c |
7475 | @@ -100,6 +100,7 @@ static struct usb_device_id id_table [] = { |
7476 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
7477 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ |
7478 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
7479 | + { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ |
7480 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
7481 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
7482 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ |
7483 | @@ -108,6 +109,7 @@ static struct usb_device_id id_table [] = { |
7484 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
7485 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
7486 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
7487 | + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ |
7488 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
7489 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ |
7490 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ |
7491 | @@ -136,6 +138,7 @@ static struct usb_device_id id_table_3port [] = { |
7492 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ |
7493 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
7494 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
7495 | + { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ |
7496 | { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ |
7497 | { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ |
7498 | { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U*/ |
7499 | @@ -144,6 +147,7 @@ static struct usb_device_id id_table_3port [] = { |
7500 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
7501 | { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ |
7502 | { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ |
7503 | + { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ |
7504 | { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ |
7505 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ |
7506 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ |
7507 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
7508 | index dd41677..48a61da 100644 |
7509 | --- a/fs/cifs/inode.c |
7510 | +++ b/fs/cifs/inode.c |
7511 | @@ -919,6 +919,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) |
7512 | goto mkdir_out; |
7513 | } |
7514 | |
7515 | + mode &= ~current->fs->umask; |
7516 | rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT, |
7517 | mode, NULL /* netfid */, pInfo, &oplock, |
7518 | full_path, cifs_sb->local_nls, |
7519 | diff --git a/fs/exec.c b/fs/exec.c |
7520 | index 073b0b8..401b850 100644 |
7521 | --- a/fs/exec.c |
7522 | +++ b/fs/exec.c |
7523 | @@ -1786,6 +1786,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) |
7524 | but keep the previous behaviour for now. */ |
7525 | if (!ispipe && !S_ISREG(inode->i_mode)) |
7526 | goto close_fail; |
7527 | + /* |
7528 | + * Dont allow local users get cute and trick others to coredump |
7529 | + * into their pre-created files: |
7530 | + */ |
7531 | + if (inode->i_uid != current->fsuid) |
7532 | + goto close_fail; |
7533 | if (!file->f_op) |
7534 | goto close_fail; |
7535 | if (!file->f_op->write) |
7536 | diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c |
7537 | index a94473d..5d8dcb9 100644 |
7538 | --- a/fs/ncpfs/mmap.c |
7539 | +++ b/fs/ncpfs/mmap.c |
7540 | @@ -50,10 +50,6 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area, |
7541 | pos = vmf->pgoff << PAGE_SHIFT; |
7542 | |
7543 | count = PAGE_SIZE; |
7544 | - if ((unsigned long)vmf->virtual_address + PAGE_SIZE > area->vm_end) { |
7545 | - WARN_ON(1); /* shouldn't happen? */ |
7546 | - count = area->vm_end - (unsigned long)vmf->virtual_address; |
7547 | - } |
7548 | /* what we can read in one go */ |
7549 | bufsize = NCP_SERVER(inode)->buffer_size; |
7550 | |
7551 | diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c |
7552 | index 10f6e7d..2dc0a54 100644 |
7553 | --- a/fs/nfsd/nfs3xdr.c |
7554 | +++ b/fs/nfsd/nfs3xdr.c |
7555 | @@ -396,8 +396,11 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, |
7556 | * Round the length of the data which was specified up to |
7557 | * the next multiple of XDR units and then compare that |
7558 | * against the length which was actually received. |
7559 | + * Note that when RPCSEC/GSS (for example) is used, the |
7560 | + * data buffer can be padded so dlen might be larger |
7561 | + * than required. It must never be smaller. |
7562 | */ |
7563 | - if (dlen != XDR_QUADLEN(len)*4) |
7564 | + if (dlen < XDR_QUADLEN(len)*4) |
7565 | return 0; |
7566 | |
7567 | if (args->count > max_blocksize) { |
7568 | diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c |
7569 | index cb3e7fa..bd3d5b9 100644 |
7570 | --- a/fs/nfsd/nfsxdr.c |
7571 | +++ b/fs/nfsd/nfsxdr.c |
7572 | @@ -313,8 +313,11 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, |
7573 | * Round the length of the data which was specified up to |
7574 | * the next multiple of XDR units and then compare that |
7575 | * against the length which was actually received. |
7576 | + * Note that when RPCSEC/GSS (for example) is used, the |
7577 | + * data buffer can be padded so dlen might be larger |
7578 | + * than required. It must never be smaller. |
7579 | */ |
7580 | - if (dlen != XDR_QUADLEN(len)*4) |
7581 | + if (dlen < XDR_QUADLEN(len)*4) |
7582 | return 0; |
7583 | |
7584 | rqstp->rq_vec[0].iov_base = (void*)p; |
7585 | diff --git a/fs/splice.c b/fs/splice.c |
7586 | index 02c39ae..2aa8f5a 100644 |
7587 | --- a/fs/splice.c |
7588 | +++ b/fs/splice.c |
7589 | @@ -1234,6 +1234,9 @@ static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) |
7590 | { |
7591 | int partial; |
7592 | |
7593 | + if (!access_ok(VERIFY_READ, src, n)) |
7594 | + return -EFAULT; |
7595 | + |
7596 | pagefault_disable(); |
7597 | partial = __copy_from_user_inatomic(dst, src, n); |
7598 | pagefault_enable(); |
7599 | @@ -1442,6 +1445,11 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, |
7600 | break; |
7601 | } |
7602 | |
7603 | + if (unlikely(!access_ok(VERIFY_WRITE, base, len))) { |
7604 | + error = -EFAULT; |
7605 | + break; |
7606 | + } |
7607 | + |
7608 | sd.len = 0; |
7609 | sd.total_len = len; |
7610 | sd.flags = flags; |
7611 | diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild |
7612 | index c68e168..1a922fa 100644 |
7613 | --- a/include/asm-m68k/Kbuild |
7614 | +++ b/include/asm-m68k/Kbuild |
7615 | @@ -1 +1,2 @@ |
7616 | include include/asm-generic/Kbuild.asm |
7617 | +header-y += cachectl.h |
7618 | diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h |
7619 | index cc6d872..11d5383 100644 |
7620 | --- a/include/asm-powerpc/systbl.h |
7621 | +++ b/include/asm-powerpc/systbl.h |
7622 | @@ -308,8 +308,8 @@ COMPAT_SYS_SPU(move_pages) |
7623 | SYSCALL_SPU(getcpu) |
7624 | COMPAT_SYS(epoll_pwait) |
7625 | COMPAT_SYS_SPU(utimensat) |
7626 | -COMPAT_SYS(fallocate) |
7627 | COMPAT_SYS_SPU(signalfd) |
7628 | COMPAT_SYS_SPU(timerfd) |
7629 | SYSCALL_SPU(eventfd) |
7630 | COMPAT_SYS_SPU(sync_file_range2) |
7631 | +COMPAT_SYS(fallocate) |
7632 | diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h |
7633 | index 1fc6554..38cbec7 100644 |
7634 | --- a/include/asm-sparc64/dma-mapping.h |
7635 | +++ b/include/asm-sparc64/dma-mapping.h |
7636 | @@ -25,15 +25,9 @@ struct dma_ops { |
7637 | void (*sync_single_for_cpu)(struct device *dev, |
7638 | dma_addr_t dma_handle, size_t size, |
7639 | enum dma_data_direction direction); |
7640 | - void (*sync_single_for_device)(struct device *dev, |
7641 | - dma_addr_t dma_handle, size_t size, |
7642 | - enum dma_data_direction direction); |
7643 | void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, |
7644 | int nelems, |
7645 | enum dma_data_direction direction); |
7646 | - void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, |
7647 | - int nelems, |
7648 | - enum dma_data_direction direction); |
7649 | }; |
7650 | extern const struct dma_ops *dma_ops; |
7651 | |
7652 | @@ -105,7 +99,7 @@ static inline void dma_sync_single_for_device(struct device *dev, |
7653 | size_t size, |
7654 | enum dma_data_direction direction) |
7655 | { |
7656 | - dma_ops->sync_single_for_device(dev, dma_handle, size, direction); |
7657 | + /* No flushing needed to sync cpu writes to the device. */ |
7658 | } |
7659 | |
7660 | static inline void dma_sync_single_range_for_cpu(struct device *dev, |
7661 | @@ -123,7 +117,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, |
7662 | size_t size, |
7663 | enum dma_data_direction direction) |
7664 | { |
7665 | - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); |
7666 | + /* No flushing needed to sync cpu writes to the device. */ |
7667 | } |
7668 | |
7669 | |
7670 | @@ -138,7 +132,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, |
7671 | struct scatterlist *sg, int nelems, |
7672 | enum dma_data_direction direction) |
7673 | { |
7674 | - dma_ops->sync_sg_for_device(dev, sg, nelems, direction); |
7675 | + /* No flushing needed to sync cpu writes to the device. */ |
7676 | } |
7677 | |
7678 | static inline int dma_mapping_error(dma_addr_t dma_addr) |
7679 | diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h |
7680 | index 524d498..3ad45df 100644 |
7681 | --- a/include/asm-sparc64/hypervisor.h |
7682 | +++ b/include/asm-sparc64/hypervisor.h |
7683 | @@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions, |
7684 | */ |
7685 | #define HV_FAST_MMU_DEMAP_ALL 0x24 |
7686 | |
7687 | +#ifndef __ASSEMBLY__ |
7688 | +extern void sun4v_mmu_demap_all(void); |
7689 | +#endif |
7690 | + |
7691 | /* mmu_map_perm_addr() |
7692 | * TRAP: HV_FAST_TRAP |
7693 | * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR |
7694 | diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h |
7695 | index 1393e57..f59f257 100644 |
7696 | --- a/include/asm-sparc64/pci.h |
7697 | +++ b/include/asm-sparc64/pci.h |
7698 | @@ -200,6 +200,10 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) |
7699 | struct device_node; |
7700 | extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); |
7701 | |
7702 | +#define HAVE_ARCH_PCI_RESOURCE_TO_USER |
7703 | +extern void pci_resource_to_user(const struct pci_dev *dev, int bar, |
7704 | + const struct resource *rsrc, |
7705 | + resource_size_t *start, resource_size_t *end); |
7706 | #endif /* __KERNEL__ */ |
7707 | |
7708 | #endif /* __SPARC64_PCI_H */ |
7709 | diff --git a/include/linux/acpi.h b/include/linux/acpi.h |
7710 | index bf5e000..919e0a5 100644 |
7711 | --- a/include/linux/acpi.h |
7712 | +++ b/include/linux/acpi.h |
7713 | @@ -40,6 +40,7 @@ |
7714 | #include <acpi/acpi_drivers.h> |
7715 | #include <acpi/acpi_numa.h> |
7716 | #include <asm/acpi.h> |
7717 | +#include <linux/dmi.h> |
7718 | |
7719 | |
7720 | #ifdef CONFIG_ACPI |
7721 | @@ -187,7 +188,9 @@ extern int ec_transaction(u8 command, |
7722 | #endif /*CONFIG_ACPI_EC*/ |
7723 | |
7724 | extern int acpi_blacklisted(void); |
7725 | -extern void acpi_bios_year(char *s); |
7726 | +#ifdef CONFIG_DMI |
7727 | +extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); |
7728 | +#endif |
7729 | |
7730 | #define ACPI_CSTATE_LIMIT_DEFINED /* for driver builds */ |
7731 | #ifdef CONFIG_ACPI |
7732 | @@ -247,5 +250,5 @@ static inline int acpi_boot_table_init(void) |
7733 | return 0; |
7734 | } |
7735 | |
7736 | -#endif /* CONFIG_ACPI */ |
7737 | +#endif /* !CONFIG_ACPI */ |
7738 | #endif /*_LINUX_ACPI_H*/ |
7739 | diff --git a/include/linux/dmi.h b/include/linux/dmi.h |
7740 | index b8ac7b0..d8a946f 100644 |
7741 | --- a/include/linux/dmi.h |
7742 | +++ b/include/linux/dmi.h |
7743 | @@ -78,6 +78,7 @@ extern struct dmi_device * dmi_find_device(int type, const char *name, |
7744 | extern void dmi_scan_machine(void); |
7745 | extern int dmi_get_year(int field); |
7746 | extern int dmi_name_in_vendors(char *str); |
7747 | +extern int dmi_available; |
7748 | |
7749 | #else |
7750 | |
7751 | @@ -87,6 +88,7 @@ static inline struct dmi_device * dmi_find_device(int type, const char *name, |
7752 | struct dmi_device *from) { return NULL; } |
7753 | static inline int dmi_get_year(int year) { return 0; } |
7754 | static inline int dmi_name_in_vendors(char *s) { return 0; } |
7755 | +#define dmi_available 0 |
7756 | |
7757 | #endif |
7758 | |
7759 | diff --git a/include/linux/freezer.h b/include/linux/freezer.h |
7760 | index efded00..7fa9500 100644 |
7761 | --- a/include/linux/freezer.h |
7762 | +++ b/include/linux/freezer.h |
7763 | @@ -4,6 +4,7 @@ |
7764 | #define FREEZER_H_INCLUDED |
7765 | |
7766 | #include <linux/sched.h> |
7767 | +#include <linux/wait.h> |
7768 | |
7769 | #ifdef CONFIG_PM_SLEEP |
7770 | /* |
7771 | @@ -126,6 +127,24 @@ static inline void set_freezable(void) |
7772 | current->flags &= ~PF_NOFREEZE; |
7773 | } |
7774 | |
7775 | +/* |
7776 | + * Freezer-friendly wrapper around wait_event_interruptible(), originally |
7777 | + * defined in <linux/wait.h> |
7778 | + */ |
7779 | + |
7780 | +#define wait_event_freezable(wq, condition) \ |
7781 | +({ \ |
7782 | + int __retval; \ |
7783 | + do { \ |
7784 | + __retval = wait_event_interruptible(wq, \ |
7785 | + (condition) || freezing(current)); \ |
7786 | + if (__retval && !freezing(current)) \ |
7787 | + break; \ |
7788 | + else if (!(condition)) \ |
7789 | + __retval = -ERESTARTSYS; \ |
7790 | + } while (try_to_freeze()); \ |
7791 | + __retval; \ |
7792 | +}) |
7793 | #else /* !CONFIG_PM_SLEEP */ |
7794 | static inline int frozen(struct task_struct *p) { return 0; } |
7795 | static inline int freezing(struct task_struct *p) { return 0; } |
7796 | @@ -143,6 +162,10 @@ static inline void freezer_do_not_count(void) {} |
7797 | static inline void freezer_count(void) {} |
7798 | static inline int freezer_should_skip(struct task_struct *p) { return 0; } |
7799 | static inline void set_freezable(void) {} |
7800 | + |
7801 | +#define wait_event_freezable(wq, condition) \ |
7802 | + wait_event_interruptible(wq, condition) |
7803 | + |
7804 | #endif /* !CONFIG_PM_SLEEP */ |
7805 | |
7806 | #endif /* FREEZER_H_INCLUDED */ |
7807 | diff --git a/include/linux/input.h b/include/linux/input.h |
7808 | index 36e00aa..5ec6b68 100644 |
7809 | --- a/include/linux/input.h |
7810 | +++ b/include/linux/input.h |
7811 | @@ -853,7 +853,7 @@ struct ff_rumble_effect { |
7812 | * defining effect parameters |
7813 | * |
7814 | * This structure is sent through ioctl from the application to the driver. |
7815 | - * To create a new effect aplication should set its @id to -1; the kernel |
7816 | + * To create a new effect application should set its @id to -1; the kernel |
7817 | * will return assigned @id which can later be used to update or delete |
7818 | * this effect. |
7819 | * |
7820 | @@ -933,9 +933,82 @@ struct ff_effect { |
7821 | #define BIT(x) (1UL<<((x)%BITS_PER_LONG)) |
7822 | #define LONG(x) ((x)/BITS_PER_LONG) |
7823 | |
7824 | +/** |
7825 | + * struct input_dev - represents an input device |
7826 | + * @name: name of the device |
7827 | + * @phys: physical path to the device in the system hierarchy |
7828 | + * @uniq: unique identification code for the device (if device has it) |
7829 | + * @id: id of the device (struct input_id) |
7830 | + * @evbit: bitmap of types of events supported by the device (EV_KEY, |
7831 | + * EV_REL, etc.) |
7832 | + * @keybit: bitmap of keys/buttons this device has |
7833 | + * @relbit: bitmap of relative axes for the device |
7834 | + * @absbit: bitmap of absolute axes for the device |
7835 | + * @mscbit: bitmap of miscellaneous events supported by the device |
7836 | + * @ledbit: bitmap of leds present on the device |
7837 | + * @sndbit: bitmap of sound effects supported by the device |
7838 | + * @ffbit: bitmap of force feedback effects supported by the device |
7839 | + * @swbit: bitmap of switches present on the device |
7840 | + * @keycodemax: size of keycode table |
7841 | + * @keycodesize: size of elements in keycode table |
7842 | + * @keycode: map of scancodes to keycodes for this device |
7843 | + * @setkeycode: optional method to alter current keymap, used to implement |
7844 | + * sparse keymaps. If not supplied default mechanism will be used |
7845 | + * @getkeycode: optional method to retrieve current keymap. If not supplied |
7846 | + * default mechanism will be used |
7847 | + * @ff: force feedback structure associated with the device if device |
7848 | + * supports force feedback effects |
7849 | + * @repeat_key: stores key code of the last key pressed; used to implement |
7850 | + * software autorepeat |
7851 | + * @timer: timer for software autorepeat |
7852 | + * @sync: set to 1 when there were no new events since last EV_SYNC |
7853 | + * @abs: current values for reports from absolute axes |
7854 | + * @rep: current values for autorepeat parameters (delay, rate) |
7855 | + * @key: reflects current state of device's keys/buttons |
7856 | + * @led: reflects current state of device's LEDs |
7857 | + * @snd: reflects current state of sound effects |
7858 | + * @sw: reflects current state of device's switches |
7859 | + * @absmax: maximum values for events coming from absolute axes |
7860 | + * @absmin: minimum values for events coming from absolute axes |
7861 | + * @absfuzz: describes noisiness for axes |
7862 | + * @absflat: size of the center flat position (used by joydev) |
7863 | + * @open: this method is called when the very first user calls |
7864 | + * input_open_device(). The driver must prepare the device |
7865 | + * to start generating events (start polling thread, |
7866 | + * request an IRQ, submit URB, etc.) |
7867 | + * @close: this method is called when the very last user calls |
7868 | + * input_close_device(). |
7869 | + * @flush: purges the device. Most commonly used to get rid of force |
7870 | + * feedback effects loaded into the device when disconnecting |
7871 | + * from it |
7872 | + * @event: event handler for events sent _to_ the device, like EV_LED |
7873 | + * or EV_SND. The device is expected to carry out the requested |
7874 | + * action (turn on a LED, play sound, etc.) The call is protected |
7875 | + * by @event_lock and must not sleep |
7876 | + * @grab: input handle that currently has the device grabbed (via |
7877 | + * EVIOCGRAB ioctl). When a handle grabs a device it becomes sole |
7878 | + * recipient for all input events coming from the device |
7879 | + * @event_lock: this spinlock is is taken when input core receives |
7880 | + * and processes a new event for the device (in input_event()). |
7881 | + * Code that accesses and/or modifies parameters of a device |
7882 | + * (such as keymap or absmin, absmax, absfuzz, etc.) after device |
7883 | + * has been registered with input core must take this lock. |
7884 | + * @mutex: serializes calls to open(), close() and flush() methods |
7885 | + * @users: stores number of users (input handlers) that opened this |
7886 | + * device. It is used by input_open_device() and input_close_device() |
7887 | + * to make sure that dev->open() is only called when the first |
7888 | + * user opens device and dev->close() is called when the very |
7889 | + * last user closes the device |
7890 | + * @going_away: marks devices that are in a middle of unregistering and |
7891 | + * causes input_open_device*() fail with -ENODEV. |
7892 | + * @dev: driver model's view of this device |
7893 | + * @h_list: list of input handles associated with the device. When |
7894 | + * accessing the list dev->mutex must be held |
7895 | + * @node: used to place the device onto input_dev_list |
7896 | + */ |
7897 | struct input_dev { |
7898 | |
7899 | - void *private; |
7900 | + void *private; /* do not use */ |
7901 | |
7902 | const char *name; |
7903 | const char *phys; |
7904 | @@ -963,8 +1036,6 @@ struct input_dev { |
7905 | unsigned int repeat_key; |
7906 | struct timer_list timer; |
7907 | |
7908 | - int state; |
7909 | - |
7910 | int sync; |
7911 | |
7912 | int abs[ABS_MAX + 1]; |
7913 | @@ -987,8 +1058,11 @@ struct input_dev { |
7914 | |
7915 | struct input_handle *grab; |
7916 | |
7917 | - struct mutex mutex; /* serializes open and close operations */ |
7918 | + spinlock_t event_lock; |
7919 | + struct mutex mutex; |
7920 | + |
7921 | unsigned int users; |
7922 | + int going_away; |
7923 | |
7924 | struct device dev; |
7925 | union { /* temporarily so while we switching to struct device */ |
7926 | @@ -1054,7 +1128,9 @@ struct input_handle; |
7927 | /** |
7928 | * struct input_handler - implements one of interfaces for input devices |
7929 | * @private: driver-specific data |
7930 | - * @event: event handler |
7931 | + * @event: event handler. This method is being called by input core with |
7932 | + * interrupts disabled and dev->event_lock spinlock held and so |
7933 | + * it may not sleep |
7934 | * @connect: called when attaching a handler to an input device |
7935 | * @disconnect: disconnects a handler from input device |
7936 | * @start: starts handler for given handle. This function is called by |
7937 | @@ -1066,10 +1142,18 @@ struct input_handle; |
7938 | * @name: name of the handler, to be shown in /proc/bus/input/handlers |
7939 | * @id_table: pointer to a table of input_device_ids this driver can |
7940 | * handle |
7941 | - * @blacklist: prointer to a table of input_device_ids this driver should |
7942 | + * @blacklist: pointer to a table of input_device_ids this driver should |
7943 | * ignore even if they match @id_table |
7944 | * @h_list: list of input handles associated with the handler |
7945 | * @node: for placing the driver onto input_handler_list |
7946 | + * |
7947 | + * Input handlers attach to input devices and create input handles. There |
7948 | + * are likely several handlers attached to any given input device at the |
7949 | + * same time. All of them will get their copy of input event generated by |
7950 | + * the device. |
7951 | + * |
7952 | + * Note that input core serializes calls to connect() and disconnect() |
7953 | + * methods. |
7954 | */ |
7955 | struct input_handler { |
7956 | |
7957 | @@ -1091,6 +1175,18 @@ struct input_handler { |
7958 | struct list_head node; |
7959 | }; |
7960 | |
7961 | +/** |
7962 | + * struct input_handle - links input device with an input handler |
7963 | + * @private: handler-specific data |
7964 | + * @open: counter showing whether the handle is 'open', i.e. should deliver |
7965 | + * events from its device |
7966 | + * @name: name given to the handle by handler that created it |
7967 | + * @dev: input device the handle is attached to |
7968 | + * @handler: handler that works with the device through this handle |
7969 | + * @d_node: used to put the handle on device's list of attached handles |
7970 | + * @h_node: used to put the handle on handler's list of handles from which |
7971 | + * it gets events |
7972 | + */ |
7973 | struct input_handle { |
7974 | |
7975 | void *private; |
7976 | @@ -1213,7 +1309,7 @@ extern struct class input_class; |
7977 | * @max_effects: maximum number of effects supported by device |
7978 | * @effects: pointer to an array of effects currently loaded into device |
7979 | * @effect_owners: array of effect owners; when file handle owning |
7980 | - * an effect gets closed the effcet is automatically erased |
7981 | + * an effect gets closed the effect is automatically erased |
7982 | * |
7983 | * Every force-feedback device must implement upload() and playback() |
7984 | * methods; erase() is optional. set_gain() and set_autocenter() need |
7985 | diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h |
7986 | index 97de8aa..0349e82 100644 |
7987 | --- a/include/linux/pci_ids.h |
7988 | +++ b/include/linux/pci_ids.h |
7989 | @@ -2287,6 +2287,8 @@ |
7990 | #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 |
7991 | #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 |
7992 | #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 |
7993 | +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 |
7994 | +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 |
7995 | #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 |
7996 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
7997 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 |
7998 | diff --git a/include/linux/pm.h b/include/linux/pm.h |
7999 | index 48b71ba..71e589b 100644 |
8000 | --- a/include/linux/pm.h |
8001 | +++ b/include/linux/pm.h |
8002 | @@ -344,6 +344,15 @@ static inline int call_platform_enable_wakeup(struct device *dev, int is_on) |
8003 | device_set_wakeup_enable(dev,val); \ |
8004 | } while(0) |
8005 | |
8006 | +/* |
8007 | + * Global Power Management flags |
8008 | + * Used to keep APM and ACPI from both being active |
8009 | + */ |
8010 | +extern unsigned int pm_flags; |
8011 | + |
8012 | +#define PM_APM 1 |
8013 | +#define PM_ACPI 2 |
8014 | + |
8015 | #endif /* __KERNEL__ */ |
8016 | |
8017 | #endif /* _LINUX_PM_H */ |
8018 | diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h |
8019 | index 514729a..446f4f4 100644 |
8020 | --- a/include/linux/pm_legacy.h |
8021 | +++ b/include/linux/pm_legacy.h |
8022 | @@ -4,10 +4,6 @@ |
8023 | |
8024 | #ifdef CONFIG_PM_LEGACY |
8025 | |
8026 | -extern int pm_active; |
8027 | - |
8028 | -#define PM_IS_ACTIVE() (pm_active != 0) |
8029 | - |
8030 | /* |
8031 | * Register a device with power management |
8032 | */ |
8033 | @@ -21,8 +17,6 @@ int __deprecated pm_send_all(pm_request_t rqst, void *data); |
8034 | |
8035 | #else /* CONFIG_PM_LEGACY */ |
8036 | |
8037 | -#define PM_IS_ACTIVE() 0 |
8038 | - |
8039 | static inline struct pm_dev *pm_register(pm_dev_t type, |
8040 | unsigned long id, |
8041 | pm_callback callback) |
8042 | diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h |
8043 | index 9371c61..39b6671 100644 |
8044 | --- a/include/linux/quicklist.h |
8045 | +++ b/include/linux/quicklist.h |
8046 | @@ -56,14 +56,6 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, |
8047 | struct page *page) |
8048 | { |
8049 | struct quicklist *q; |
8050 | - int nid = page_to_nid(page); |
8051 | - |
8052 | - if (unlikely(nid != numa_node_id())) { |
8053 | - if (dtor) |
8054 | - dtor(p); |
8055 | - __free_page(page); |
8056 | - return; |
8057 | - } |
8058 | |
8059 | q = &get_cpu_var(quicklist)[nr]; |
8060 | *(void **)p = q->page; |
8061 | diff --git a/kernel/kmod.c b/kernel/kmod.c |
8062 | index c6a4f8a..bb7df2a 100644 |
8063 | --- a/kernel/kmod.c |
8064 | +++ b/kernel/kmod.c |
8065 | @@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, |
8066 | enum umh_wait wait) |
8067 | { |
8068 | DECLARE_COMPLETION_ONSTACK(done); |
8069 | - int retval; |
8070 | + int retval = 0; |
8071 | |
8072 | helper_lock(); |
8073 | - if (sub_info->path[0] == '\0') { |
8074 | - retval = 0; |
8075 | + if (sub_info->path[0] == '\0') |
8076 | goto out; |
8077 | - } |
8078 | |
8079 | if (!khelper_wq || usermodehelper_disabled) { |
8080 | retval = -EBUSY; |
8081 | @@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, |
8082 | sub_info->wait = wait; |
8083 | |
8084 | queue_work(khelper_wq, &sub_info->work); |
8085 | - if (wait == UMH_NO_WAIT) /* task has freed sub_info */ |
8086 | - return 0; |
8087 | + if (wait == UMH_NO_WAIT) /* task has freed sub_info */ |
8088 | + goto unlock; |
8089 | wait_for_completion(&done); |
8090 | retval = sub_info->retval; |
8091 | |
8092 | - out: |
8093 | +out: |
8094 | call_usermodehelper_freeinfo(sub_info); |
8095 | +unlock: |
8096 | helper_unlock(); |
8097 | return retval; |
8098 | } |
8099 | diff --git a/kernel/power/main.c b/kernel/power/main.c |
8100 | index 350b485..0e44534 100644 |
8101 | --- a/kernel/power/main.c |
8102 | +++ b/kernel/power/main.c |
8103 | @@ -27,6 +27,9 @@ BLOCKING_NOTIFIER_HEAD(pm_chain_head); |
8104 | |
8105 | DEFINE_MUTEX(pm_mutex); |
8106 | |
8107 | +unsigned int pm_flags; |
8108 | +EXPORT_SYMBOL(pm_flags); |
8109 | + |
8110 | #ifdef CONFIG_SUSPEND |
8111 | |
8112 | /* This is just an arbitrary number */ |
8113 | diff --git a/kernel/power/pm.c b/kernel/power/pm.c |
8114 | index c50d152..60c73fa 100644 |
8115 | --- a/kernel/power/pm.c |
8116 | +++ b/kernel/power/pm.c |
8117 | @@ -27,8 +27,6 @@ |
8118 | #include <linux/interrupt.h> |
8119 | #include <linux/mutex.h> |
8120 | |
8121 | -int pm_active; |
8122 | - |
8123 | /* |
8124 | * Locking notes: |
8125 | * pm_devs_lock can be a semaphore providing pm ops are not called |
8126 | @@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data) |
8127 | |
8128 | EXPORT_SYMBOL(pm_register); |
8129 | EXPORT_SYMBOL(pm_send_all); |
8130 | -EXPORT_SYMBOL(pm_active); |
8131 | - |
8132 | |
8133 | diff --git a/kernel/relay.c b/kernel/relay.c |
8134 | index ad85501..91bbfb7 100644 |
8135 | --- a/kernel/relay.c |
8136 | +++ b/kernel/relay.c |
8137 | @@ -92,6 +92,7 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) |
8138 | return -EINVAL; |
8139 | |
8140 | vma->vm_ops = &relay_file_mmap_ops; |
8141 | + vma->vm_flags |= VM_DONTEXPAND; |
8142 | vma->vm_private_data = buf; |
8143 | buf->chan->cb->buf_mapped(buf, filp); |
8144 | |
8145 | diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c |
8146 | index 0962e05..1984669 100644 |
8147 | --- a/kernel/time/tick-broadcast.c |
8148 | +++ b/kernel/time/tick-broadcast.c |
8149 | @@ -387,45 +387,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
8150 | } |
8151 | |
8152 | /* |
8153 | - * Reprogram the broadcast device: |
8154 | - * |
8155 | - * Called with tick_broadcast_lock held and interrupts disabled. |
8156 | - */ |
8157 | -static int tick_broadcast_reprogram(void) |
8158 | -{ |
8159 | - ktime_t expires = { .tv64 = KTIME_MAX }; |
8160 | - struct tick_device *td; |
8161 | - int cpu; |
8162 | - |
8163 | - /* |
8164 | - * Find the event which expires next: |
8165 | - */ |
8166 | - for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; |
8167 | - cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { |
8168 | - td = &per_cpu(tick_cpu_device, cpu); |
8169 | - if (td->evtdev->next_event.tv64 < expires.tv64) |
8170 | - expires = td->evtdev->next_event; |
8171 | - } |
8172 | - |
8173 | - if (expires.tv64 == KTIME_MAX) |
8174 | - return 0; |
8175 | - |
8176 | - return tick_broadcast_set_event(expires, 0); |
8177 | -} |
8178 | - |
8179 | -/* |
8180 | * Handle oneshot mode broadcasting |
8181 | */ |
8182 | static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) |
8183 | { |
8184 | struct tick_device *td; |
8185 | cpumask_t mask; |
8186 | - ktime_t now; |
8187 | + ktime_t now, next_event; |
8188 | int cpu; |
8189 | |
8190 | spin_lock(&tick_broadcast_lock); |
8191 | again: |
8192 | dev->next_event.tv64 = KTIME_MAX; |
8193 | + next_event.tv64 = KTIME_MAX; |
8194 | mask = CPU_MASK_NONE; |
8195 | now = ktime_get(); |
8196 | /* Find all expired events */ |
8197 | @@ -434,19 +408,31 @@ again: |
8198 | td = &per_cpu(tick_cpu_device, cpu); |
8199 | if (td->evtdev->next_event.tv64 <= now.tv64) |
8200 | cpu_set(cpu, mask); |
8201 | + else if (td->evtdev->next_event.tv64 < next_event.tv64) |
8202 | + next_event.tv64 = td->evtdev->next_event.tv64; |
8203 | } |
8204 | |
8205 | /* |
8206 | - * Wakeup the cpus which have an expired event. The broadcast |
8207 | - * device is reprogrammed in the return from idle code. |
8208 | + * Wakeup the cpus which have an expired event. |
8209 | + */ |
8210 | + tick_do_broadcast(mask); |
8211 | + |
8212 | + /* |
8213 | + * Two reasons for reprogram: |
8214 | + * |
8215 | + * - The global event did not expire any CPU local |
8216 | + * events. This happens in dyntick mode, as the maximum PIT |
8217 | + * delta is quite small. |
8218 | + * |
8219 | + * - There are pending events on sleeping CPUs which were not |
8220 | + * in the event mask |
8221 | */ |
8222 | - if (!tick_do_broadcast(mask)) { |
8223 | + if (next_event.tv64 != KTIME_MAX) { |
8224 | /* |
8225 | - * The global event did not expire any CPU local |
8226 | - * events. This happens in dyntick mode, as the |
8227 | - * maximum PIT delta is quite small. |
8228 | + * Rearm the broadcast device. If event expired, |
8229 | + * repeat the above |
8230 | */ |
8231 | - if (tick_broadcast_reprogram()) |
8232 | + if (tick_broadcast_set_event(next_event, 0)) |
8233 | goto again; |
8234 | } |
8235 | spin_unlock(&tick_broadcast_lock); |
8236 | diff --git a/mm/mmap.c b/mm/mmap.c |
8237 | index 0d40e66..f6058f6 100644 |
8238 | --- a/mm/mmap.c |
8239 | +++ b/mm/mmap.c |
8240 | @@ -1619,6 +1619,12 @@ static inline int expand_downwards(struct vm_area_struct *vma, |
8241 | */ |
8242 | if (unlikely(anon_vma_prepare(vma))) |
8243 | return -ENOMEM; |
8244 | + |
8245 | + address &= PAGE_MASK; |
8246 | + error = security_file_mmap(0, 0, 0, 0, address, 1); |
8247 | + if (error) |
8248 | + return error; |
8249 | + |
8250 | anon_vma_lock(vma); |
8251 | |
8252 | /* |
8253 | @@ -1626,8 +1632,6 @@ static inline int expand_downwards(struct vm_area_struct *vma, |
8254 | * is required to hold the mmap_sem in read mode. We need the |
8255 | * anon_vma lock to serialize against concurrent expand_stacks. |
8256 | */ |
8257 | - address &= PAGE_MASK; |
8258 | - error = 0; |
8259 | |
8260 | /* Somebody else might have raced and expanded it already */ |
8261 | if (address < vma->vm_start) { |
8262 | @@ -1938,6 +1942,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len) |
8263 | if (is_hugepage_only_range(mm, addr, len)) |
8264 | return -EINVAL; |
8265 | |
8266 | + error = security_file_mmap(0, 0, 0, 0, addr, 1); |
8267 | + if (error) |
8268 | + return error; |
8269 | + |
8270 | flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; |
8271 | |
8272 | error = arch_mmap_check(addr, len, flags); |
8273 | @@ -2209,7 +2217,7 @@ int install_special_mapping(struct mm_struct *mm, |
8274 | vma->vm_start = addr; |
8275 | vma->vm_end = addr + len; |
8276 | |
8277 | - vma->vm_flags = vm_flags | mm->def_flags; |
8278 | + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; |
8279 | vma->vm_page_prot = protection_map[vma->vm_flags & 7]; |
8280 | |
8281 | vma->vm_ops = &special_mapping_vmops; |
8282 | diff --git a/mm/quicklist.c b/mm/quicklist.c |
8283 | index ae8189c..3f703f7 100644 |
8284 | --- a/mm/quicklist.c |
8285 | +++ b/mm/quicklist.c |
8286 | @@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; |
8287 | static unsigned long max_pages(unsigned long min_pages) |
8288 | { |
8289 | unsigned long node_free_pages, max; |
8290 | + struct zone *zones = NODE_DATA(numa_node_id())->node_zones; |
8291 | + |
8292 | + node_free_pages = |
8293 | +#ifdef CONFIG_ZONE_DMA |
8294 | + zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) + |
8295 | +#endif |
8296 | +#ifdef CONFIG_ZONE_DMA32 |
8297 | + zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) + |
8298 | +#endif |
8299 | + zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES); |
8300 | |
8301 | - node_free_pages = node_page_state(numa_node_id(), |
8302 | - NR_FREE_PAGES); |
8303 | max = node_free_pages / FRACTION_OF_NODE_MEM; |
8304 | return max(max, min_pages); |
8305 | } |
8306 | diff --git a/mm/truncate.c b/mm/truncate.c |
8307 | index 5cdfbc1..39da569 100644 |
8308 | --- a/mm/truncate.c |
8309 | +++ b/mm/truncate.c |
8310 | @@ -95,11 +95,11 @@ truncate_complete_page(struct address_space *mapping, struct page *page) |
8311 | if (page->mapping != mapping) |
8312 | return; |
8313 | |
8314 | - cancel_dirty_page(page, PAGE_CACHE_SIZE); |
8315 | - |
8316 | if (PagePrivate(page)) |
8317 | do_invalidatepage(page, 0); |
8318 | |
8319 | + cancel_dirty_page(page, PAGE_CACHE_SIZE); |
8320 | + |
8321 | remove_from_page_cache(page); |
8322 | ClearPageUptodate(page); |
8323 | ClearPageMappedToDisk(page); |
8324 | diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c |
8325 | index ef3f789..21af441 100644 |
8326 | --- a/net/8021q/vlan.c |
8327 | +++ b/net/8021q/vlan.c |
8328 | @@ -768,7 +768,7 @@ static int vlan_ioctl_handler(void __user *arg) |
8329 | case SET_VLAN_NAME_TYPE_CMD: |
8330 | err = -EPERM; |
8331 | if (!capable(CAP_NET_ADMIN)) |
8332 | - return -EPERM; |
8333 | + break; |
8334 | if ((args.u.name_type >= 0) && |
8335 | (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { |
8336 | vlan_name_type = args.u.name_type; |
8337 | diff --git a/net/atm/mpc.c b/net/atm/mpc.c |
8338 | index 7c85aa5..181c1c8 100644 |
8339 | --- a/net/atm/mpc.c |
8340 | +++ b/net/atm/mpc.c |
8341 | @@ -542,6 +542,13 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev) |
8342 | if (eth->h_proto != htons(ETH_P_IP)) |
8343 | goto non_ip; /* Multi-Protocol Over ATM :-) */ |
8344 | |
8345 | + /* Weed out funny packets (e.g., AF_PACKET or raw). */ |
8346 | + if (skb->len < ETH_HLEN + sizeof(struct iphdr)) |
8347 | + goto non_ip; |
8348 | + skb_set_network_header(skb, ETH_HLEN); |
8349 | + if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5) |
8350 | + goto non_ip; |
8351 | + |
8352 | while (i < mpc->number_of_mps_macs) { |
8353 | if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) |
8354 | if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ |
8355 | diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c |
8356 | index 0ddaff0..8a9f0ac 100644 |
8357 | --- a/net/ax25/ax25_in.c |
8358 | +++ b/net/ax25/ax25_in.c |
8359 | @@ -124,7 +124,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) |
8360 | } |
8361 | |
8362 | skb_pull(skb, 1); /* Remove PID */ |
8363 | - skb_reset_mac_header(skb); |
8364 | + skb->mac_header = skb->network_header; |
8365 | skb_reset_network_header(skb); |
8366 | skb->dev = ax25->ax25_dev->dev; |
8367 | skb->pkt_type = PACKET_HOST; |
8368 | diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c |
8369 | index fc13130..22545bd 100644 |
8370 | --- a/net/bridge/br_netfilter.c |
8371 | +++ b/net/bridge/br_netfilter.c |
8372 | @@ -142,6 +142,23 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) |
8373 | return skb->nf_bridge; |
8374 | } |
8375 | |
8376 | +static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb) |
8377 | +{ |
8378 | + struct nf_bridge_info *nf_bridge = skb->nf_bridge; |
8379 | + |
8380 | + if (atomic_read(&nf_bridge->use) > 1) { |
8381 | + struct nf_bridge_info *tmp = nf_bridge_alloc(skb); |
8382 | + |
8383 | + if (tmp) { |
8384 | + memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info)); |
8385 | + atomic_set(&tmp->use, 1); |
8386 | + nf_bridge_put(nf_bridge); |
8387 | + } |
8388 | + nf_bridge = tmp; |
8389 | + } |
8390 | + return nf_bridge; |
8391 | +} |
8392 | + |
8393 | static inline void nf_bridge_push_encap_header(struct sk_buff *skb) |
8394 | { |
8395 | unsigned int len = nf_bridge_encap_header_len(skb); |
8396 | @@ -247,8 +264,9 @@ static void __br_dnat_complain(void) |
8397 | * Let us first consider the case that ip_route_input() succeeds: |
8398 | * |
8399 | * If skb->dst->dev equals the logical bridge device the packet |
8400 | - * came in on, we can consider this bridging. We then call |
8401 | - * skb->dst->output() which will make the packet enter br_nf_local_out() |
8402 | + * came in on, we can consider this bridging. The packet is passed |
8403 | + * through the neighbour output function to build a new destination |
8404 | + * MAC address, which will make the packet enter br_nf_local_out() |
8405 | * not much later. In that function it is assured that the iptables |
8406 | * FORWARD chain is traversed for the packet. |
8407 | * |
8408 | @@ -285,12 +303,17 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) |
8409 | skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; |
8410 | |
8411 | skb->dev = bridge_parent(skb->dev); |
8412 | - if (!skb->dev) |
8413 | - kfree_skb(skb); |
8414 | - else { |
8415 | + if (skb->dev) { |
8416 | + struct dst_entry *dst = skb->dst; |
8417 | + |
8418 | nf_bridge_pull_encap_header(skb); |
8419 | - skb->dst->output(skb); |
8420 | + |
8421 | + if (dst->hh) |
8422 | + return neigh_hh_output(dst->hh, skb); |
8423 | + else if (dst->neighbour) |
8424 | + return dst->neighbour->output(skb); |
8425 | } |
8426 | + kfree_skb(skb); |
8427 | return 0; |
8428 | } |
8429 | |
8430 | @@ -638,6 +661,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, |
8431 | if (!skb->nf_bridge) |
8432 | return NF_ACCEPT; |
8433 | |
8434 | + /* Need exclusive nf_bridge_info since we might have multiple |
8435 | + * different physoutdevs. */ |
8436 | + if (!nf_bridge_unshare(skb)) |
8437 | + return NF_DROP; |
8438 | + |
8439 | parent = bridge_parent(out); |
8440 | if (!parent) |
8441 | return NF_DROP; |
8442 | @@ -721,6 +749,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, |
8443 | if (!skb->nf_bridge) |
8444 | return NF_ACCEPT; |
8445 | |
8446 | + /* Need exclusive nf_bridge_info since we might have multiple |
8447 | + * different physoutdevs. */ |
8448 | + if (!nf_bridge_unshare(skb)) |
8449 | + return NF_DROP; |
8450 | + |
8451 | nf_bridge = skb->nf_bridge; |
8452 | if (!(nf_bridge->mask & BRNF_BRIDGED_DNAT)) |
8453 | return NF_ACCEPT; |
8454 | diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c |
8455 | index 5dbe580..5ccc2d1 100644 |
8456 | --- a/net/ipv4/devinet.c |
8457 | +++ b/net/ipv4/devinet.c |
8458 | @@ -1030,7 +1030,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) |
8459 | memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); |
8460 | if (named++ == 0) |
8461 | continue; |
8462 | - dot = strchr(ifa->ifa_label, ':'); |
8463 | + dot = strchr(old, ':'); |
8464 | if (dot == NULL) { |
8465 | sprintf(old, ":%d", named); |
8466 | dot = old; |
8467 | diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c |
8468 | index 5c14ed6..4b09b25 100644 |
8469 | --- a/net/ipv4/ip_gre.c |
8470 | +++ b/net/ipv4/ip_gre.c |
8471 | @@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb) |
8472 | offset += 4; |
8473 | } |
8474 | |
8475 | - skb_reset_mac_header(skb); |
8476 | + skb->mac_header = skb->network_header; |
8477 | __pskb_pull(skb, offset); |
8478 | skb_reset_network_header(skb); |
8479 | skb_postpull_rcsum(skb, skb_transport_header(skb), offset); |
8480 | diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c |
8481 | index c6d7152..b45a610 100644 |
8482 | --- a/net/ipv4/raw.c |
8483 | +++ b/net/ipv4/raw.c |
8484 | @@ -270,6 +270,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, |
8485 | int hh_len; |
8486 | struct iphdr *iph; |
8487 | struct sk_buff *skb; |
8488 | + unsigned int iphlen; |
8489 | int err; |
8490 | |
8491 | if (length > rt->u.dst.dev->mtu) { |
8492 | @@ -303,7 +304,8 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, |
8493 | goto error_fault; |
8494 | |
8495 | /* We don't modify invalid header */ |
8496 | - if (length >= sizeof(*iph) && iph->ihl * 4U <= length) { |
8497 | + iphlen = iph->ihl * 4; |
8498 | + if (iphlen >= sizeof(*iph) && iphlen <= length) { |
8499 | if (!iph->saddr) |
8500 | iph->saddr = rt->rt_src; |
8501 | iph->check = 0; |
8502 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
8503 | index 198b732..efc4a3d 100644 |
8504 | --- a/net/ipv4/route.c |
8505 | +++ b/net/ipv4/route.c |
8506 | @@ -2648,11 +2648,10 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) |
8507 | int idx, s_idx; |
8508 | |
8509 | s_h = cb->args[0]; |
8510 | + if (s_h < 0) |
8511 | + s_h = 0; |
8512 | s_idx = idx = cb->args[1]; |
8513 | - for (h = 0; h <= rt_hash_mask; h++) { |
8514 | - if (h < s_h) continue; |
8515 | - if (h > s_h) |
8516 | - s_idx = 0; |
8517 | + for (h = s_h; h <= rt_hash_mask; h++) { |
8518 | rcu_read_lock_bh(); |
8519 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; |
8520 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { |
8521 | @@ -2669,6 +2668,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) |
8522 | dst_release(xchg(&skb->dst, NULL)); |
8523 | } |
8524 | rcu_read_unlock_bh(); |
8525 | + s_idx = 0; |
8526 | } |
8527 | |
8528 | done: |
8529 | diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c |
8530 | index 4c670cf..82fdca2 100644 |
8531 | --- a/net/irda/af_irda.c |
8532 | +++ b/net/irda/af_irda.c |
8533 | @@ -1115,8 +1115,6 @@ static int irda_create(struct socket *sock, int protocol) |
8534 | self->max_sdu_size_rx = TTP_SAR_UNBOUND; |
8535 | break; |
8536 | default: |
8537 | - IRDA_ERROR("%s: protocol not supported!\n", |
8538 | - __FUNCTION__); |
8539 | return -ESOCKTNOSUPPORT; |
8540 | } |
8541 | break; |
8542 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
8543 | index 7a5e993..9c75b9e 100644 |
8544 | --- a/net/key/af_key.c |
8545 | +++ b/net/key/af_key.c |
8546 | @@ -2780,12 +2780,22 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp) |
8547 | |
8548 | static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d) |
8549 | { |
8550 | - return t->aalgos & (1 << d->desc.sadb_alg_id); |
8551 | + unsigned int id = d->desc.sadb_alg_id; |
8552 | + |
8553 | + if (id >= sizeof(t->aalgos) * 8) |
8554 | + return 0; |
8555 | + |
8556 | + return (t->aalgos >> id) & 1; |
8557 | } |
8558 | |
8559 | static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d) |
8560 | { |
8561 | - return t->ealgos & (1 << d->desc.sadb_alg_id); |
8562 | + unsigned int id = d->desc.sadb_alg_id; |
8563 | + |
8564 | + if (id >= sizeof(t->ealgos) * 8) |
8565 | + return 0; |
8566 | + |
8567 | + return (t->ealgos >> id) & 1; |
8568 | } |
8569 | |
8570 | static int count_ah_combs(struct xfrm_tmpl *t) |
8571 | diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c |
8572 | index c7b5d93..69e77d5 100644 |
8573 | --- a/net/netrom/nr_dev.c |
8574 | +++ b/net/netrom/nr_dev.c |
8575 | @@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) |
8576 | |
8577 | /* Spoof incoming device */ |
8578 | skb->dev = dev; |
8579 | - skb_reset_mac_header(skb); |
8580 | + skb->mac_header = skb->network_header; |
8581 | skb_reset_network_header(skb); |
8582 | skb->pkt_type = PACKET_HOST; |
8583 | |
8584 | diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c |
8585 | index 8738ec7..3447803 100644 |
8586 | --- a/net/x25/x25_forward.c |
8587 | +++ b/net/x25/x25_forward.c |
8588 | @@ -118,13 +118,14 @@ int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) { |
8589 | goto out; |
8590 | |
8591 | if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){ |
8592 | - goto out; |
8593 | + goto output; |
8594 | |
8595 | } |
8596 | x25_transmit_link(skbn, nb); |
8597 | |
8598 | - x25_neigh_put(nb); |
8599 | rc = 1; |
8600 | +output: |
8601 | + x25_neigh_put(nb); |
8602 | out: |
8603 | return rc; |
8604 | } |
8605 | diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c |
8606 | index 7012891..75629f4 100644 |
8607 | --- a/net/xfrm/xfrm_policy.c |
8608 | +++ b/net/xfrm/xfrm_policy.c |
8609 | @@ -1479,8 +1479,9 @@ restart: |
8610 | |
8611 | if (sk && sk->sk_policy[1]) { |
8612 | policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
8613 | + err = PTR_ERR(policy); |
8614 | if (IS_ERR(policy)) |
8615 | - return PTR_ERR(policy); |
8616 | + goto dropdst; |
8617 | } |
8618 | |
8619 | if (!policy) { |
8620 | @@ -1491,8 +1492,9 @@ restart: |
8621 | |
8622 | policy = flow_cache_lookup(fl, dst_orig->ops->family, |
8623 | dir, xfrm_policy_lookup); |
8624 | + err = PTR_ERR(policy); |
8625 | if (IS_ERR(policy)) |
8626 | - return PTR_ERR(policy); |
8627 | + goto dropdst; |
8628 | } |
8629 | |
8630 | if (!policy) |
8631 | @@ -1661,8 +1663,9 @@ restart: |
8632 | return 0; |
8633 | |
8634 | error: |
8635 | - dst_release(dst_orig); |
8636 | xfrm_pols_put(pols, npols); |
8637 | +dropdst: |
8638 | + dst_release(dst_orig); |
8639 | *dst_p = NULL; |
8640 | return err; |
8641 | } |
8642 | diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c |
8643 | index 5d3c037..f95aa09 100644 |
8644 | --- a/sound/oss/via82cxxx_audio.c |
8645 | +++ b/sound/oss/via82cxxx_audio.c |
8646 | @@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma, |
8647 | { |
8648 | struct via_info *card = vma->vm_private_data; |
8649 | struct via_channel *chan = &card->ch_out; |
8650 | + unsigned long max_bufs; |
8651 | struct page *dmapage; |
8652 | unsigned long pgoff; |
8653 | int rd, wr; |
8654 | @@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma, |
8655 | rd = card->ch_in.is_mapped; |
8656 | wr = card->ch_out.is_mapped; |
8657 | |
8658 | -#ifndef VIA_NDEBUG |
8659 | - { |
8660 | - unsigned long max_bufs = chan->frag_number; |
8661 | - if (rd && wr) max_bufs *= 2; |
8662 | - /* via_dsp_mmap() should ensure this */ |
8663 | - assert (pgoff < max_bufs); |
8664 | - } |
8665 | -#endif |
8666 | + max_bufs = chan->frag_number; |
8667 | + if (rd && wr) |
8668 | + max_bufs *= 2; |
8669 | + if (pgoff >= max_bufs) |
8670 | + return NOPAGE_SIGBUS; |
8671 | |
8672 | /* if full-duplex (read+write) and we have two sets of bufs, |
8673 | * then the playback buffers come first, sez soundcard.c */ |
8674 | diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c |
8675 | index b76b3dd..e617d7e 100644 |
8676 | --- a/sound/usb/usx2y/usX2Yhwdep.c |
8677 | +++ b/sound/usb/usx2y/usX2Yhwdep.c |
8678 | @@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v |
8679 | us428->us428ctls_sharedmem->CtlSnapShotLast = -2; |
8680 | } |
8681 | area->vm_ops = &us428ctls_vm_ops; |
8682 | - area->vm_flags |= VM_RESERVED; |
8683 | + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; |
8684 | area->vm_private_data = hw->private_data; |
8685 | return 0; |
8686 | } |
8687 | diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c |
8688 | index a5e7bcd..6e70520 100644 |
8689 | --- a/sound/usb/usx2y/usx2yhwdeppcm.c |
8690 | +++ b/sound/usb/usx2y/usx2yhwdeppcm.c |
8691 | @@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st |
8692 | return -ENODEV; |
8693 | } |
8694 | area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops; |
8695 | - area->vm_flags |= VM_RESERVED; |
8696 | + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; |
8697 | area->vm_private_data = hw->private_data; |
8698 | return 0; |
8699 | } |