Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.24-r5/0104-2.6.24.5-all-fixes.patch

Revision 574
Mon Apr 21 17:56:37 2008 UTC by niro
File size: 78676 bytes
- 2.6.24-magellan-r5: updated to linux-2.6.24.5

1 niro 574 diff --git a/Makefile b/Makefile
2     index 254de79..822d1ba 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -189,7 +189,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
6     # Alternatively CROSS_COMPILE can be set in the environment.
7     # Default value for CROSS_COMPILE is not to prefix executables
8     # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
9     -
10     +export KBUILD_BUILDHOST := $(SUBARCH)
11     ARCH ?= $(SUBARCH)
12     CROSS_COMPILE ?=
13    
14     diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
15     index 4ab83d5..7177a6c 100644
16     --- a/arch/parisc/kernel/firmware.c
17     +++ b/arch/parisc/kernel/firmware.c
18     @@ -1080,6 +1080,9 @@ void pdc_io_reset_devices(void)
19     spin_unlock_irqrestore(&pdc_lock, flags);
20     }
21    
22     +/* locked by pdc_console_lock */
23     +static int __attribute__((aligned(8))) iodc_retbuf[32];
24     +static char __attribute__((aligned(64))) iodc_dbuf[4096];
25    
26     /**
27     * pdc_iodc_print - Console print using IODC.
28     @@ -1091,24 +1094,20 @@ void pdc_io_reset_devices(void)
29     * Since the HP console requires CR+LF to perform a 'newline', we translate
30     * "\n" to "\r\n".
31     */
32     -int pdc_iodc_print(unsigned char *str, unsigned count)
33     +int pdc_iodc_print(const unsigned char *str, unsigned count)
34     {
35     - /* XXX Should we spinlock posx usage */
36     static int posx; /* for simple TAB-Simulation... */
37     - int __attribute__((aligned(8))) iodc_retbuf[32];
38     - char __attribute__((aligned(64))) iodc_dbuf[4096];
39     unsigned int i;
40     unsigned long flags;
41    
42     - memset(iodc_dbuf, 0, 4096);
43     - for (i = 0; i < count && i < 2048;) {
44     + for (i = 0; i < count && i < 79;) {
45     switch(str[i]) {
46     case '\n':
47     iodc_dbuf[i+0] = '\r';
48     iodc_dbuf[i+1] = '\n';
49     i += 2;
50     posx = 0;
51     - break;
52     + goto print;
53     case '\t':
54     while (posx & 7) {
55     iodc_dbuf[i] = ' ';
56     @@ -1124,6 +1123,16 @@ int pdc_iodc_print(unsigned char *str, unsigned count)
57     }
58     }
59    
60     + /* if we're at the end of line, and not already inserting a newline,
61     + * insert one anyway. iodc console doesn't claim to support >79 char
62     + * lines. don't account for this in the return value.
63     + */
64     + if (i == 79 && iodc_dbuf[i-1] != '\n') {
65     + iodc_dbuf[i+0] = '\r';
66     + iodc_dbuf[i+1] = '\n';
67     + }
68     +
69     +print:
70     spin_lock_irqsave(&pdc_lock, flags);
71     real32_call(PAGE0->mem_cons.iodc_io,
72     (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
73     @@ -1142,11 +1151,9 @@ int pdc_iodc_print(unsigned char *str, unsigned count)
74     */
75     int pdc_iodc_getc(void)
76     {
77     - unsigned long flags;
78     - static int __attribute__((aligned(8))) iodc_retbuf[32];
79     - static char __attribute__((aligned(64))) iodc_dbuf[4096];
80     int ch;
81     int status;
82     + unsigned long flags;
83    
84     /* Bail if no console input device. */
85     if (!PAGE0->mem_kbd.iodc_io)
86     diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
87     index 33b1f84..7f471a4 100644
88     --- a/arch/parisc/kernel/pdc_cons.c
89     +++ b/arch/parisc/kernel/pdc_cons.c
90     @@ -52,10 +52,18 @@
91     #include <linux/tty.h>
92     #include <asm/pdc.h> /* for iodc_call() proto and friends */
93    
94     +static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
95    
96     static void pdc_console_write(struct console *co, const char *s, unsigned count)
97     {
98     - pdc_iodc_print(s, count);
99     + int i = 0;
100     + unsigned long flags;
101     +
102     + spin_lock_irqsave(&pdc_console_lock, flags);
103     + do {
104     + i += pdc_iodc_print(s + i, count - i);
105     + } while (i < count);
106     + spin_unlock_irqrestore(&pdc_console_lock, flags);
107     }
108    
109     void pdc_printf(const char *fmt, ...)
110     @@ -73,7 +81,14 @@ void pdc_printf(const char *fmt, ...)
111    
112     int pdc_console_poll_key(struct console *co)
113     {
114     - return pdc_iodc_getc();
115     + int c;
116     + unsigned long flags;
117     +
118     + spin_lock_irqsave(&pdc_console_lock, flags);
119     + c = pdc_iodc_getc();
120     + spin_unlock_irqrestore(&pdc_console_lock, flags);
121     +
122     + return c;
123     }
124    
125     static int pdc_console_setup(struct console *co, char *options)
126     diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
127     index 2ce3806..e060d26 100644
128     --- a/arch/parisc/kernel/signal.c
129     +++ b/arch/parisc/kernel/signal.c
130     @@ -534,7 +534,8 @@ insert_restart_trampoline(struct pt_regs *regs)
131     * Flushing one cacheline is cheap.
132     * "sync" on bigger (> 4 way) boxes is not.
133     */
134     - flush_icache_range(regs->gr[30], regs->gr[30] + 4);
135     + flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4);
136     + flush_user_icache_range(regs->gr[30], regs->gr[30] + 4);
137    
138     regs->gr[31] = regs->gr[30] + 8;
139     /* Preserve original r28. */
140     diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
141     index 81111a1..d4af131 100644
142     --- a/arch/sparc64/kernel/ptrace.c
143     +++ b/arch/sparc64/kernel/ptrace.c
144     @@ -127,6 +127,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
145     if (tlb_type == hypervisor)
146     return;
147    
148     + preempt_disable();
149     +
150     #ifdef DCACHE_ALIASING_POSSIBLE
151     /* If bit 13 of the kernel address we used to access the
152     * user page is the same as the virtual address that page
153     @@ -165,6 +167,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
154     for (; start < end; start += icache_line_size)
155     flushi(start);
156     }
157     +
158     + preempt_enable();
159     }
160    
161     asmlinkage void do_ptrace(struct pt_regs *regs)
162     diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
163     index fb13775..48afa08 100644
164     --- a/arch/sparc64/kernel/signal.c
165     +++ b/arch/sparc64/kernel/signal.c
166     @@ -354,7 +354,7 @@ static int invalid_frame_pointer(void __user *fp, int fplen)
167     static inline int
168     save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
169     {
170     - unsigned long *fpregs = (unsigned long *)(regs+1);
171     + unsigned long *fpregs = current_thread_info()->fpregs;
172     unsigned long fprs;
173     int err = 0;
174    
175     diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
176     index 3f10fc9..a0f000b 100644
177     --- a/arch/sparc64/mm/tlb.c
178     +++ b/arch/sparc64/mm/tlb.c
179     @@ -23,10 +23,11 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
180    
181     void flush_tlb_pending(void)
182     {
183     - struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
184     + struct mmu_gather *mp;
185    
186     preempt_disable();
187    
188     + mp = &__get_cpu_var(mmu_gathers);
189     if (mp->tlb_nr) {
190     flush_tsb_user(mp);
191    
192     diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
193     index aa3d2c8..1f09cd9 100644
194     --- a/arch/x86/kernel/machine_kexec_64.c
195     +++ b/arch/x86/kernel/machine_kexec_64.c
196     @@ -233,6 +233,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
197    
198     void arch_crash_save_vmcoreinfo(void)
199     {
200     + VMCOREINFO_SYMBOL(phys_base);
201     VMCOREINFO_SYMBOL(init_level4_pgt);
202    
203     #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
204     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
205     index 79ad152..f177f86 100644
206     --- a/arch/x86/xen/enlighten.c
207     +++ b/arch/x86/xen/enlighten.c
208     @@ -95,7 +95,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
209     *
210     * 0: not available, 1: available
211     */
212     -static int have_vcpu_info_placement = 0;
213     +static int have_vcpu_info_placement = 1;
214    
215     static void __init xen_vcpu_setup(int cpu)
216     {
217     @@ -103,6 +103,7 @@ static void __init xen_vcpu_setup(int cpu)
218     int err;
219     struct vcpu_info *vcpup;
220    
221     + BUG_ON(HYPERVISOR_shared_info == &dummy_shared_info);
222     per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
223    
224     if (!have_vcpu_info_placement)
225     @@ -153,6 +154,7 @@ static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
226     if (*eax == 1)
227     maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */
228     (1 << X86_FEATURE_ACPI) | /* disable ACPI */
229     + (1 << X86_FEATURE_SEP) | /* disable SEP */
230     (1 << X86_FEATURE_ACC)); /* thermal monitoring */
231    
232     asm(XEN_EMULATE_PREFIX "cpuid"
233     @@ -791,30 +793,40 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
234     xen_write_cr3(__pa(base));
235     }
236    
237     -static __init void xen_pagetable_setup_done(pgd_t *base)
238     +static __init void setup_shared_info(void)
239     {
240     - /* This will work as long as patching hasn't happened yet
241     - (which it hasn't) */
242     - pv_mmu_ops.alloc_pt = xen_alloc_pt;
243     - pv_mmu_ops.set_pte = xen_set_pte;
244     -
245     if (!xen_feature(XENFEAT_auto_translated_physmap)) {
246     + unsigned long addr = fix_to_virt(FIX_PARAVIRT_BOOTMAP);
247     +
248     /*
249     * Create a mapping for the shared info page.
250     * Should be set_fixmap(), but shared_info is a machine
251     * address with no corresponding pseudo-phys address.
252     */
253     - set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
254     + set_pte_mfn(addr,
255     PFN_DOWN(xen_start_info->shared_info),
256     PAGE_KERNEL);
257    
258     - HYPERVISOR_shared_info =
259     - (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
260     -
261     + HYPERVISOR_shared_info = (struct shared_info *)addr;
262     } else
263     HYPERVISOR_shared_info =
264     (struct shared_info *)__va(xen_start_info->shared_info);
265    
266     +#ifndef CONFIG_SMP
267     + /* In UP this is as good a place as any to set up shared info */
268     + xen_setup_vcpu_info_placement();
269     +#endif
270     +}
271     +
272     +static __init void xen_pagetable_setup_done(pgd_t *base)
273     +{
274     + /* This will work as long as patching hasn't happened yet
275     + (which it hasn't) */
276     + pv_mmu_ops.alloc_pt = xen_alloc_pt;
277     + pv_mmu_ops.set_pte = xen_set_pte;
278     +
279     + setup_shared_info();
280     +
281     /* Actually pin the pagetable down, but we can't set PG_pinned
282     yet because the page structures don't exist yet. */
283     {
284     @@ -1165,15 +1177,9 @@ asmlinkage void __init xen_start_kernel(void)
285     x86_write_percpu(xen_cr3, __pa(pgd));
286     x86_write_percpu(xen_current_cr3, __pa(pgd));
287    
288     -#ifdef CONFIG_SMP
289     /* Don't do the full vcpu_info placement stuff until we have a
290     - possible map. */
291     + possible map and a non-dummy shared_info. */
292     per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
293     -#else
294     - /* May as well do it now, since there's no good time to call
295     - it later on UP. */
296     - xen_setup_vcpu_info_placement();
297     -#endif
298    
299     pv_info.kernel_rpl = 1;
300     if (xen_feature(XENFEAT_supervisor_mode_kernel))
301     diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
302     index 1a43b60..6b71904 100644
303     --- a/arch/x86/xen/xen-asm.S
304     +++ b/arch/x86/xen/xen-asm.S
305     @@ -33,12 +33,17 @@
306     events, then enter the hypervisor to get them handled.
307     */
308     ENTRY(xen_irq_enable_direct)
309     - /* Clear mask and test pending */
310     - andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
311     + /* Unmask events */
312     + movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
313     +
314     /* Preempt here doesn't matter because that will deal with
315     any pending interrupts. The pending check may end up being
316     run on the wrong CPU, but that doesn't hurt. */
317     +
318     + /* Test for pending */
319     + testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
320     jz 1f
321     +
322     2: call check_events
323     1:
324     ENDPATCH(xen_irq_enable_direct)
325     diff --git a/crypto/xcbc.c b/crypto/xcbc.c
326     index a957373..25a1537 100644
327     --- a/crypto/xcbc.c
328     +++ b/crypto/xcbc.c
329     @@ -116,13 +116,11 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
330     struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
331     struct crypto_cipher *tfm = ctx->child;
332     int bs = crypto_hash_blocksize(parent);
333     - unsigned int i = 0;
334    
335     - do {
336     -
337     - struct page *pg = sg_page(&sg[i]);
338     - unsigned int offset = sg[i].offset;
339     - unsigned int slen = sg[i].length;
340     + for (;;) {
341     + struct page *pg = sg_page(sg);
342     + unsigned int offset = sg->offset;
343     + unsigned int slen = sg->length;
344    
345     if (unlikely(slen > nbytes))
346     slen = nbytes;
347     @@ -182,8 +180,11 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
348     offset = 0;
349     pg++;
350     }
351     - i++;
352     - } while (nbytes>0);
353     +
354     + if (!nbytes)
355     + break;
356     + sg = sg_next(sg);
357     + }
358    
359     return 0;
360     }
361     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
362     index f4487c3..3401e2c 100644
363     --- a/drivers/acpi/bus.c
364     +++ b/drivers/acpi/bus.c
365     @@ -350,10 +350,11 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
366     }
367    
368     spin_lock_irqsave(&acpi_bus_event_lock, flags);
369     - entry =
370     - list_entry(acpi_bus_event_list.next, struct acpi_bus_event, node);
371     - if (entry)
372     + if (!list_empty(&acpi_bus_event_list)) {
373     + entry = list_entry(acpi_bus_event_list.next,
374     + struct acpi_bus_event, node);
375     list_del(&entry->node);
376     + }
377     spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
378    
379     if (!entry)
380     diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
381     index e48ee4f..021153f 100644
382     --- a/drivers/acpi/processor_core.c
383     +++ b/drivers/acpi/processor_core.c
384     @@ -792,7 +792,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
385     acpi_processor_remove_fs(device);
386    
387     processors[pr->id] = NULL;
388     -
389     + processor_device_array[pr->id] = NULL;
390     kfree(pr);
391    
392     return 0;
393     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
394     index 6380726..455a98c 100644
395     --- a/drivers/ata/libata-core.c
396     +++ b/drivers/ata/libata-core.c
397     @@ -1936,24 +1936,34 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
398     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
399     if (err_mask) {
400     if (err_mask & AC_ERR_NODEV_HINT) {
401     - DPRINTK("ata%u.%d: NODEV after polling detection\n",
402     - ap->print_id, dev->devno);
403     + ata_dev_printk(dev, KERN_DEBUG,
404     + "NODEV after polling detection\n");
405     return -ENOENT;
406     }
407    
408     - /* Device or controller might have reported the wrong
409     - * device class. Give a shot at the other IDENTIFY if
410     - * the current one is aborted by the device.
411     - */
412     - if (may_fallback &&
413     - (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
414     - may_fallback = 0;
415     + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
416     + /* Device or controller might have reported
417     + * the wrong device class. Give a shot at the
418     + * other IDENTIFY if the current one is
419     + * aborted by the device.
420     + */
421     + if (may_fallback) {
422     + may_fallback = 0;
423    
424     - if (class == ATA_DEV_ATA)
425     - class = ATA_DEV_ATAPI;
426     - else
427     - class = ATA_DEV_ATA;
428     - goto retry;
429     + if (class == ATA_DEV_ATA)
430     + class = ATA_DEV_ATAPI;
431     + else
432     + class = ATA_DEV_ATA;
433     + goto retry;
434     + }
435     +
436     + /* Control reaches here iff the device aborted
437     + * both flavors of IDENTIFYs which happens
438     + * sometimes with phantom devices.
439     + */
440     + ata_dev_printk(dev, KERN_DEBUG,
441     + "both IDENTIFYs aborted, assuming NODEV\n");
442     + return -ENOENT;
443     }
444    
445     rc = -EIO;
446     diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
447     index b2fb6ba..1c6440b 100644
448     --- a/drivers/firmware/dmi_scan.c
449     +++ b/drivers/firmware/dmi_scan.c
450     @@ -219,7 +219,7 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
451     dev->name = "IPMI controller";
452     dev->device_data = data;
453    
454     - list_add(&dev->list, &dmi_devices);
455     + list_add_tail(&dev->list, &dmi_devices);
456     }
457    
458     /*
459     diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
460     index e0fa752..92c0a7f 100644
461     --- a/drivers/hwmon/w83781d.c
462     +++ b/drivers/hwmon/w83781d.c
463     @@ -1380,7 +1380,8 @@ w83781d_isa_probe(struct platform_device *pdev)
464    
465     /* Reserve the ISA region */
466     res = platform_get_resource(pdev, IORESOURCE_IO, 0);
467     - if (!request_region(res->start, W83781D_EXTENT, "w83781d")) {
468     + if (!request_region(res->start + W83781D_ADDR_REG_OFFSET, 2,
469     + "w83781d")) {
470     err = -EBUSY;
471     goto exit;
472     }
473     @@ -1432,7 +1433,7 @@ w83781d_isa_probe(struct platform_device *pdev)
474     device_remove_file(&pdev->dev, &dev_attr_name);
475     kfree(data);
476     exit_release_region:
477     - release_region(res->start, W83781D_EXTENT);
478     + release_region(res->start + W83781D_ADDR_REG_OFFSET, 2);
479     exit:
480     return err;
481     }
482     @@ -1446,7 +1447,7 @@ w83781d_isa_remove(struct platform_device *pdev)
483     sysfs_remove_group(&pdev->dev.kobj, &w83781d_group);
484     sysfs_remove_group(&pdev->dev.kobj, &w83781d_group_opt);
485     device_remove_file(&pdev->dev, &dev_attr_name);
486     - release_region(data->client.addr, W83781D_EXTENT);
487     + release_region(data->client.addr + W83781D_ADDR_REG_OFFSET, 2);
488     kfree(data);
489    
490     return 0;
491     @@ -1820,8 +1821,17 @@ w83781d_isa_found(unsigned short address)
492     {
493     int val, save, found = 0;
494    
495     - if (!request_region(address, W83781D_EXTENT, "w83781d"))
496     + /* We have to request the region in two parts because some
497     + boards declare base+4 to base+7 as a PNP device */
498     + if (!request_region(address, 4, "w83781d")) {
499     + pr_debug("w83781d: Failed to request low part of region\n");
500     return 0;
501     + }
502     + if (!request_region(address + 4, 4, "w83781d")) {
503     + pr_debug("w83781d: Failed to request high part of region\n");
504     + release_region(address, 4);
505     + return 0;
506     + }
507    
508     #define REALLY_SLOW_IO
509     /* We need the timeouts for at least some W83781D-like
510     @@ -1896,7 +1906,8 @@ w83781d_isa_found(unsigned short address)
511     val == 0x30 ? "W83782D" : "W83781D", (int)address);
512    
513     release:
514     - release_region(address, W83781D_EXTENT);
515     + release_region(address + 4, 4);
516     + release_region(address, 4);
517     return found;
518     }
519    
520     diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
521     index 6123c70..8932906 100644
522     --- a/drivers/macintosh/via-pmu.c
523     +++ b/drivers/macintosh/via-pmu.c
524     @@ -2842,7 +2842,7 @@ EXPORT_SYMBOL(pmu_wait_complete);
525     EXPORT_SYMBOL(pmu_suspend);
526     EXPORT_SYMBOL(pmu_resume);
527     EXPORT_SYMBOL(pmu_unlock);
528     -#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PPC32)
529     +#if defined(CONFIG_PPC32)
530     EXPORT_SYMBOL(pmu_enable_irled);
531     EXPORT_SYMBOL(pmu_battery_count);
532     EXPORT_SYMBOL(pmu_batteries);
533     diff --git a/drivers/md/md.c b/drivers/md/md.c
534     index cef9ebd..5113e8d 100644
535     --- a/drivers/md/md.c
536     +++ b/drivers/md/md.c
537     @@ -1847,17 +1847,6 @@ static struct rdev_sysfs_entry rdev_state =
538     __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
539    
540     static ssize_t
541     -super_show(mdk_rdev_t *rdev, char *page)
542     -{
543     - if (rdev->sb_loaded && rdev->sb_size) {
544     - memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
545     - return rdev->sb_size;
546     - } else
547     - return 0;
548     -}
549     -static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
550     -
551     -static ssize_t
552     errors_show(mdk_rdev_t *rdev, char *page)
553     {
554     return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
555     @@ -1959,7 +1948,6 @@ __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
556    
557     static struct attribute *rdev_default_attrs[] = {
558     &rdev_state.attr,
559     - &rdev_super.attr,
560     &rdev_errors.attr,
561     &rdev_slot.attr,
562     &rdev_offset.attr,
563     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
564     index e8c8157..032e36d 100644
565     --- a/drivers/md/raid5.c
566     +++ b/drivers/md/raid5.c
567     @@ -2348,25 +2348,15 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
568     static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
569     struct stripe_head_state *s, int disks)
570     {
571     + int canceled_check = 0;
572     +
573     set_bit(STRIPE_HANDLE, &sh->state);
574     - /* Take one of the following actions:
575     - * 1/ start a check parity operation if (uptodate == disks)
576     - * 2/ finish a check parity operation and act on the result
577     - * 3/ skip to the writeback section if we previously
578     - * initiated a recovery operation
579     - */
580     - if (s->failed == 0 &&
581     - !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
582     - if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
583     - BUG_ON(s->uptodate != disks);
584     - clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
585     - sh->ops.count++;
586     - s->uptodate--;
587     - } else if (
588     - test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
589     - clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
590     - clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
591    
592     + /* complete a check operation */
593     + if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
594     + clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
595     + clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
596     + if (s->failed == 0) {
597     if (sh->ops.zero_sum_result == 0)
598     /* parity is correct (on disc,
599     * not in buffer any more)
600     @@ -2391,7 +2381,8 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
601     s->uptodate++;
602     }
603     }
604     - }
605     + } else
606     + canceled_check = 1; /* STRIPE_INSYNC is not set */
607     }
608    
609     /* check if we can clear a parity disk reconstruct */
610     @@ -2404,12 +2395,28 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
611     clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
612     }
613    
614     + /* start a new check operation if there are no failures, the stripe is
615     + * not insync, and a repair is not in flight
616     + */
617     + if (s->failed == 0 &&
618     + !test_bit(STRIPE_INSYNC, &sh->state) &&
619     + !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
620     + if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
621     + BUG_ON(s->uptodate != disks);
622     + clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
623     + sh->ops.count++;
624     + s->uptodate--;
625     + }
626     + }
627     +
628     /* Wait for check parity and compute block operations to complete
629     - * before write-back
630     + * before write-back. If a failure occurred while the check operation
631     + * was in flight we need to cycle this stripe through handle_stripe
632     + * since the parity block may not be uptodate
633     */
634     - if (!test_bit(STRIPE_INSYNC, &sh->state) &&
635     - !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
636     - !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
637     + if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) &&
638     + !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
639     + !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
640     struct r5dev *dev;
641     /* either failed parity check, or recovery is happening */
642     if (s->failed == 0)
643     diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
644     index 88dc436..3b9da9c 100644
645     --- a/drivers/media/dvb/dvb-usb/ttusb2.c
646     +++ b/drivers/media/dvb/dvb-usb/ttusb2.c
647     @@ -144,6 +144,7 @@ static int ttusb2_power_ctrl(struct dvb_usb_device *d, int onoff)
648     static struct tda10086_config tda10086_config = {
649     .demod_address = 0x0e,
650     .invert = 0,
651     + .diseqc_tone = 1,
652     };
653    
654     static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap)
655     diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c
656     index 9d26ace..0d2b69a 100644
657     --- a/drivers/media/dvb/frontends/tda10086.c
658     +++ b/drivers/media/dvb/frontends/tda10086.c
659     @@ -106,9 +106,12 @@ static int tda10086_write_mask(struct tda10086_state *state, int reg, int mask,
660     static int tda10086_init(struct dvb_frontend* fe)
661     {
662     struct tda10086_state* state = fe->demodulator_priv;
663     + u8 t22k_off = 0x80;
664    
665     dprintk ("%s\n", __FUNCTION__);
666    
667     + if (state->config->diseqc_tone)
668     + t22k_off = 0;
669     // reset
670     tda10086_write_byte(state, 0x00, 0x00);
671     msleep(10);
672     @@ -158,7 +161,7 @@ static int tda10086_init(struct dvb_frontend* fe)
673     tda10086_write_byte(state, 0x3d, 0x80);
674    
675     // setup SEC
676     - tda10086_write_byte(state, 0x36, 0x80); // all SEC off, no 22k tone
677     + tda10086_write_byte(state, 0x36, t22k_off); // all SEC off, 22k tone
678     tda10086_write_byte(state, 0x34, (((1<<19) * (22000/1000)) / (SACLK/1000))); // } tone frequency
679     tda10086_write_byte(state, 0x35, (((1<<19) * (22000/1000)) / (SACLK/1000)) >> 8); // }
680    
681     @@ -180,16 +183,20 @@ static void tda10086_diseqc_wait(struct tda10086_state *state)
682     static int tda10086_set_tone (struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
683     {
684     struct tda10086_state* state = fe->demodulator_priv;
685     + u8 t22k_off = 0x80;
686    
687     dprintk ("%s\n", __FUNCTION__);
688    
689     + if (state->config->diseqc_tone)
690     + t22k_off = 0;
691     +
692     switch (tone) {
693     case SEC_TONE_OFF:
694     - tda10086_write_byte(state, 0x36, 0x80);
695     + tda10086_write_byte(state, 0x36, t22k_off);
696     break;
697    
698     case SEC_TONE_ON:
699     - tda10086_write_byte(state, 0x36, 0x81);
700     + tda10086_write_byte(state, 0x36, 0x01 + t22k_off);
701     break;
702     }
703    
704     @@ -202,9 +209,13 @@ static int tda10086_send_master_cmd (struct dvb_frontend* fe,
705     struct tda10086_state* state = fe->demodulator_priv;
706     int i;
707     u8 oldval;
708     + u8 t22k_off = 0x80;
709    
710     dprintk ("%s\n", __FUNCTION__);
711    
712     + if (state->config->diseqc_tone)
713     + t22k_off = 0;
714     +
715     if (cmd->msg_len > 6)
716     return -EINVAL;
717     oldval = tda10086_read_byte(state, 0x36);
718     @@ -212,7 +223,8 @@ static int tda10086_send_master_cmd (struct dvb_frontend* fe,
719     for(i=0; i< cmd->msg_len; i++) {
720     tda10086_write_byte(state, 0x48+i, cmd->msg[i]);
721     }
722     - tda10086_write_byte(state, 0x36, 0x88 | ((cmd->msg_len - 1) << 4));
723     + tda10086_write_byte(state, 0x36, (0x08 + t22k_off)
724     + | ((cmd->msg_len - 1) << 4));
725    
726     tda10086_diseqc_wait(state);
727    
728     @@ -225,16 +237,20 @@ static int tda10086_send_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t minic
729     {
730     struct tda10086_state* state = fe->demodulator_priv;
731     u8 oldval = tda10086_read_byte(state, 0x36);
732     + u8 t22k_off = 0x80;
733    
734     dprintk ("%s\n", __FUNCTION__);
735    
736     + if (state->config->diseqc_tone)
737     + t22k_off = 0;
738     +
739     switch(minicmd) {
740     case SEC_MINI_A:
741     - tda10086_write_byte(state, 0x36, 0x84);
742     + tda10086_write_byte(state, 0x36, 0x04 + t22k_off);
743     break;
744    
745     case SEC_MINI_B:
746     - tda10086_write_byte(state, 0x36, 0x86);
747     + tda10086_write_byte(state, 0x36, 0x06 + t22k_off);
748     break;
749     }
750    
751     diff --git a/drivers/media/dvb/frontends/tda10086.h b/drivers/media/dvb/frontends/tda10086.h
752     index ed584a8..eeceaee 100644
753     --- a/drivers/media/dvb/frontends/tda10086.h
754     +++ b/drivers/media/dvb/frontends/tda10086.h
755     @@ -33,6 +33,9 @@ struct tda10086_config
756    
757     /* does the "inversion" need inverted? */
758     u8 invert;
759     +
760     + /* do we need the diseqc signal with carrier? */
761     + u8 diseqc_tone;
762     };
763    
764     #if defined(CONFIG_DVB_TDA10086) || (defined(CONFIG_DVB_TDA10086_MODULE) && defined(MODULE))
765     diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
766     index 9268a82..14b00f5 100644
767     --- a/drivers/media/dvb/ttpci/budget.c
768     +++ b/drivers/media/dvb/ttpci/budget.c
769     @@ -351,6 +351,7 @@ static struct s5h1420_config s5h1420_config = {
770     static struct tda10086_config tda10086_config = {
771     .demod_address = 0x0e,
772     .invert = 0,
773     + .diseqc_tone = 1,
774     };
775    
776     static u8 read_pwm(struct budget* budget)
777     diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
778     index 6d2dd87..9aa5b14 100644
779     --- a/drivers/media/video/ivtv/ivtv-driver.c
780     +++ b/drivers/media/video/ivtv/ivtv-driver.c
781     @@ -687,6 +687,9 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
782     itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
783     itv->vbi.sliced_in = &itv->vbi.in.fmt.sliced;
784    
785     + /* Init the sg table for osd/yuv output */
786     + sg_init_table(itv->udma.SGlist, IVTV_DMA_SG_OSD_ENT);
787     +
788     /* OSD */
789     itv->osd_global_alpha_state = 1;
790     itv->osd_global_alpha = 255;
791     diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
792     index e1ab099..e204773 100644
793     --- a/drivers/media/video/saa7134/saa7134-dvb.c
794     +++ b/drivers/media/video/saa7134/saa7134-dvb.c
795     @@ -826,6 +826,7 @@ static struct tda1004x_config ads_tech_duo_config = {
796     static struct tda10086_config flydvbs = {
797     .demod_address = 0x0e,
798     .invert = 0,
799     + .diseqc_tone = 0,
800     };
801    
802     /* ==================================================================
803     diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
804     index 1707f98..dbbe9a7 100644
805     --- a/drivers/mtd/chips/cfi_cmdset_0001.c
806     +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
807     @@ -669,7 +669,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
808     /* Someone else might have been playing with it. */
809     return -EAGAIN;
810     }
811     -
812     + /* Fall through */
813     case FL_READY:
814     case FL_CFI_QUERY:
815     case FL_JEDEC_QUERY:
816     @@ -729,14 +729,14 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
817     chip->state = FL_READY;
818     return 0;
819    
820     + case FL_SHUTDOWN:
821     + /* The machine is rebooting now,so no one can get chip anymore */
822     + return -EIO;
823     case FL_POINT:
824     /* Only if there's no operation suspended... */
825     if (mode == FL_READY && chip->oldstate == FL_READY)
826     return 0;
827     -
828     - case FL_SHUTDOWN:
829     - /* The machine is rebooting now,so no one can get chip anymore */
830     - return -EIO;
831     + /* Fall through */
832     default:
833     sleep:
834     set_current_state(TASK_UNINTERRUPTIBLE);
835     diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
836     index be4b994..84038ca 100644
837     --- a/drivers/mtd/devices/block2mtd.c
838     +++ b/drivers/mtd/devices/block2mtd.c
839     @@ -408,7 +408,6 @@ static int block2mtd_setup2(const char *val)
840     if (token[1]) {
841     ret = parse_num(&erase_size, token[1]);
842     if (ret) {
843     - kfree(name);
844     parse_err("illegal erase size");
845     }
846     }
847     diff --git a/drivers/net/macb.c b/drivers/net/macb.c
848     index c796948..20838f9 100644
849     --- a/drivers/net/macb.c
850     +++ b/drivers/net/macb.c
851     @@ -1257,6 +1257,8 @@ static int __devexit macb_remove(struct platform_device *pdev)
852    
853     if (dev) {
854     bp = netdev_priv(dev);
855     + if (bp->phy_dev)
856     + phy_disconnect(bp->phy_dev);
857     mdiobus_unregister(&bp->mii_bus);
858     kfree(bp->mii_bus.irq);
859     unregister_netdev(dev);
860     diff --git a/drivers/net/plip.c b/drivers/net/plip.c
861     index 57c9866..0f40c00 100644
862     --- a/drivers/net/plip.c
863     +++ b/drivers/net/plip.c
864     @@ -903,17 +903,18 @@ plip_interrupt(void *dev_id)
865     struct net_local *nl;
866     struct plip_local *rcv;
867     unsigned char c0;
868     + unsigned long flags;
869    
870     nl = netdev_priv(dev);
871     rcv = &nl->rcv_data;
872    
873     - spin_lock_irq (&nl->lock);
874     + spin_lock_irqsave (&nl->lock, flags);
875    
876     c0 = read_status(dev);
877     if ((c0 & 0xf8) != 0xc0) {
878     if ((dev->irq != -1) && (net_debug > 1))
879     printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
880     - spin_unlock_irq (&nl->lock);
881     + spin_unlock_irqrestore (&nl->lock, flags);
882     return;
883     }
884    
885     @@ -942,7 +943,7 @@ plip_interrupt(void *dev_id)
886     break;
887     }
888    
889     - spin_unlock_irq(&nl->lock);
890     + spin_unlock_irqrestore(&nl->lock, flags);
891     }
892    
893     static int
894     diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
895     index a7556cd..88a7e4e 100644
896     --- a/drivers/net/pppol2tp.c
897     +++ b/drivers/net/pppol2tp.c
898     @@ -302,14 +302,14 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
899     struct pppol2tp_session *session;
900     struct hlist_node *walk;
901    
902     - read_lock(&tunnel->hlist_lock);
903     + read_lock_bh(&tunnel->hlist_lock);
904     hlist_for_each_entry(session, walk, session_list, hlist) {
905     if (session->tunnel_addr.s_session == session_id) {
906     - read_unlock(&tunnel->hlist_lock);
907     + read_unlock_bh(&tunnel->hlist_lock);
908     return session;
909     }
910     }
911     - read_unlock(&tunnel->hlist_lock);
912     + read_unlock_bh(&tunnel->hlist_lock);
913    
914     return NULL;
915     }
916     @@ -320,14 +320,14 @@ static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
917     {
918     struct pppol2tp_tunnel *tunnel = NULL;
919    
920     - read_lock(&pppol2tp_tunnel_list_lock);
921     + read_lock_bh(&pppol2tp_tunnel_list_lock);
922     list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
923     if (tunnel->stats.tunnel_id == tunnel_id) {
924     - read_unlock(&pppol2tp_tunnel_list_lock);
925     + read_unlock_bh(&pppol2tp_tunnel_list_lock);
926     return tunnel;
927     }
928     }
929     - read_unlock(&pppol2tp_tunnel_list_lock);
930     + read_unlock_bh(&pppol2tp_tunnel_list_lock);
931    
932     return NULL;
933     }
934     @@ -342,10 +342,11 @@ static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
935     static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
936     {
937     struct sk_buff *skbp;
938     + struct sk_buff *tmp;
939     u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
940    
941     - spin_lock(&session->reorder_q.lock);
942     - skb_queue_walk(&session->reorder_q, skbp) {
943     + spin_lock_bh(&session->reorder_q.lock);
944     + skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
945     if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
946     __skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
947     PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
948     @@ -360,7 +361,7 @@ static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_
949     __skb_queue_tail(&session->reorder_q, skb);
950    
951     out:
952     - spin_unlock(&session->reorder_q.lock);
953     + spin_unlock_bh(&session->reorder_q.lock);
954     }
955    
956     /* Dequeue a single skb.
957     @@ -371,10 +372,9 @@ static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct s
958     int length = PPPOL2TP_SKB_CB(skb)->length;
959     struct sock *session_sock = NULL;
960    
961     - /* We're about to requeue the skb, so unlink it and return resources
962     + /* We're about to requeue the skb, so return resources
963     * to its current owner (a socket receive buffer).
964     */
965     - skb_unlink(skb, &session->reorder_q);
966     skb_orphan(skb);
967    
968     tunnel->stats.rx_packets++;
969     @@ -442,7 +442,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
970     * expect to send up next, dequeue it and any other
971     * in-sequence packets behind it.
972     */
973     - spin_lock(&session->reorder_q.lock);
974     + spin_lock_bh(&session->reorder_q.lock);
975     skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
976     if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
977     session->stats.rx_seq_discards++;
978     @@ -469,13 +469,18 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
979     goto out;
980     }
981     }
982     - spin_unlock(&session->reorder_q.lock);
983     + __skb_unlink(skb, &session->reorder_q);
984     +
985     + /* Process the skb. We release the queue lock while we
986     + * do so to let other contexts process the queue.
987     + */
988     + spin_unlock_bh(&session->reorder_q.lock);
989     pppol2tp_recv_dequeue_skb(session, skb);
990     - spin_lock(&session->reorder_q.lock);
991     + spin_lock_bh(&session->reorder_q.lock);
992     }
993    
994     out:
995     - spin_unlock(&session->reorder_q.lock);
996     + spin_unlock_bh(&session->reorder_q.lock);
997     }
998    
999     /* Internal receive frame. Do the real work of receiving an L2TP data frame
1000     @@ -1058,7 +1063,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1001    
1002     /* Get routing info from the tunnel socket */
1003     dst_release(skb->dst);
1004     - skb->dst = sk_dst_get(sk_tun);
1005     + skb->dst = dst_clone(__sk_dst_get(sk_tun));
1006     skb_orphan(skb);
1007     skb->sk = sk_tun;
1008    
1009     @@ -1106,7 +1111,7 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
1010     PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1011     "%s: closing all sessions...\n", tunnel->name);
1012    
1013     - write_lock(&tunnel->hlist_lock);
1014     + write_lock_bh(&tunnel->hlist_lock);
1015     for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
1016     again:
1017     hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1018     @@ -1126,7 +1131,7 @@ again:
1019     * disappear as we're jumping between locks.
1020     */
1021     sock_hold(sk);
1022     - write_unlock(&tunnel->hlist_lock);
1023     + write_unlock_bh(&tunnel->hlist_lock);
1024     lock_sock(sk);
1025    
1026     if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
1027     @@ -1148,11 +1153,11 @@ again:
1028     * list so we are guaranteed to make forward
1029     * progress.
1030     */
1031     - write_lock(&tunnel->hlist_lock);
1032     + write_lock_bh(&tunnel->hlist_lock);
1033     goto again;
1034     }
1035     }
1036     - write_unlock(&tunnel->hlist_lock);
1037     + write_unlock_bh(&tunnel->hlist_lock);
1038     }
1039    
1040     /* Really kill the tunnel.
1041     @@ -1161,9 +1166,9 @@ again:
1042     static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1043     {
1044     /* Remove from socket list */
1045     - write_lock(&pppol2tp_tunnel_list_lock);
1046     + write_lock_bh(&pppol2tp_tunnel_list_lock);
1047     list_del_init(&tunnel->list);
1048     - write_unlock(&pppol2tp_tunnel_list_lock);
1049     + write_unlock_bh(&pppol2tp_tunnel_list_lock);
1050    
1051     atomic_dec(&pppol2tp_tunnel_count);
1052     kfree(tunnel);
1053     @@ -1239,9 +1244,9 @@ static void pppol2tp_session_destruct(struct sock *sk)
1054     /* Delete the session socket from the
1055     * hash
1056     */
1057     - write_lock(&tunnel->hlist_lock);
1058     + write_lock_bh(&tunnel->hlist_lock);
1059     hlist_del_init(&session->hlist);
1060     - write_unlock(&tunnel->hlist_lock);
1061     + write_unlock_bh(&tunnel->hlist_lock);
1062    
1063     atomic_dec(&pppol2tp_session_count);
1064     }
1065     @@ -1386,9 +1391,9 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
1066    
1067     /* Add tunnel to our list */
1068     INIT_LIST_HEAD(&tunnel->list);
1069     - write_lock(&pppol2tp_tunnel_list_lock);
1070     + write_lock_bh(&pppol2tp_tunnel_list_lock);
1071     list_add(&tunnel->list, &pppol2tp_tunnel_list);
1072     - write_unlock(&pppol2tp_tunnel_list_lock);
1073     + write_unlock_bh(&pppol2tp_tunnel_list_lock);
1074     atomic_inc(&pppol2tp_tunnel_count);
1075    
1076     /* Bump the reference count. The tunnel context is deleted
1077     @@ -1593,11 +1598,11 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1078     sk->sk_user_data = session;
1079    
1080     /* Add session to the tunnel's hash list */
1081     - write_lock(&tunnel->hlist_lock);
1082     + write_lock_bh(&tunnel->hlist_lock);
1083     hlist_add_head(&session->hlist,
1084     pppol2tp_session_id_hash(tunnel,
1085     session->tunnel_addr.s_session));
1086     - write_unlock(&tunnel->hlist_lock);
1087     + write_unlock_bh(&tunnel->hlist_lock);
1088    
1089     atomic_inc(&pppol2tp_session_count);
1090    
1091     @@ -2199,7 +2204,7 @@ static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, str
1092     int next = 0;
1093     int i;
1094    
1095     - read_lock(&tunnel->hlist_lock);
1096     + read_lock_bh(&tunnel->hlist_lock);
1097     for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
1098     hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
1099     if (curr == NULL) {
1100     @@ -2217,7 +2222,7 @@ static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, str
1101     }
1102     }
1103     out:
1104     - read_unlock(&tunnel->hlist_lock);
1105     + read_unlock_bh(&tunnel->hlist_lock);
1106     if (!found)
1107     session = NULL;
1108    
1109     @@ -2228,13 +2233,13 @@ static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
1110     {
1111     struct pppol2tp_tunnel *tunnel = NULL;
1112    
1113     - read_lock(&pppol2tp_tunnel_list_lock);
1114     + read_lock_bh(&pppol2tp_tunnel_list_lock);
1115     if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
1116     goto out;
1117     }
1118     tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
1119     out:
1120     - read_unlock(&pppol2tp_tunnel_list_lock);
1121     + read_unlock_bh(&pppol2tp_tunnel_list_lock);
1122    
1123     return tunnel;
1124     }
1125     diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
1126     index b570402..053eab6 100644
1127     --- a/drivers/net/sis190.c
1128     +++ b/drivers/net/sis190.c
1129     @@ -1632,13 +1632,18 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1130    
1131     static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1132     {
1133     - u8 from;
1134     + int rc;
1135     +
1136     + rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1137     + if (rc < 0) {
1138     + u8 reg;
1139    
1140     - pci_read_config_byte(pdev, 0x73, &from);
1141     + pci_read_config_byte(pdev, 0x73, &reg);
1142    
1143     - return (from & 0x00000001) ?
1144     - sis190_get_mac_addr_from_apc(pdev, dev) :
1145     - sis190_get_mac_addr_from_eeprom(pdev, dev);
1146     + if (reg & 0x00000001)
1147     + rc = sis190_get_mac_addr_from_apc(pdev, dev);
1148     + }
1149     + return rc;
1150     }
1151    
1152     static void sis190_set_speed_auto(struct net_device *dev)
1153     diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
1154     index 6887214..6fac216 100644
1155     --- a/drivers/net/sungem.c
1156     +++ b/drivers/net/sungem.c
1157     @@ -910,7 +910,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
1158     * rx ring - must call napi_disable(), which
1159     * schedule_timeout()'s if polling is already disabled.
1160     */
1161     - work_done += gem_rx(gp, budget);
1162     + work_done += gem_rx(gp, budget - work_done);
1163    
1164     if (work_done >= budget)
1165     return work_done;
1166     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1167     index 72e0bd5..05cdda9 100644
1168     --- a/drivers/pci/quirks.c
1169     +++ b/drivers/pci/quirks.c
1170     @@ -950,6 +950,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
1171     * accesses to the SMBus registers, with potentially bad effects. Thus you
1172     * should be very careful when adding new entries: if SMM is accessing the
1173     * Intel SMBus, this is a very good reason to leave it hidden.
1174     + *
1175     + * Likewise, many recent laptops use ACPI for thermal management. If the
1176     + * ACPI DSDT code accesses the SMBus, then Linux should not access it
1177     + * natively, and keeping the SMBus hidden is the right thing to do. If you
1178     + * are about to add an entry in the table below, please first disassemble
1179     + * the DSDT and double-check that there is no code accessing the SMBus.
1180     */
1181     static int asus_hides_smbus;
1182    
1183     @@ -1022,11 +1028,6 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1184     case 0x12bd: /* HP D530 */
1185     asus_hides_smbus = 1;
1186     }
1187     - else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1188     - switch (dev->subsystem_device) {
1189     - case 0x099c: /* HP Compaq nx6110 */
1190     - asus_hides_smbus = 1;
1191     - }
1192     } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
1193     if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1194     switch(dev->subsystem_device) {
1195     diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
1196     index 6b9840c..be22d23 100644
1197     --- a/drivers/pnp/pnpacpi/rsparser.c
1198     +++ b/drivers/pnp/pnpacpi/rsparser.c
1199     @@ -85,7 +85,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
1200     i < PNP_MAX_IRQ)
1201     i++;
1202     if (i >= PNP_MAX_IRQ && !warned) {
1203     - printk(KERN_ERR "pnpacpi: exceeded the max number of IRQ "
1204     + printk(KERN_WARNING "pnpacpi: exceeded the max number of IRQ "
1205     "resources: %d \n", PNP_MAX_IRQ);
1206     warned = 1;
1207     return;
1208     @@ -187,7 +187,7 @@ static void pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res,
1209     res->dma_resource[i].start = dma;
1210     res->dma_resource[i].end = dma;
1211     } else if (!warned) {
1212     - printk(KERN_ERR "pnpacpi: exceeded the max number of DMA "
1213     + printk(KERN_WARNING "pnpacpi: exceeded the max number of DMA "
1214     "resources: %d \n", PNP_MAX_DMA);
1215     warned = 1;
1216     }
1217     @@ -213,7 +213,7 @@ static void pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
1218     res->port_resource[i].start = io;
1219     res->port_resource[i].end = io + len - 1;
1220     } else if (!warned) {
1221     - printk(KERN_ERR "pnpacpi: exceeded the max number of IO "
1222     + printk(KERN_WARNING "pnpacpi: exceeded the max number of IO "
1223     "resources: %d \n", PNP_MAX_PORT);
1224     warned = 1;
1225     }
1226     @@ -241,7 +241,7 @@ static void pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
1227     res->mem_resource[i].start = mem;
1228     res->mem_resource[i].end = mem + len - 1;
1229     } else if (!warned) {
1230     - printk(KERN_ERR "pnpacpi: exceeded the max number of mem "
1231     + printk(KERN_WARNING "pnpacpi: exceeded the max number of mem "
1232     "resources: %d\n", PNP_MAX_MEM);
1233     warned = 1;
1234     }
1235     diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
1236     index 865f32b..b4e1022 100644
1237     --- a/drivers/uio/uio.c
1238     +++ b/drivers/uio/uio.c
1239     @@ -447,6 +447,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
1240    
1241     vma->vm_flags |= VM_IO | VM_RESERVED;
1242    
1243     + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1244     +
1245     return remap_pfn_range(vma,
1246     vma->vm_start,
1247     idev->info->mem[mi].addr >> PAGE_SHIFT,
1248     diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1249     index fcd40ec..d864239 100644
1250     --- a/drivers/usb/core/message.c
1251     +++ b/drivers/usb/core/message.c
1252     @@ -1189,7 +1189,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1253     return -EINVAL;
1254     }
1255    
1256     - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1257     + if (dev->quirks & USB_QUIRK_NO_SET_INTF)
1258     + ret = -EPIPE;
1259     + else
1260     + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1261     USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
1262     alternate, interface, NULL, 0, 5000);
1263    
1264     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1265     index d42c561..fb552d7 100644
1266     --- a/drivers/usb/core/quirks.c
1267     +++ b/drivers/usb/core/quirks.c
1268     @@ -39,6 +39,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1269     /* M-Systems Flash Disk Pioneers */
1270     { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
1271    
1272     + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
1273     + { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
1274     +
1275     /* Philips PSC805 audio device */
1276     { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
1277    
1278     diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
1279     index 8a0d174..74ce8bc 100644
1280     --- a/drivers/usb/serial/keyspan.h
1281     +++ b/drivers/usb/serial/keyspan.h
1282     @@ -637,6 +637,7 @@ static struct usb_serial_driver keyspan_pre_device = {
1283     .description = "Keyspan - (without firmware)",
1284     .id_table = keyspan_pre_ids,
1285     .num_interrupt_in = NUM_DONT_CARE,
1286     + .num_interrupt_out = NUM_DONT_CARE,
1287     .num_bulk_in = NUM_DONT_CARE,
1288     .num_bulk_out = NUM_DONT_CARE,
1289     .num_ports = 1,
1290     @@ -651,6 +652,7 @@ static struct usb_serial_driver keyspan_1port_device = {
1291     .description = "Keyspan 1 port adapter",
1292     .id_table = keyspan_1port_ids,
1293     .num_interrupt_in = NUM_DONT_CARE,
1294     + .num_interrupt_out = NUM_DONT_CARE,
1295     .num_bulk_in = NUM_DONT_CARE,
1296     .num_bulk_out = NUM_DONT_CARE,
1297     .num_ports = 1,
1298     @@ -678,6 +680,7 @@ static struct usb_serial_driver keyspan_2port_device = {
1299     .description = "Keyspan 2 port adapter",
1300     .id_table = keyspan_2port_ids,
1301     .num_interrupt_in = NUM_DONT_CARE,
1302     + .num_interrupt_out = NUM_DONT_CARE,
1303     .num_bulk_in = NUM_DONT_CARE,
1304     .num_bulk_out = NUM_DONT_CARE,
1305     .num_ports = 2,
1306     @@ -705,6 +708,7 @@ static struct usb_serial_driver keyspan_4port_device = {
1307     .description = "Keyspan 4 port adapter",
1308     .id_table = keyspan_4port_ids,
1309     .num_interrupt_in = NUM_DONT_CARE,
1310     + .num_interrupt_out = NUM_DONT_CARE,
1311     .num_bulk_in = NUM_DONT_CARE,
1312     .num_bulk_out = NUM_DONT_CARE,
1313     .num_ports = 4,
1314     diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
1315     index 1f01494..337f5ce 100644
1316     --- a/drivers/usb/serial/ti_usb_3410_5052.c
1317     +++ b/drivers/usb/serial/ti_usb_3410_5052.c
1318     @@ -264,8 +264,8 @@ static struct usb_serial_driver ti_1port_device = {
1319     .description = "TI USB 3410 1 port adapter",
1320     .usb_driver = &ti_usb_driver,
1321     .id_table = ti_id_table_3410,
1322     - .num_interrupt_in = 1,
1323     - .num_bulk_in = 1,
1324     + .num_interrupt_in = NUM_DONT_CARE,
1325     + .num_bulk_in = NUM_DONT_CARE,
1326     .num_bulk_out = 1,
1327     .num_ports = 1,
1328     .attach = ti_startup,
1329     diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
1330     index 7ee087f..762fb04 100644
1331     --- a/drivers/usb/serial/visor.c
1332     +++ b/drivers/usb/serial/visor.c
1333     @@ -191,7 +191,7 @@ static struct usb_serial_driver handspring_device = {
1334     .id_table = id_table,
1335     .num_interrupt_in = NUM_DONT_CARE,
1336     .num_bulk_in = 2,
1337     - .num_bulk_out = 2,
1338     + .num_bulk_out = NUM_DONT_CARE,
1339     .num_ports = 2,
1340     .open = visor_open,
1341     .close = visor_close,
1342     diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
1343     index c646750..ec3641e 100644
1344     --- a/drivers/usb/storage/transport.c
1345     +++ b/drivers/usb/storage/transport.c
1346     @@ -1010,7 +1010,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1347     US_DEBUGP("Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
1348     le32_to_cpu(bcs->Signature), bcs->Tag,
1349     residue, bcs->Status);
1350     - if (bcs->Tag != us->tag || bcs->Status > US_BULK_STAT_PHASE) {
1351     + if (!(bcs->Tag == us->tag || (us->flags & US_FL_BULK_IGNORE_TAG)) ||
1352     + bcs->Status > US_BULK_STAT_PHASE) {
1353     US_DEBUGP("Bulk logical error\n");
1354     return USB_STOR_TRANSPORT_ERROR;
1355     }
1356     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1357     index fe12737..054f53c 100644
1358     --- a/drivers/usb/storage/unusual_devs.h
1359     +++ b/drivers/usb/storage/unusual_devs.h
1360     @@ -1557,6 +1557,17 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0001,
1361     US_SC_DEVICE, US_PR_DEVICE, NULL,
1362     US_FL_FIX_CAPACITY),
1363    
1364     +/*
1365     + * Patch by Constantin Baranov <const@tltsu.ru>
1366     + * Report by Andreas Koenecke.
1367     + * Motorola ROKR Z6.
1368     + */
1369     +UNUSUAL_DEV( 0x22b8, 0x6426, 0x0101, 0x0101,
1370     + "Motorola",
1371     + "MSnc.",
1372     + US_SC_DEVICE, US_PR_DEVICE, NULL,
1373     + US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
1374     +
1375     /* Reported by Radovan Garabik <garabik@kassiopeia.juls.savba.sk> */
1376     UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1377     "MPIO",
1378     diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
1379     index 1194f5e..01072f4 100644
1380     --- a/drivers/video/fbmem.c
1381     +++ b/drivers/video/fbmem.c
1382     @@ -1521,6 +1521,7 @@ module_init(fbmem_init);
1383     static void __exit
1384     fbmem_exit(void)
1385     {
1386     + remove_proc_entry("fb", NULL);
1387     class_destroy(fb_class);
1388     unregister_chrdev(FB_MAJOR, "fb");
1389     }
1390     diff --git a/fs/buffer.c b/fs/buffer.c
1391     index 7249e01..5bb5ffc 100644
1392     --- a/fs/buffer.c
1393     +++ b/fs/buffer.c
1394     @@ -2565,14 +2565,13 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
1395     struct inode *inode = page->mapping->host;
1396     struct buffer_head *head = fsdata;
1397     struct buffer_head *bh;
1398     + BUG_ON(fsdata != NULL && page_has_buffers(page));
1399    
1400     - if (!PageMappedToDisk(page)) {
1401     - if (unlikely(copied < len) && !page_has_buffers(page))
1402     - attach_nobh_buffers(page, head);
1403     - if (page_has_buffers(page))
1404     - return generic_write_end(file, mapping, pos, len,
1405     - copied, page, fsdata);
1406     - }
1407     + if (unlikely(copied < len) && !page_has_buffers(page))
1408     + attach_nobh_buffers(page, head);
1409     + if (page_has_buffers(page))
1410     + return generic_write_end(file, mapping, pos, len,
1411     + copied, page, fsdata);
1412    
1413     SetPageUptodate(page);
1414     set_page_dirty(page);
1415     diff --git a/fs/dcache.c b/fs/dcache.c
1416     index d9ca1e5..c369bf9 100644
1417     --- a/fs/dcache.c
1418     +++ b/fs/dcache.c
1419     @@ -1408,9 +1408,6 @@ void d_delete(struct dentry * dentry)
1420     if (atomic_read(&dentry->d_count) == 1) {
1421     dentry_iput(dentry);
1422     fsnotify_nameremove(dentry, isdir);
1423     -
1424     - /* remove this and other inotify debug checks after 2.6.18 */
1425     - dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
1426     return;
1427     }
1428    
1429     diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
1430     index 1955ee6..078684f 100644
1431     --- a/fs/hfsplus/dir.c
1432     +++ b/fs/hfsplus/dir.c
1433     @@ -340,16 +340,23 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
1434    
1435     if (inode->i_nlink > 0)
1436     drop_nlink(inode);
1437     - hfsplus_delete_inode(inode);
1438     - if (inode->i_ino != cnid && !inode->i_nlink) {
1439     - if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
1440     - res = hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
1441     - if (!res)
1442     - hfsplus_delete_inode(inode);
1443     + if (inode->i_ino == cnid)
1444     + clear_nlink(inode);
1445     + if (!inode->i_nlink) {
1446     + if (inode->i_ino != cnid) {
1447     + HFSPLUS_SB(sb).file_count--;
1448     + if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
1449     + res = hfsplus_delete_cat(inode->i_ino,
1450     + HFSPLUS_SB(sb).hidden_dir,
1451     + NULL);
1452     + if (!res)
1453     + hfsplus_delete_inode(inode);
1454     + } else
1455     + inode->i_flags |= S_DEAD;
1456     } else
1457     - inode->i_flags |= S_DEAD;
1458     + hfsplus_delete_inode(inode);
1459     } else
1460     - clear_nlink(inode);
1461     + HFSPLUS_SB(sb).file_count--;
1462     inode->i_ctime = CURRENT_TIME_SEC;
1463     mark_inode_dirty(inode);
1464    
1465     diff --git a/fs/inotify.c b/fs/inotify.c
1466     index 2c5b921..690e725 100644
1467     --- a/fs/inotify.c
1468     +++ b/fs/inotify.c
1469     @@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
1470     struct dentry *child;
1471    
1472     list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
1473     - if (!child->d_inode) {
1474     - WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
1475     + if (!child->d_inode)
1476     continue;
1477     - }
1478     +
1479     spin_lock(&child->d_lock);
1480     - if (watched) {
1481     - WARN_ON(child->d_flags &
1482     - DCACHE_INOTIFY_PARENT_WATCHED);
1483     + if (watched)
1484     child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
1485     - } else {
1486     - WARN_ON(!(child->d_flags &
1487     - DCACHE_INOTIFY_PARENT_WATCHED));
1488     - child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED;
1489     - }
1490     + else
1491     + child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
1492     spin_unlock(&child->d_lock);
1493     }
1494     }
1495     @@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
1496     if (!inode)
1497     return;
1498    
1499     - WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
1500     spin_lock(&entry->d_lock);
1501     parent = entry->d_parent;
1502     if (parent->d_inode && inotify_inode_watched(parent->d_inode))
1503     @@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
1504     struct inode *inode, u32 mask)
1505     {
1506     int ret = 0;
1507     + int newly_watched;
1508    
1509     /* don't allow invalid bits: we don't want flags set */
1510     mask &= IN_ALL_EVENTS | IN_ONESHOT;
1511     @@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
1512     */
1513     watch->inode = igrab(inode);
1514    
1515     - if (!inotify_inode_watched(inode))
1516     - set_dentry_child_flags(inode, 1);
1517     -
1518     /* Add the watch to the handle's and the inode's list */
1519     + newly_watched = !inotify_inode_watched(inode);
1520     list_add(&watch->h_list, &ih->watches);
1521     list_add(&watch->i_list, &inode->inotify_watches);
1522     + /*
1523     + * Set child flags _after_ adding the watch, so there is no race
1524     + * windows where newly instantiated children could miss their parent's
1525     + * watched flag.
1526     + */
1527     + if (newly_watched)
1528     + set_dentry_child_flags(inode, 1);
1529     +
1530     out:
1531     mutex_unlock(&ih->mutex);
1532     mutex_unlock(&inode->inotify_mutex);
1533     diff --git a/fs/locks.c b/fs/locks.c
1534     index 8b8388e..2fd29d9 100644
1535     --- a/fs/locks.c
1536     +++ b/fs/locks.c
1537     @@ -1805,17 +1805,21 @@ again:
1538     if (error)
1539     goto out;
1540    
1541     - for (;;) {
1542     - error = vfs_lock_file(filp, cmd, file_lock, NULL);
1543     - if (error != -EAGAIN || cmd == F_SETLK)
1544     - break;
1545     - error = wait_event_interruptible(file_lock->fl_wait,
1546     - !file_lock->fl_next);
1547     - if (!error)
1548     - continue;
1549     + if (filp->f_op && filp->f_op->lock != NULL)
1550     + error = filp->f_op->lock(filp, cmd, file_lock);
1551     + else {
1552     + for (;;) {
1553     + error = posix_lock_file(filp, file_lock, NULL);
1554     + if (error != -EAGAIN || cmd == F_SETLK)
1555     + break;
1556     + error = wait_event_interruptible(file_lock->fl_wait,
1557     + !file_lock->fl_next);
1558     + if (!error)
1559     + continue;
1560    
1561     - locks_delete_block(file_lock);
1562     - break;
1563     + locks_delete_block(file_lock);
1564     + break;
1565     + }
1566     }
1567    
1568     /*
1569     @@ -1929,17 +1933,21 @@ again:
1570     if (error)
1571     goto out;
1572    
1573     - for (;;) {
1574     - error = vfs_lock_file(filp, cmd, file_lock, NULL);
1575     - if (error != -EAGAIN || cmd == F_SETLK64)
1576     - break;
1577     - error = wait_event_interruptible(file_lock->fl_wait,
1578     - !file_lock->fl_next);
1579     - if (!error)
1580     - continue;
1581     + if (filp->f_op && filp->f_op->lock != NULL)
1582     + error = filp->f_op->lock(filp, cmd, file_lock);
1583     + else {
1584     + for (;;) {
1585     + error = posix_lock_file(filp, file_lock, NULL);
1586     + if (error != -EAGAIN || cmd == F_SETLK64)
1587     + break;
1588     + error = wait_event_interruptible(file_lock->fl_wait,
1589     + !file_lock->fl_next);
1590     + if (!error)
1591     + continue;
1592    
1593     - locks_delete_block(file_lock);
1594     - break;
1595     + locks_delete_block(file_lock);
1596     + break;
1597     + }
1598     }
1599    
1600     /*
1601     diff --git a/fs/signalfd.c b/fs/signalfd.c
1602     index fb7f7e8..1b414ef 100644
1603     --- a/fs/signalfd.c
1604     +++ b/fs/signalfd.c
1605     @@ -110,9 +110,14 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
1606     err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
1607     err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
1608     break;
1609     - default: /* this is just in case for now ... */
1610     + default:
1611     + /*
1612     + * This case catches also the signals queued by sigqueue().
1613     + */
1614     err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
1615     err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
1616     + err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
1617     + err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
1618     break;
1619     }
1620    
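The hunk above makes signalfd pass along the payload attached by sigqueue(). A minimal userspace sketch of what becomes visible with the fix (illustrative only; assumes the usual glibc signalfd() and sigqueue() wrappers):

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct signalfd_siginfo ssi;
	sigset_t mask;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	fd = signalfd(-1, &mask, 0);

	/* queue a realtime signal carrying an integer payload */
	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 42 });

	if (read(fd, &ssi, sizeof(ssi)) == sizeof(ssi))
		/* with the hunk above applied this prints 42; before,
		 * the default case left ssi_int zeroed */
		printf("ssi_int = %d\n", (int)ssi.ssi_int);
	return 0;
}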
1621     diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
1622     index dbee6e6..fdc6d05 100644
1623     --- a/include/asm-parisc/futex.h
1624     +++ b/include/asm-parisc/futex.h
1625     @@ -56,6 +56,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
1626     int err = 0;
1627     int uval;
1628    
1629     + /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
1630     + * our gateway page, and causes no end of trouble...
1631     + */
1632     + if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
1633     + return -EFAULT;
1634     +
1635     if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
1636     return -EFAULT;
1637    
1638     @@ -67,5 +73,5 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
1639     return uval;
1640     }
1641    
1642     -#endif
1643     -#endif
1644     +#endif /*__KERNEL__*/
1645     +#endif /*_ASM_PARISC_FUTEX_H*/
1646     diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h
1647     index deda8c3..9eaa794 100644
1648     --- a/include/asm-parisc/pdc.h
1649     +++ b/include/asm-parisc/pdc.h
1650     @@ -645,8 +645,7 @@ int pdc_soft_power_button(int sw_control);
1651     void pdc_io_reset(void);
1652     void pdc_io_reset_devices(void);
1653     int pdc_iodc_getc(void);
1654     -int pdc_iodc_print(unsigned char *str, unsigned count);
1655     -void pdc_printf(const char *fmt, ...);
1656     +int pdc_iodc_print(const unsigned char *str, unsigned count);
1657    
1658     void pdc_emergency_unlock(void);
1659     int pdc_sti_call(unsigned long func, unsigned long flags,
1660     diff --git a/include/asm-sparc64/backoff.h b/include/asm-sparc64/backoff.h
1661     index dadd6c3..fa1fdf6 100644
1662     --- a/include/asm-sparc64/backoff.h
1663     +++ b/include/asm-sparc64/backoff.h
1664     @@ -12,7 +12,8 @@
1665     mov reg, tmp; \
1666     88: brnz,pt tmp, 88b; \
1667     sub tmp, 1, tmp; \
1668     - cmp reg, BACKOFF_LIMIT; \
1669     + set BACKOFF_LIMIT, tmp; \
1670     + cmp reg, tmp; \
1671     bg,pn %xcc, label; \
1672     nop; \
1673     ba,pt %xcc, label; \
1674     diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
1675     index 71d4ada..3e6b68f 100644
1676     --- a/include/linux/ethtool.h
1677     +++ b/include/linux/ethtool.h
1678     @@ -12,6 +12,7 @@
1679     #ifndef _LINUX_ETHTOOL_H
1680     #define _LINUX_ETHTOOL_H
1681    
1682     +#include <linux/types.h>
1683    
1684     /* This should work for both 32 and 64 bit userland. */
1685     struct ethtool_cmd {
1686     diff --git a/include/linux/percpu.h b/include/linux/percpu.h
1687     index 926adaa..1702ab5 100644
1688     --- a/include/linux/percpu.h
1689     +++ b/include/linux/percpu.h
1690     @@ -34,7 +34,7 @@
1691     #ifdef CONFIG_SMP
1692    
1693     struct percpu_data {
1694     - void *ptrs[NR_CPUS];
1695     + void *ptrs[1];
1696     };
1697    
1698     #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
1699     diff --git a/include/linux/sched.h b/include/linux/sched.h
1700     index cc14656..59e00df 100644
1701     --- a/include/linux/sched.h
1702     +++ b/include/linux/sched.h
1703     @@ -1449,6 +1449,12 @@ static inline void idle_task_exit(void) {}
1704    
1705     extern void sched_idle_next(void);
1706    
1707     +#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1708     +extern void wake_up_idle_cpu(int cpu);
1709     +#else
1710     +static inline void wake_up_idle_cpu(int cpu) { }
1711     +#endif
1712     +
1713     #ifdef CONFIG_SCHED_DEBUG
1714     extern unsigned int sysctl_sched_latency;
1715     extern unsigned int sysctl_sched_min_granularity;
1716     diff --git a/include/linux/security.h b/include/linux/security.h
1717     index ac05083..d842ee3 100644
1718     --- a/include/linux/security.h
1719     +++ b/include/linux/security.h
1720     @@ -62,7 +62,6 @@ extern int cap_inode_need_killpriv(struct dentry *dentry);
1721     extern int cap_inode_killpriv(struct dentry *dentry);
1722     extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
1723     extern void cap_task_reparent_to_init (struct task_struct *p);
1724     -extern int cap_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid);
1725     extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp);
1726     extern int cap_task_setioprio (struct task_struct *p, int ioprio);
1727     extern int cap_task_setnice (struct task_struct *p, int nice);
1728     @@ -2112,7 +2111,7 @@ static inline int security_task_kill (struct task_struct *p,
1729     struct siginfo *info, int sig,
1730     u32 secid)
1731     {
1732     - return cap_task_kill(p, info, sig, secid);
1733     + return 0;
1734     }
1735    
1736     static inline int security_task_wait (struct task_struct *p)
1737     diff --git a/include/linux/time.h b/include/linux/time.h
1738     index b04136d..3e8fd9e 100644
1739     --- a/include/linux/time.h
1740     +++ b/include/linux/time.h
1741     @@ -173,6 +173,10 @@ static inline void timespec_add_ns(struct timespec *a, u64 ns)
1742     {
1743     ns += a->tv_nsec;
1744     while(unlikely(ns >= NSEC_PER_SEC)) {
1745     + /* The following asm() prevents the compiler from
1746     + * optimising this loop into a modulo operation. */
1747     + asm("" : "+r"(ns));
1748     +
1749     ns -= NSEC_PER_SEC;
1750     a->tv_sec++;
1751     }
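The empty asm() added above is a plain optimisation barrier: declaring ns as a read-write register operand hides its value from the optimiser, so GCC keeps the subtract-and-carry loop instead of turning it into a 64-bit modulo. A stand-alone sketch of the same idiom (names are illustrative, not from the patch):

/* The empty asm marks "val" as a read-write register operand, so the
 * compiler cannot prove the loop is equivalent to a modulo and replace
 * it with a (potentially expensive) 64-bit division.
 */
static inline unsigned long slow_mod(unsigned long long val, unsigned long base)
{
	while (val >= base) {
		asm("" : "+r"(val));	/* optimisation barrier */
		val -= base;
	}
	return (unsigned long)val;
}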
1752     diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
1753     index 2692ec9..1f999ec 100644
1754     --- a/include/linux/usb/quirks.h
1755     +++ b/include/linux/usb/quirks.h
1756     @@ -9,3 +9,6 @@
1757    
1758     /* device can't resume correctly so reset it instead */
1759     #define USB_QUIRK_RESET_RESUME 0x00000002
1760     +
1761     +/* device can't handle Set-Interface requests */
1762     +#define USB_QUIRK_NO_SET_INTF 0x00000004
1763     diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
1764     index a417b09..e3380e3 100644
1765     --- a/include/linux/usb_usual.h
1766     +++ b/include/linux/usb_usual.h
1767     @@ -50,7 +50,9 @@
1768     US_FLAG(CAPACITY_HEURISTICS, 0x00001000) \
1769     /* sometimes sizes is too big */ \
1770     US_FLAG(MAX_SECTORS_MIN,0x00002000) \
1771     - /* Sets max_sectors to arch min */
1772     + /* Sets max_sectors to arch min */ \
1773     + US_FLAG(BULK_IGNORE_TAG,0x00004000) \
1774     + /* Ignore tag mismatch in bulk operations */
1775    
1776    
1777     #define US_FLAG(name, value) US_FL_##name = value ,
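The US_FLAG() definition in the trailing context above shows how the table expands; a stand-alone sketch of the same x-macro pattern (all names here are invented):

#define MY_FLAGS				\
	MY_FLAG(FIX_CAPACITY,    0x00000001)	\
	MY_FLAG(BULK_IGNORE_TAG, 0x00004000)

#define MY_FLAG(name, value) FL_##name = value,
enum { MY_FLAGS };	/* FL_FIX_CAPACITY = 1, FL_BULK_IGNORE_TAG = 0x4000 */
#undef MY_FLAG

In the storage driver the new bit is then tested as (us->flags & US_FL_BULK_IGNORE_TAG), which is exactly the check added to usb_stor_Bulk_transport() earlier in this patch.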
1778     diff --git a/kernel/sched.c b/kernel/sched.c
1779     index 5ba5db9..f8dc213 100644
1780     --- a/kernel/sched.c
1781     +++ b/kernel/sched.c
1782     @@ -727,6 +727,49 @@ static void resched_cpu(int cpu)
1783     resched_task(cpu_curr(cpu));
1784     spin_unlock_irqrestore(&rq->lock, flags);
1785     }
1786     +
1787     +#ifdef CONFIG_NO_HZ
1788     +/*
1789     + * When add_timer_on() enqueues a timer into the timer wheel of an
1790     + * idle CPU then this timer might expire before the next timer event
1791     + * which is scheduled to wake up that CPU. In case of a completely
1792     + * idle system the next event might even be infinite time into the
1793     + * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1794     + * leaves the inner idle loop so the newly added timer is taken into
1795     + * account when the CPU goes back to idle and evaluates the timer
1796     + * wheel for the next timer event.
1797     + */
1798     +void wake_up_idle_cpu(int cpu)
1799     +{
1800     + struct rq *rq = cpu_rq(cpu);
1801     +
1802     + if (cpu == smp_processor_id())
1803     + return;
1804     +
1805     + /*
1806     + * This is safe, as this function is called with the timer
1807     + * wheel base lock of (cpu) held. When the CPU is on the way
1808     + * to idle and has not yet set rq->curr to idle then it will
1809     + * be serialized on the timer wheel base lock and take the new
1810     + * timer into account automatically.
1811     + */
1812     + if (rq->curr != rq->idle)
1813     + return;
1814     +
1815     + /*
1816     + * We can set TIF_RESCHED on the idle task of the other CPU
1817     + * lockless. The worst case is that the other CPU runs the
1818     + * idle task through an additional NOOP schedule()
1819     + */
1820     + set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
1821     +
1822     + /* NEED_RESCHED must be visible before we test polling */
1823     + smp_mb();
1824     + if (!tsk_is_polling(rq->idle))
1825     + smp_send_reschedule(cpu);
1826     +}
1827     +#endif
1828     +
1829     #else
1830     static inline void resched_task(struct task_struct *p)
1831     {
1832     diff --git a/kernel/timer.c b/kernel/timer.c
1833     index 2a00c22..4ee4f89 100644
1834     --- a/kernel/timer.c
1835     +++ b/kernel/timer.c
1836     @@ -453,10 +453,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
1837     spin_lock_irqsave(&base->lock, flags);
1838     timer_set_base(timer, base);
1839     internal_add_timer(base, timer);
1840     + /*
1841     + * Check whether the other CPU is idle and needs to be
1842     + * triggered to reevaluate the timer wheel when nohz is
1843     + * active. We are protected against the other CPU fiddling
1844     + * with the timer by holding the timer base lock. This also
1845     + * makes sure that a CPU on the way to idle can not evaluate
1846     + * the timer wheel.
1847     + */
1848     + wake_up_idle_cpu(cpu);
1849     spin_unlock_irqrestore(&base->lock, flags);
1850     }
1851    
1852     -
1853     /**
1854     * mod_timer - modify a timer's timeout
1855     * @timer: the timer to be modified
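Taken together, the kernel/sched.c and kernel/timer.c hunks mean callers of add_timer_on() no longer have to worry about a NO_HZ-idle target CPU sleeping past the new expiry. A minimal usage sketch (the timer, callback and expiry value are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timer_fn(unsigned long data)
{
	/* runs on the CPU the timer was armed on */
}

static struct timer_list example_timer;		/* hypothetical timer */

static void arm_timer_on_cpu(int cpu)
{
	setup_timer(&example_timer, example_timer_fn, 0);
	example_timer.expires = jiffies + HZ / 10;
	/* with this patch add_timer_on() itself calls wake_up_idle_cpu(),
	 * so an idle NO_HZ cpu re-evaluates its timer wheel and honours
	 * the new expiry instead of sleeping past it */
	add_timer_on(&example_timer, cpu);
}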
1856     diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
1857     index 00b0262..b0012e2 100644
1858     --- a/mm/allocpercpu.c
1859     +++ b/mm/allocpercpu.c
1860     @@ -6,6 +6,10 @@
1861     #include <linux/mm.h>
1862     #include <linux/module.h>
1863    
1864     +#ifndef cache_line_size
1865     +#define cache_line_size() L1_CACHE_BYTES
1866     +#endif
1867     +
1868     /**
1869     * percpu_depopulate - depopulate per-cpu data for given cpu
1870     * @__pdata: per-cpu data to depopulate
1871     @@ -52,6 +56,11 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
1872     struct percpu_data *pdata = __percpu_disguise(__pdata);
1873     int node = cpu_to_node(cpu);
1874    
1875     + /*
1876     + * We should make sure each CPU gets private memory.
1877     + */
1878     + size = roundup(size, cache_line_size());
1879     +
1880     BUG_ON(pdata->ptrs[cpu]);
1881     if (node_online(node))
1882     pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
1883     @@ -98,7 +107,11 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask);
1884     */
1885     void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
1886     {
1887     - void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
1888     + /*
1889     + * We allocate whole cache lines to avoid false sharing
1890     + */
1891     + size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
1892     + void *pdata = kzalloc(sz, gfp);
1893     void *__pdata = __percpu_disguise(pdata);
1894    
1895     if (unlikely(!pdata))
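Both allocpercpu.c changes serve the same goal as the percpu_data ptrs[1] change earlier in this patch: the pointer array is sized for nr_cpu_ids at runtime, and each CPU's object is padded out to whole cache lines so two CPUs never write into the same line. A small sketch of the padding arithmetic (the struct is illustrative; with 64-byte lines a 24-byte object costs 64 bytes per CPU):

#include <linux/kernel.h>
#include <linux/cache.h>

#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES	/* same fallback as above */
#endif

struct example_stats {			/* illustrative per-cpu object */
	unsigned long packets;
	unsigned long bytes;
	unsigned long drops;
};

static size_t percpu_footprint(unsigned int ncpus)
{
	/* same rounding as percpu_populate() above: every CPU's copy
	 * starts on its own cache line, so updates by one CPU do not
	 * keep evicting the line from another CPU's cache */
	size_t each = roundup(sizeof(struct example_stats),
			      cache_line_size());

	return ncpus * each;
}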
1896     diff --git a/mm/slab.c b/mm/slab.c
1897     index 79c3be0..8323e7d 100644
1898     --- a/mm/slab.c
1899     +++ b/mm/slab.c
1900     @@ -1484,7 +1484,7 @@ void __init kmem_cache_init(void)
1901     list_add(&cache_cache.next, &cache_chain);
1902     cache_cache.colour_off = cache_line_size();
1903     cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1904     - cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
1905     + cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1906    
1907     /*
1908     * struct kmem_cache size depends on nr_node_ids, which
1909     @@ -1605,7 +1605,7 @@ void __init kmem_cache_init(void)
1910     int nid;
1911    
1912     for_each_online_node(nid) {
1913     - init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
1914     + init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1915    
1916     init_list(malloc_sizes[INDEX_AC].cs_cachep,
1917     &initkmem_list3[SIZE_AC + nid], nid);
1918     diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
1919     index 032bf44..156ad38 100644
1920     --- a/net/8021q/vlan.c
1921     +++ b/net/8021q/vlan.c
1922     @@ -326,7 +326,7 @@ static int vlan_dev_init(struct net_device *dev)
1923     int subclass = 0;
1924    
1925     /* IFF_BROADCAST|IFF_MULTICAST; ??? */
1926     - dev->flags = real_dev->flags & ~IFF_UP;
1927     + dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
1928     dev->iflink = real_dev->ifindex;
1929     dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
1930     (1<<__LINK_STATE_DORMANT))) |
1931     diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
1932     index 92b517a..bf706f8 100644
1933     --- a/net/ax25/ax25_out.c
1934     +++ b/net/ax25/ax25_out.c
1935     @@ -117,6 +117,12 @@ void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
1936     unsigned char *p;
1937     int frontlen, len, fragno, ka9qfrag, first = 1;
1938    
1939     + if (paclen < 16) {
1940     + WARN_ON_ONCE(1);
1941     + kfree_skb(skb);
1942     + return;
1943     + }
1944     +
1945     if ((skb->len - 1) > paclen) {
1946     if (*skb->data == AX25_P_TEXT) {
1947     skb_pull(skb, 1); /* skip PID */
1948     @@ -251,8 +257,6 @@ void ax25_kick(ax25_cb *ax25)
1949     if (start == end)
1950     return;
1951    
1952     - ax25->vs = start;
1953     -
1954     /*
1955     * Transmit data until either we're out of data to send or
1956     * the window is full. Send a poll on the final I frame if
1957     @@ -261,8 +265,13 @@ void ax25_kick(ax25_cb *ax25)
1958    
1959     /*
1960     * Dequeue the frame and copy it.
1961     + * Check for race with ax25_clear_queues().
1962     */
1963     skb = skb_dequeue(&ax25->write_queue);
1964     + if (!skb)
1965     + return;
1966     +
1967     + ax25->vs = start;
1968    
1969     do {
1970     if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
1971     diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
1972     index 372b0d3..76f5f6f 100644
1973     --- a/net/bluetooth/hci_core.c
1974     +++ b/net/bluetooth/hci_core.c
1975     @@ -901,8 +901,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
1976    
1977     BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
1978    
1979     - hci_unregister_sysfs(hdev);
1980     -
1981     write_lock_bh(&hci_dev_list_lock);
1982     list_del(&hdev->list);
1983     write_unlock_bh(&hci_dev_list_lock);
1984     @@ -914,6 +912,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
1985    
1986     hci_notify(hdev, HCI_DEV_UNREG);
1987    
1988     + hci_unregister_sysfs(hdev);
1989     +
1990     __hci_dev_put(hdev);
1991    
1992     return 0;
1993     diff --git a/net/core/dev.c b/net/core/dev.c
1994     index 4d44372..82f77ef 100644
1995     --- a/net/core/dev.c
1996     +++ b/net/core/dev.c
1997     @@ -3240,7 +3240,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
1998     return -EOPNOTSUPP;
1999    
2000     case SIOCADDMULTI:
2001     - if (!dev->set_multicast_list ||
2002     + if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
2003     ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2004     return -EINVAL;
2005     if (!netif_device_present(dev))
2006     @@ -3249,7 +3249,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
2007     dev->addr_len, 1);
2008    
2009     case SIOCDELMULTI:
2010     - if (!dev->set_multicast_list ||
2011     + if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
2012     ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
2013     return -EINVAL;
2014     if (!netif_device_present(dev))
2015     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
2016     index c499b5c..2386c5e 100644
2017     --- a/net/core/netpoll.c
2018     +++ b/net/core/netpoll.c
2019     @@ -219,10 +219,12 @@ static void zap_completion_queue(void)
2020     while (clist != NULL) {
2021     struct sk_buff *skb = clist;
2022     clist = clist->next;
2023     - if (skb->destructor)
2024     + if (skb->destructor) {
2025     + atomic_inc(&skb->users);
2026     dev_kfree_skb_any(skb); /* put this one back */
2027     - else
2028     + } else {
2029     __kfree_skb(skb);
2030     + }
2031     }
2032     }
2033    
2034     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2035     index 8e65182..4305154 100644
2036     --- a/net/ipv4/tcp.c
2037     +++ b/net/ipv4/tcp.c
2038     @@ -583,7 +583,7 @@ new_segment:
2039     if (!(psize -= copy))
2040     goto out;
2041    
2042     - if (skb->len < mss_now || (flags & MSG_OOB))
2043     + if (skb->len < size_goal || (flags & MSG_OOB))
2044     continue;
2045    
2046     if (forced_push(tp)) {
2047     @@ -829,7 +829,7 @@ new_segment:
2048     if ((seglen -= copy) == 0 && iovlen == 0)
2049     goto out;
2050    
2051     - if (skb->len < mss_now || (flags & MSG_OOB))
2052     + if (skb->len < size_goal || (flags & MSG_OOB))
2053     continue;
2054    
2055     if (forced_push(tp)) {
2056     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2057     index f4c1eef..52931e8 100644
2058     --- a/net/ipv4/tcp_output.c
2059     +++ b/net/ipv4/tcp_output.c
2060     @@ -258,7 +258,7 @@ static u16 tcp_select_window(struct sock *sk)
2061     *
2062     * Relax Will Robinson.
2063     */
2064     - new_win = cur_win;
2065     + new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2066     }
2067     tp->rcv_wnd = new_win;
2068     tp->rcv_wup = tp->rcv_nxt;
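The replacement line matters because the window field in the TCP header is sent right-shifted by rcv_wscale; keeping a raw cur_win that is not a multiple of the scale unit would get truncated and look to the peer like a shrinking window. A worked sketch of the rounding (the numbers are made up):

#include <linux/kernel.h>
#include <linux/types.h>

static u32 kept_window(u32 cur_win, u8 rcv_wscale)
{
	/* e.g. rcv_wscale == 7: the header field is in 128-byte units.
	 * cur_win == 5000 would truncate to 39 units (4992 bytes) and
	 * look like a shrinking window; ALIGN(5000, 128) == 5120, i.e.
	 * 40 units, so the peer never sees less than it was promised. */
	return ALIGN(cur_win, 1 << rcv_wscale);
}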
2069     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
2070     index e170c67..89f95f9 100644
2071     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
2072     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
2073     @@ -147,7 +147,9 @@ static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
2074    
2075     static void nf_ct_frag6_evictor(void)
2076     {
2077     + local_bh_disable();
2078     inet_frag_evictor(&nf_frags);
2079     + local_bh_enable();
2080     }
2081    
2082     static void nf_ct_frag6_expire(unsigned long data)
2083     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
2084     index 46cf962..8c50eb4 100644
2085     --- a/net/llc/af_llc.c
2086     +++ b/net/llc/af_llc.c
2087     @@ -155,6 +155,9 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol)
2088     struct sock *sk;
2089     int rc = -ESOCKTNOSUPPORT;
2090    
2091     + if (!capable(CAP_NET_RAW))
2092     + return -EPERM;
2093     +
2094     if (net != &init_net)
2095     return -EAFNOSUPPORT;
2096    
2097     diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2098     index e595e65..7888955 100644
2099     --- a/net/sched/sch_generic.c
2100     +++ b/net/sched/sch_generic.c
2101     @@ -178,10 +178,22 @@ static inline int qdisc_restart(struct net_device *dev)
2102    
2103     void __qdisc_run(struct net_device *dev)
2104     {
2105     - do {
2106     - if (!qdisc_restart(dev))
2107     + unsigned long start_time = jiffies;
2108     +
2109     + while (qdisc_restart(dev)) {
2110     + if (netif_queue_stopped(dev))
2111     + break;
2112     +
2113     + /*
2114     + * Postpone processing if
2115     + * 1. another process needs the CPU;
2116     + * 2. we've been doing it for too long.
2117     + */
2118     + if (need_resched() || jiffies != start_time) {
2119     + netif_schedule(dev);
2120     break;
2121     - } while (!netif_queue_stopped(dev));
2122     + }
2123     + }
2124    
2125     clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
2126     }
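The rewritten loop bounds transmit work by elapsed time and by need_resched() rather than only by queue state; the htb_do_events() change in the next file applies the same idea with an explicit two-jiffy budget. A generic sketch of the idiom (the work helper is a hypothetical stub):

#include <linux/jiffies.h>
#include <linux/sched.h>

static int do_one_unit_of_work(void)
{
	return 0;	/* stub: pretend the backlog is already empty */
}

static void bounded_batch(void)
{
	/* budget the batch at roughly two ticks, then yield */
	unsigned long stop_at = jiffies + 2;

	while (time_before(jiffies, stop_at)) {
		if (!do_one_unit_of_work())
			break;		/* nothing left to do */
		if (need_resched())
			break;		/* another task wants the CPU */
	}
}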
2127     diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
2128     index 5e608a6..88860dd 100644
2129     --- a/net/sched/sch_htb.c
2130     +++ b/net/sched/sch_htb.c
2131     @@ -708,9 +708,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
2132     */
2133     static psched_time_t htb_do_events(struct htb_sched *q, int level)
2134     {
2135     - int i;
2136     -
2137     - for (i = 0; i < 500; i++) {
2138     + /* don't run for longer than 2 jiffies; 2 is used instead of
2139     + 1 to simplify things when jiffy is going to be incremented
2140     + too soon */
2141     + unsigned long stop_at = jiffies + 2;
2142     + while (time_before(jiffies, stop_at)) {
2143     struct htb_class *cl;
2144     long diff;
2145     struct rb_node *p = rb_first(&q->wait_pq[level]);
2146     @@ -728,9 +730,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
2147     if (cl->cmode != HTB_CAN_SEND)
2148     htb_add_to_wait_tree(q, cl, diff);
2149     }
2150     - if (net_ratelimit())
2151     - printk(KERN_WARNING "htb: too many events !\n");
2152     - return q->now + PSCHED_TICKS_PER_SEC / 10;
2153     + /* too much load - let's continue on next jiffie */
2154     + return q->now + PSCHED_TICKS_PER_SEC / HZ;
2155     }
2156    
2157     /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
2158     diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
2159     index 6a7d010..a98c003 100644
2160     --- a/net/sctp/bind_addr.c
2161     +++ b/net/sctp/bind_addr.c
2162     @@ -209,6 +209,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
2163     int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
2164     {
2165     struct sctp_sockaddr_entry *addr, *temp;
2166     + int found = 0;
2167    
2168     /* We hold the socket lock when calling this function,
2169     * and that acts as a writer synchronizing lock.
2170     @@ -216,13 +217,14 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
2171     list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
2172     if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
2173     /* Found the exact match. */
2174     + found = 1;
2175     addr->valid = 0;
2176     list_del_rcu(&addr->list);
2177     break;
2178     }
2179     }
2180    
2181     - if (addr && !addr->valid) {
2182     + if (found) {
2183     call_rcu(&addr->rcu, sctp_local_addr_free);
2184     SCTP_DBG_OBJCNT_DEC(addr);
2185     return 0;
2186     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2187     index 7f31ff6..1eaa1b5 100644
2188     --- a/net/sctp/ipv6.c
2189     +++ b/net/sctp/ipv6.c
2190     @@ -89,6 +89,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
2191     struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
2192     struct sctp_sockaddr_entry *addr = NULL;
2193     struct sctp_sockaddr_entry *temp;
2194     + int found = 0;
2195    
2196     switch (ev) {
2197     case NETDEV_UP:
2198     @@ -111,13 +112,14 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
2199     &sctp_local_addr_list, list) {
2200     if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
2201     &ifa->addr)) {
2202     + found = 1;
2203     addr->valid = 0;
2204     list_del_rcu(&addr->list);
2205     break;
2206     }
2207     }
2208     spin_unlock_bh(&sctp_local_addr_lock);
2209     - if (addr && !addr->valid)
2210     + if (found)
2211     call_rcu(&addr->rcu, sctp_local_addr_free);
2212     break;
2213     }
2214     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
2215     index d50f610..022adbd 100644
2216     --- a/net/sctp/protocol.c
2217     +++ b/net/sctp/protocol.c
2218     @@ -626,6 +626,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
2219     struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2220     struct sctp_sockaddr_entry *addr = NULL;
2221     struct sctp_sockaddr_entry *temp;
2222     + int found = 0;
2223    
2224     switch (ev) {
2225     case NETDEV_UP:
2226     @@ -645,13 +646,14 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
2227     list_for_each_entry_safe(addr, temp,
2228     &sctp_local_addr_list, list) {
2229     if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
2230     + found = 1;
2231     addr->valid = 0;
2232     list_del_rcu(&addr->list);
2233     break;
2234     }
2235     }
2236     spin_unlock_bh(&sctp_local_addr_lock);
2237     - if (addr && !addr->valid)
2238     + if (found)
2239     call_rcu(&addr->rcu, sctp_local_addr_free);
2240     break;
2241     }
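All three sctp hunks fix the same C pitfall: after list_for_each_entry_safe() the cursor is never NULL, even when nothing matched, so testing it after the loop can act on a bogus entry. The usual fix, as here, is an explicit found flag. A stand-alone sketch of the pattern (the type and list are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int key;
	struct list_head list;
};

static int remove_key(struct list_head *head, int key)
{
	struct item *it, *tmp;
	int found = 0;

	list_for_each_entry_safe(it, tmp, head, list) {
		if (it->key == key) {
			found = 1;
			list_del(&it->list);
			break;
		}
	}
	/* do NOT test "it" here: when nothing matched it points at the
	 * container computed from the list head, not at NULL */
	if (found)
		kfree(it);
	return found;
}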
2242     diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2243     index d988f5d..f6b332c 100644
2244     --- a/scripts/Makefile.modpost
2245     +++ b/scripts/Makefile.modpost
2246     @@ -53,6 +53,9 @@ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
2247     # Stop after building .o files if NOFINAL is set. Makes compile tests quicker
2248     _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
2249    
2250     +ifneq ($(KBUILD_BUILDHOST),$(ARCH))
2251     + cross_build := 1
2252     +endif
2253    
2254     # Step 2), invoke modpost
2255     # Includes step 3,4
2256     @@ -62,7 +65,8 @@ modpost = scripts/mod/modpost \
2257     $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
2258     $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
2259     $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
2260     - $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
2261     + $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \
2262     + $(if $(cross_build),-c)
2263    
2264     quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
2265     cmd_modpost = $(modpost) -s
2266     diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
2267     index 9ddf944..348d868 100644
2268     --- a/scripts/mod/file2alias.c
2269     +++ b/scripts/mod/file2alias.c
2270     @@ -51,11 +51,13 @@ do { \
2271     sprintf(str + strlen(str), "*"); \
2272     } while(0)
2273    
2274     +unsigned int cross_build = 0;
2275     /**
2276     * Check that sizeof(device_id type) are consistent with size of section
2277     * in .o file. If in-consistent then userspace and kernel does not agree
2278     * on actual size which is a bug.
2279     * Also verify that the final entry in the table is all zeros.
2280     + * Ignore both checks if build host differ from target host and size differs.
2281     **/
2282     static void device_id_check(const char *modname, const char *device_id,
2283     unsigned long size, unsigned long id_size,
2284     @@ -64,6 +66,8 @@ static void device_id_check(const char *modname, const char *device_id,
2285     int i;
2286    
2287     if (size % id_size || size < id_size) {
2288     + if (cross_build != 0)
2289     + return;
2290     fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
2291     "of the size of section __mod_%s_device_table=%lu.\n"
2292     "Fix definition of struct %s_device_id "
2293     diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
2294     index 93ac52a..a990011 100644
2295     --- a/scripts/mod/modpost.c
2296     +++ b/scripts/mod/modpost.c
2297     @@ -1659,7 +1659,7 @@ int main(int argc, char **argv)
2298     int opt;
2299     int err;
2300    
2301     - while ((opt = getopt(argc, argv, "i:I:mso:aw")) != -1) {
2302     + while ((opt = getopt(argc, argv, "i:I:cmso:aw")) != -1) {
2303     switch(opt) {
2304     case 'i':
2305     kernel_read = optarg;
2306     @@ -1668,6 +1668,9 @@ int main(int argc, char **argv)
2307     module_read = optarg;
2308     external_module = 1;
2309     break;
2310     + case 'c':
2311     + cross_build = 1;
2312     + break;
2313     case 'm':
2314     modversions = 1;
2315     break;
2316     diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
2317     index 0ffed17..b50e3c9 100644
2318     --- a/scripts/mod/modpost.h
2319     +++ b/scripts/mod/modpost.h
2320     @@ -130,6 +130,7 @@ struct elf_info {
2321     };
2322    
2323     /* file2alias.c */
2324     +extern unsigned int cross_build;
2325     void handle_moddevtable(struct module *mod, struct elf_info *info,
2326     Elf_Sym *sym, const char *symname);
2327     void add_moddevtable(struct buffer *buf, struct module *mod);
2328     diff --git a/security/capability.c b/security/capability.c
2329     index 9e99f36..2c6e06d 100644
2330     --- a/security/capability.c
2331     +++ b/security/capability.c
2332     @@ -40,7 +40,6 @@ static struct security_operations capability_ops = {
2333     .inode_need_killpriv = cap_inode_need_killpriv,
2334     .inode_killpriv = cap_inode_killpriv,
2335    
2336     - .task_kill = cap_task_kill,
2337     .task_setscheduler = cap_task_setscheduler,
2338     .task_setioprio = cap_task_setioprio,
2339     .task_setnice = cap_task_setnice,
2340     diff --git a/security/commoncap.c b/security/commoncap.c
2341     index e87422e..6e9065c 100644
2342     --- a/security/commoncap.c
2343     +++ b/security/commoncap.c
2344     @@ -527,40 +527,6 @@ int cap_task_setnice (struct task_struct *p, int nice)
2345     return cap_safe_nice(p);
2346     }
2347    
2348     -int cap_task_kill(struct task_struct *p, struct siginfo *info,
2349     - int sig, u32 secid)
2350     -{
2351     - if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
2352     - return 0;
2353     -
2354     - /*
2355     - * Running a setuid root program raises your capabilities.
2356     - * Killing your own setuid root processes was previously
2357     - * allowed.
2358     - * We must preserve legacy signal behavior in this case.
2359     - */
2360     - if (p->uid == current->uid)
2361     - return 0;
2362     -
2363     - /* sigcont is permitted within same session */
2364     - if (sig == SIGCONT && (task_session_nr(current) == task_session_nr(p)))
2365     - return 0;
2366     -
2367     - if (secid)
2368     - /*
2369     - * Signal sent as a particular user.
2370     - * Capabilities are ignored. May be wrong, but it's the
2371     - * only thing we can do at the moment.
2372     - * Used only by usb drivers?
2373     - */
2374     - return 0;
2375     - if (cap_issubset(p->cap_permitted, current->cap_permitted))
2376     - return 0;
2377     - if (capable(CAP_KILL))
2378     - return 0;
2379     -
2380     - return -EPERM;
2381     -}
2382     #else
2383     int cap_task_setscheduler (struct task_struct *p, int policy,
2384     struct sched_param *lp)
2385     @@ -575,11 +541,6 @@ int cap_task_setnice (struct task_struct *p, int nice)
2386     {
2387     return 0;
2388     }
2389     -int cap_task_kill(struct task_struct *p, struct siginfo *info,
2390     - int sig, u32 secid)
2391     -{
2392     - return 0;
2393     -}
2394     #endif
2395    
2396     void cap_task_reparent_to_init (struct task_struct *p)