Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.29-r8/0104-2.6.29.5-all-fixes.patch



Revision 1114
Sun Aug 22 17:59:15 2010 UTC by niro
File size: 93341 byte(s)
-added

1 niro 1114 diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
2     index cec829b..5c484ae 100644
3     --- a/Documentation/filesystems/ext4.txt
4     +++ b/Documentation/filesystems/ext4.txt
5     @@ -85,7 +85,7 @@ Note: More extensive information for getting started with ext4 can be
6     * extent format more robust in face of on-disk corruption due to magics,
7     * internal redundancy in tree
8     * improved file allocation (multi-block alloc)
9     -* fix 32000 subdirectory limit
10     +* lift 32000 subdirectory limit imposed by i_links_count[1]
11     * nsec timestamps for mtime, atime, ctime, create time
12     * inode version field on disk (NFSv4, Lustre)
13     * reduced e2fsck time via uninit_bg feature
14     @@ -100,6 +100,9 @@ Note: More extensive information for getting started with ext4 can be
15     * efficent new ordered mode in JBD2 and ext4(avoid using buffer head to force
16     the ordering)
17    
18     +[1] Filesystems with a block size of 1k may see a limit imposed by the
19     +directory hash tree having a maximum depth of two.
20     +
21     2.2 Candidate features for future inclusion
22    
23     * Online defrag (patches available but not well tested)
24     diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
25     index f900a39..3bc41c3 100644
26     --- a/arch/powerpc/mm/tlb_nohash_low.S
27     +++ b/arch/powerpc/mm/tlb_nohash_low.S
28     @@ -161,6 +161,6 @@ _GLOBAL(_tlbil_va)
29     isync
30     1: wrtee r10
31     blr
32     -#elif
33     +#else
34     #error Unsupported processor type !
35     #endif
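
The powerpc fix above corrects a preprocessor error: #elif always takes an expression to test, so a bare #elif is invalid, and the "anything else is a build error" branch must be spelled #else. A minimal standalone sketch of the idiom (illustrative only, not taken from the kernel tree):

#include <stdio.h>

#if defined(CPU_A)
static const char *cpu = "A";
#elif defined(CPU_B)   /* #elif needs an expression to evaluate */
static const char *cpu = "B";
#else                  /* catch-all: any other configuration is an error */
#error Unsupported processor type !
#endif

int main(void)
{
    printf("built for cpu %s\n", cpu);
    return 0;
}

Compiled with -DCPU_A or -DCPU_B this builds; with neither, the build stops at the #error, which is the behavior the tlb_nohash_low.S hunk restores.
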
36     diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
37     index 0a83bd7..c8f14c1 100644
38     --- a/arch/sparc/kernel/of_device_32.c
39     +++ b/arch/sparc/kernel/of_device_32.c
40     @@ -246,8 +246,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
41    
42     static int of_bus_sbus_match(struct device_node *np)
43     {
44     - return !strcmp(np->name, "sbus") ||
45     - !strcmp(np->name, "sbi");
46     + struct device_node *dp = np;
47     +
48     + while (dp) {
49     + if (!strcmp(dp->name, "sbus") ||
50     + !strcmp(dp->name, "sbi"))
51     + return 1;
52     +
53     + /* Have a look at use_1to1_mapping(). We're trying
54     + * to match SBUS if that's the top-level bus and we
55     + * don't have some intervening real bus that provides
56     + * ranges based translations.
57     + */
58     + if (of_find_property(dp, "ranges", NULL) != NULL)
59     + break;
60     +
61     + dp = dp->parent;
62     + }
63     +
64     + return 0;
65     }
66    
67     static void of_bus_sbus_count_cells(struct device_node *child,
68     diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
69     index b4a12c9..9013c4b 100644
70     --- a/arch/sparc/kernel/of_device_64.c
71     +++ b/arch/sparc/kernel/of_device_64.c
72     @@ -301,8 +301,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
73    
74     static int of_bus_sbus_match(struct device_node *np)
75     {
76     - return !strcmp(np->name, "sbus") ||
77     - !strcmp(np->name, "sbi");
78     + struct device_node *dp = np;
79     +
80     + while (dp) {
81     + if (!strcmp(dp->name, "sbus") ||
82     + !strcmp(dp->name, "sbi"))
83     + return 1;
84     +
85     + /* Have a look at use_1to1_mapping(). We're trying
86     + * to match SBUS if that's the top-level bus and we
87     + * don't have some intervening real bus that provides
88     + * ranges based translations.
89     + */
90     + if (of_find_property(dp, "ranges", NULL) != NULL)
91     + break;
92     +
93     + dp = dp->parent;
94     + }
95     +
96     + return 0;
97     }
98    
99     static void of_bus_sbus_count_cells(struct device_node *child,
100     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
101     index 79457f6..a35fef7 100644
102     --- a/arch/sparc/kernel/smp_64.c
103     +++ b/arch/sparc/kernel/smp_64.c
104     @@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
105     while (!cpu_isset(cpuid, smp_commenced_mask))
106     rmb();
107    
108     - ipi_call_lock();
109     + ipi_call_lock_irq();
110     cpu_set(cpuid, cpu_online_map);
111     - ipi_call_unlock();
112     + ipi_call_unlock_irq();
113    
114     /* idle thread is expected to have preempt disabled */
115     preempt_disable();
116     diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
117     index a00545f..db3134f 100644
118     --- a/arch/x86/kernel/hpet.c
119     +++ b/arch/x86/kernel/hpet.c
120     @@ -191,27 +191,42 @@ static struct clock_event_device hpet_clockevent = {
121     .rating = 50,
122     };
123    
124     -static void hpet_start_counter(void)
125     +static void hpet_stop_counter(void)
126     {
127     unsigned long cfg = hpet_readl(HPET_CFG);
128     -
129     cfg &= ~HPET_CFG_ENABLE;
130     hpet_writel(cfg, HPET_CFG);
131     +}
132     +
133     +static void hpet_reset_counter(void)
134     +{
135     hpet_writel(0, HPET_COUNTER);
136     hpet_writel(0, HPET_COUNTER + 4);
137     +}
138     +
139     +static void hpet_start_counter(void)
140     +{
141     + unsigned long cfg = hpet_readl(HPET_CFG);
142     cfg |= HPET_CFG_ENABLE;
143     hpet_writel(cfg, HPET_CFG);
144     }
145    
146     +static void hpet_restart_counter(void)
147     +{
148     + hpet_stop_counter();
149     + hpet_reset_counter();
150     + hpet_start_counter();
151     +}
152     +
153     static void hpet_resume_device(void)
154     {
155     force_hpet_resume();
156     }
157    
158     -static void hpet_restart_counter(void)
159     +static void hpet_resume_counter(void)
160     {
161     hpet_resume_device();
162     - hpet_start_counter();
163     + hpet_restart_counter();
164     }
165    
166     static void hpet_enable_legacy_int(void)
167     @@ -264,6 +279,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
168    
169     switch (mode) {
170     case CLOCK_EVT_MODE_PERIODIC:
171     + hpet_stop_counter();
172     delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
173     delta >>= evt->shift;
174     now = hpet_readl(HPET_COUNTER);
175     @@ -274,14 +290,17 @@ static void hpet_set_mode(enum clock_event_mode mode,
176     cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
177     HPET_TN_SETVAL | HPET_TN_32BIT;
178     hpet_writel(cfg, HPET_Tn_CFG(timer));
179     - /*
180     - * The first write after writing TN_SETVAL to the
181     - * config register sets the counter value, the second
182     - * write sets the period.
183     - */
184     hpet_writel(cmp, HPET_Tn_CMP(timer));
185     udelay(1);
186     + /*
187     + * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
188     + * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
189     + * bit is automatically cleared after the first write.
190     + * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
191     + * Publication # 24674)
192     + */
193     hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
194     + hpet_start_counter();
195     break;
196    
197     case CLOCK_EVT_MODE_ONESHOT:
198     @@ -695,7 +714,7 @@ static struct clocksource clocksource_hpet = {
199     .mask = HPET_MASK,
200     .shift = HPET_SHIFT,
201     .flags = CLOCK_SOURCE_IS_CONTINUOUS,
202     - .resume = hpet_restart_counter,
203     + .resume = hpet_resume_counter,
204     #ifdef CONFIG_X86_64
205     .vread = vread_hpet,
206     #endif
207     @@ -707,7 +726,7 @@ static int hpet_clocksource_register(void)
208     cycle_t t1;
209    
210     /* Start the counter */
211     - hpet_start_counter();
212     + hpet_restart_counter();
213    
214     /* Verify whether hpet counter works */
215     t1 = read_hpet();
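
The hpet.c rework splits the old hpet_start_counter() into stop, reset, and start helpers so that periodic mode can program the comparator while the main counter is halted; otherwise the running counter can race past the freshly written comparator value. A hedged sketch of the resulting sequence, with plain variables standing in for the MMIO registers (the names below are stand-ins, not the kernel's hpet_readl/hpet_writel):

#include <stdint.h>
#include <stdio.h>

#define CFG_ENABLE 0x1u

static uint32_t cfg;        /* stand-in for the HPET_CFG register */
static uint32_t counter;    /* stand-in for HPET_COUNTER */
static uint32_t comparator; /* stand-in for HPET_Tn_CMP(timer) */

static void stop_counter(void)  { cfg &= ~CFG_ENABLE; }
static void start_counter(void) { cfg |= CFG_ENABLE; }

/* Program periodic mode with the counter halted: if the counter kept
 * running, it could pass the comparator value written below and the
 * first period would be absurdly long. */
static void program_periodic(uint32_t delta)
{
    stop_counter();
    counter = 0;
    comparator = delta;
    start_counter();
}

int main(void)
{
    program_periodic(12345);
    printf("cfg=%#x cmp=%u\n", cfg, comparator);
    return 0;
}
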
216     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
217     index 8005da2..2d2affd 100644
218     --- a/arch/x86/kvm/mmu.c
219     +++ b/arch/x86/kvm/mmu.c
220     @@ -2906,8 +2906,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
221    
222     static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
223     {
224     - kvm_x86_ops->tlb_flush(vcpu);
225     - set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
226     + kvm_set_cr3(vcpu, vcpu->arch.cr3);
227     return 1;
228     }
229    
230     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
231     index 92f0457..570184a 100644
232     --- a/arch/x86/kvm/x86.c
233     +++ b/arch/x86/kvm/x86.c
234     @@ -334,6 +334,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
235    
236     void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
237     {
238     + unsigned long old_cr4 = vcpu->arch.cr4;
239     + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
240     +
241     if (cr4 & CR4_RESERVED_BITS) {
242     printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
243     kvm_inject_gp(vcpu, 0);
244     @@ -347,7 +350,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
245     kvm_inject_gp(vcpu, 0);
246     return;
247     }
248     - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
249     + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
250     + && ((cr4 ^ old_cr4) & pdptr_bits)
251     && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
252     printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
253     kvm_inject_gp(vcpu, 0);
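
The kvm_set_cr4() change reloads the guest PDPTEs only when one of the CR4 bits that actually affect them (PGE, PSE, PAE) changes: the xor picks out the flipped bits and the mask keeps the relevant ones. A small standalone illustration of that predicate (the bit positions mirror the x86 definitions):

#include <stdio.h>

#define X86_CR4_PSE (1ul << 4)
#define X86_CR4_PAE (1ul << 5)
#define X86_CR4_PGE (1ul << 7)

int main(void)
{
    unsigned long old_cr4 = X86_CR4_PAE | X86_CR4_PGE;
    unsigned long new_cr4 = X86_CR4_PAE;                /* PGE cleared */
    unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

    /* Non-zero only when a bit that affects the PDPTEs flipped. */
    if ((new_cr4 ^ old_cr4) & pdptr_bits)
        printf("paging-related CR4 bit changed: reload PDPTEs\n");
    else
        printf("no relevant change: skip the reload\n");
    return 0;
}
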
254     diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
255     index 8f307d9..f46c340 100644
256     --- a/arch/x86/mm/hugetlbpage.c
257     +++ b/arch/x86/mm/hugetlbpage.c
258     @@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
259     unsigned long sbase = saddr & PUD_MASK;
260     unsigned long s_end = sbase + PUD_SIZE;
261    
262     + /* Allow segments to share if only one is marked locked */
263     + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
264     + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
265     +
266     /*
267     * match the virtual addresses, permission and the alignment of the
268     * page table page.
269     */
270     if (pmd_index(addr) != pmd_index(saddr) ||
271     - vma->vm_flags != svma->vm_flags ||
272     + vm_flags != svm_flags ||
273     sbase < svma->vm_start || svma->vm_end < s_end)
274     return 0;
275    
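
The hugetlbpage.c fix compares the two VMAs' flags with VM_LOCKED masked out on both sides, so an mlock()ed mapping can still share a page table page with an otherwise identical unlocked one. The pattern in isolation (VM_LOCKED's value matches the x86 definition but is reproduced here only for illustration):

#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_LOCKED 0x2000u

/* Sharing is fine when the mappings differ only in VM_LOCKED, so clear
 * that bit on both sides before comparing. */
static int shareable(unsigned a, unsigned b)
{
    return (a & ~VM_LOCKED) == (b & ~VM_LOCKED);
}

int main(void)
{
    printf("%d\n", shareable(VM_READ | VM_WRITE | VM_LOCKED,
                             VM_READ | VM_WRITE));   /* 1 */
    return 0;
}
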
276     diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
277     index 9136946..d6033b8 100644
278     --- a/arch/x86/pci/mmconfig-shared.c
279     +++ b/arch/x86/pci/mmconfig-shared.c
280     @@ -254,7 +254,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
281     if (!fixmem32)
282     return AE_OK;
283     if ((mcfg_res->start >= fixmem32->address) &&
284     - (mcfg_res->end <= (fixmem32->address +
285     + (mcfg_res->end < (fixmem32->address +
286     fixmem32->address_length))) {
287     mcfg_res->flags = 1;
288     return AE_CTRL_TERMINATE;
289     @@ -271,7 +271,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
290     return AE_OK;
291    
292     if ((mcfg_res->start >= address.minimum) &&
293     - (mcfg_res->end <= (address.minimum + address.address_length))) {
294     + (mcfg_res->end < (address.minimum + address.address_length))) {
295     mcfg_res->flags = 1;
296     return AE_CTRL_TERMINATE;
297     }
298     @@ -297,7 +297,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
299     struct resource mcfg_res;
300    
301     mcfg_res.start = start;
302     - mcfg_res.end = end;
303     + mcfg_res.end = end - 1;
304     mcfg_res.flags = 0;
305    
306     acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
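
All three mmconfig-shared.c hunks fix the same off-by-one: struct resource ends are inclusive, while address + address_length is one past the last byte, so containment needs a strict < (and building an mcfg_res from an exclusive end needs end - 1). A compact sketch of the convention:

#include <stdint.h>
#include <stdio.h>

/* start/end_incl follow struct resource: end is the last valid byte.
 * base/len describe the window exclusively: base+len is one past it. */
static int in_window(uint64_t start, uint64_t end_incl,
                     uint64_t base, uint64_t len)
{
    return start >= base && end_incl < base + len;
}

int main(void)
{
    uint64_t base = 0xe0000000u, len = 0x1000;

    /* the full window, expressed with an inclusive end */
    printf("%d\n", in_window(base, base + len - 1, base, len));  /* 1 */
    /* one byte past it */
    printf("%d\n", in_window(base, base + len, base, len));      /* 0 */
    return 0;
}
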
307     diff --git a/crypto/api.c b/crypto/api.c
308     index 38a2bc0..e7aa72d 100644
309     --- a/crypto/api.c
310     +++ b/crypto/api.c
311     @@ -221,7 +221,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
312    
313     request_module(name);
314    
315     - if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
316     + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
317     + CRYPTO_ALG_NEED_FALLBACK) &&
318     snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
319     request_module(tmp);
320    
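
The crypto/api.c fix adds a final & CRYPTO_ALG_NEED_FALLBACK so that only the fallback bit of (type ^ CRYPTO_ALG_NEED_FALLBACK) & mask decides whether the "<name>-all" module gets requested; previously any unrelated bit surviving the mask could suppress the request. A standalone sketch of the bit test (the constant mirrors the kernel value but is shown here only for illustration):

#include <stdio.h>

#define CRYPTO_ALG_NEED_FALLBACK 0x00000100u

static int want_all_module(unsigned type, unsigned mask)
{
    /* Zero everywhere except the fallback bit is discarded before the
     * test, so unrelated bits of type and mask no longer influence
     * whether the "-all" module is requested. */
    return !((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
             CRYPTO_ALG_NEED_FALLBACK);
}

int main(void)
{
    printf("%d\n", want_all_module(0, 0));                         /* 1 */
    printf("%d\n", want_all_module(0, CRYPTO_ALG_NEED_FALLBACK));  /* 0 */
    printf("%d\n", want_all_module(CRYPTO_ALG_NEED_FALLBACK,
                                   CRYPTO_ALG_NEED_FALLBACK));     /* 1 */
    return 0;
}
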
321     diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
322     index 7bc22a4..61f6b7a 100644
323     --- a/drivers/acpi/processor_idle.c
324     +++ b/drivers/acpi/processor_idle.c
325     @@ -212,6 +212,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
326     struct acpi_processor_power *pwr = &pr->power;
327     u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
328    
329     + if (boot_cpu_has(X86_FEATURE_AMDC1E))
330     + type = ACPI_STATE_C1;
331     +
332     /*
333     * Check, if one of the previous states already marked the lapic
334     * unstable
335     @@ -648,6 +651,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
336     switch (cx->type) {
337     case ACPI_STATE_C1:
338     cx->valid = 1;
339     + acpi_timer_check_state(i, pr, cx);
340     break;
341    
342     case ACPI_STATE_C2:
343     @@ -866,17 +870,19 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
344    
345     /* Do not access any ACPI IO ports in suspend path */
346     if (acpi_idle_suspend) {
347     - acpi_safe_halt();
348     local_irq_enable();
349     + cpu_relax();
350     return 0;
351     }
352    
353     + acpi_state_timer_broadcast(pr, cx, 1);
354     t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
355     acpi_idle_do_entry(cx);
356     t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
357    
358     local_irq_enable();
359     cx->usage++;
360     + acpi_state_timer_broadcast(pr, cx, 0);
361    
362     return ticks_elapsed_in_us(t1, t2);
363     }
364     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
365     index 8f90508..6971a12 100644
366     --- a/drivers/block/xen-blkfront.c
367     +++ b/drivers/block/xen-blkfront.c
368     @@ -977,8 +977,10 @@ static void backend_changed(struct xenbus_device *dev,
369     break;
370    
371     case XenbusStateClosing:
372     - if (info->gd == NULL)
373     - xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
374     + if (info->gd == NULL) {
375     + xenbus_frontend_closed(dev);
376     + break;
377     + }
378     bd = bdget_disk(info->gd, 0);
379     if (bd == NULL)
380     xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
381     diff --git a/drivers/char/random.c b/drivers/char/random.c
382     index 7c13581..1801245 100644
383     --- a/drivers/char/random.c
384     +++ b/drivers/char/random.c
385     @@ -1660,15 +1660,20 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
386     * value is not cryptographically secure but for several uses the cost of
387     * depleting entropy is too high
388     */
389     +DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
390     unsigned int get_random_int(void)
391     {
392     - /*
393     - * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
394     - * every second, from the entropy pool (and thus creates a limited
395     - * drain on it), and uses halfMD4Transform within the second. We
396     - * also mix it with jiffies and the PID:
397     - */
398     - return secure_ip_id((__force __be32)(current->pid + jiffies));
399     + struct keydata *keyptr;
400     + __u32 *hash = get_cpu_var(get_random_int_hash);
401     + int ret;
402     +
403     + keyptr = get_keyptr();
404     + hash[0] += current->pid + jiffies + get_cycles();
405     +
406     + ret = half_md4_transform(hash, keyptr->secret);
407     + put_cpu_var(get_random_int_hash);
408     +
409     + return ret;
410     }
411    
412     /*
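
The random.c replacement keeps a small per-CPU hash state, folds in cheap varying inputs (pid, jiffies, cycle counter), and runs it through half_md4_transform keyed by the pool's rotating secret, so frequent callers stop draining real entropy. A userspace sketch of the shape of the scheme; the mixer below is a stand-in, not the kernel's half_md4_transform, and as the patch's own comment notes the output is not cryptographically secure:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Toy mixer standing in for half_md4_transform(): nonlinear enough to
 * show the structure, nowhere near cryptographic. */
static uint32_t mix(uint32_t h[4], uint32_t secret)
{
    for (int i = 0; i < 4; i++) {
        h[i] += secret ^ (h[(i + 1) & 3] << 7) ^ (h[(i + 3) & 3] >> 5);
        h[i] = (h[i] << 13) | (h[i] >> 19);
    }
    return h[0];
}

static uint32_t hash_state[4];  /* per CPU in the kernel; global here */

static uint32_t get_random_int_like(void)
{
    /* Cheap, varying inputs take the role of pid + jiffies + get_cycles(). */
    hash_state[0] += (uint32_t)getpid() + (uint32_t)time(NULL);
    return mix(hash_state, 0x9e3779b9u);
}

int main(void)
{
    printf("%08x %08x\n", get_random_int_like(), get_random_int_like());
    return 0;
}
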
413     diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
414     index ed306eb..0c2f55a 100644
415     --- a/drivers/char/tpm/tpm_bios.c
416     +++ b/drivers/char/tpm/tpm_bios.c
417     @@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
418     unsigned char * event_entry)
419     {
420     const char *name = "";
421     - char data[40] = "";
422     + /* 41 so there is room for 40 data and 1 nul */
423     + char data[41] = "";
424     int i, n_len = 0, d_len = 0;
425     struct tcpa_pc_event *pc_event;
426    
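
The tpm_bios.c fix is the classic one-byte-short string buffer: 40 bytes of data need a 41-byte array so the terminating NUL always fits. In miniature:

#include <stdio.h>
#include <string.h>

#define MAX_DATA 40

int main(void)
{
    /* MAX_DATA characters plus the terminating '\0'; declaring it
     * char data[MAX_DATA] would lose the NUL for a full-length value. */
    char data[MAX_DATA + 1] = "";
    const char *src = "0123456789012345678901234567890123456789"; /* 40 chars */

    snprintf(data, sizeof(data), "%s", src);
    printf("%zu: %s\n", strlen(data), data);
    return 0;
}
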
427     diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
428     index 3f0fdd1..856b3cc 100644
429     --- a/drivers/crypto/padlock-aes.c
430     +++ b/drivers/crypto/padlock-aes.c
431     @@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
432     MODULE_LICENSE("GPL");
433     MODULE_AUTHOR("Michal Ludvig");
434    
435     -MODULE_ALIAS("aes-all");
436     +MODULE_ALIAS("aes");
437     diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
438     index c31afbd..c41f339 100644
439     --- a/drivers/gpu/drm/r128/r128_cce.c
440     +++ b/drivers/gpu/drm/r128/r128_cce.c
441     @@ -511,9 +511,9 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
442    
443     #if __OS_HAS_AGP
444     if (!dev_priv->is_pci) {
445     - drm_core_ioremap(dev_priv->cce_ring, dev);
446     - drm_core_ioremap(dev_priv->ring_rptr, dev);
447     - drm_core_ioremap(dev->agp_buffer_map, dev);
448     + drm_core_ioremap_wc(dev_priv->cce_ring, dev);
449     + drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
450     + drm_core_ioremap_wc(dev->agp_buffer_map, dev);
451     if (!dev_priv->cce_ring->handle ||
452     !dev_priv->ring_rptr->handle ||
453     !dev->agp_buffer_map->handle) {
454     diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
455     index f0a0f72..5e60131 100644
456     --- a/drivers/hid/usbhid/hid-core.c
457     +++ b/drivers/hid/usbhid/hid-core.c
458     @@ -765,7 +765,7 @@ static int usbhid_parse(struct hid_device *hid)
459     goto err;
460     }
461    
462     - hid->quirks = quirks;
463     + hid->quirks |= quirks;
464    
465     return 0;
466     err:
467     diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
468     index b5e3b28..a1787fd 100644
469     --- a/drivers/hwmon/lm78.c
470     +++ b/drivers/hwmon/lm78.c
471     @@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
472     .name = "lm78",
473     },
474     .probe = lm78_isa_probe,
475     - .remove = lm78_isa_remove,
476     + .remove = __devexit_p(lm78_isa_remove),
477     };
478    
479    
480     diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
481     index af70777..ff8229c 100644
482     --- a/drivers/ide/ide-io.c
483     +++ b/drivers/ide/ide-io.c
484     @@ -967,7 +967,7 @@ void ide_timer_expiry (unsigned long data)
485     }
486     spin_lock_irq(&hwif->lock);
487     enable_irq(hwif->irq);
488     - if (startstop == ide_stopped) {
489     + if (startstop == ide_stopped && hwif->polling == 0) {
490     ide_unlock_port(hwif);
491     plug_device = 1;
492     }
493     @@ -1145,7 +1145,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
494     * same irq as is currently being serviced here, and Linux
495     * won't allow another of the same (on any CPU) until we return.
496     */
497     - if (startstop == ide_stopped) {
498     + if (startstop == ide_stopped && hwif->polling == 0) {
499     BUG_ON(hwif->handler);
500     ide_unlock_port(hwif);
501     plug_device = 1;
502     diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
503     index b1892bd..5defb72 100644
504     --- a/drivers/ide/ide-iops.c
505     +++ b/drivers/ide/ide-iops.c
506     @@ -587,8 +587,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
507    
508     /*
509     * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
510     - * We list them here and depend on the device side cable detection for them.
511     - *
512     * Some optical devices with the buggy firmwares have the same problem.
513     */
514     static const struct drive_list_entry ivb_list[] = {
515     @@ -632,10 +630,25 @@ u8 eighty_ninty_three (ide_drive_t *drive)
516     * - force bit13 (80c cable present) check also for !ivb devices
517     * (unless the slave device is pre-ATA3)
518     */
519     - if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
520     - (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
521     + if (id[ATA_ID_HW_CONFIG] & 0x4000)
522     return 1;
523    
524     + if (ivb) {
525     + const char *model = (char *)&id[ATA_ID_PROD];
526     +
527     + if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
528     + /*
529     + * These ATAPI devices always report 80c cable
530     + * so we have to depend on the host in this case.
531     + */
532     + if (hwif->cbl == ATA_CBL_PATA80)
533     + return 1;
534     + } else {
535     + /* Depend on the device side cable detection. */
536     + if (id[ATA_ID_HW_CONFIG] & 0x2000)
537     + return 1;
538     + }
539     + }
540     no_80w:
541     if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
542     return 0;
543     diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
544     index bddae2b..515fd4e 100644
545     --- a/drivers/ide/ide-pci-generic.c
546     +++ b/drivers/ide/ide-pci-generic.c
547     @@ -35,6 +35,16 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
548    
549     #define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
550    
551     +static void netcell_quirkproc(ide_drive_t *drive)
552     +{
553     + /* mark words 85-87 as valid */
554     + drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
555     +}
556     +
557     +static const struct ide_port_ops netcell_port_ops = {
558     + .quirkproc = netcell_quirkproc,
559     +};
560     +
561     #define DECLARE_GENERIC_PCI_DEV(extra_flags) \
562     { \
563     .name = DRV_NAME, \
564     @@ -76,6 +86,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
565    
566     { /* 6: Revolution */
567     .name = DRV_NAME,
568     + .port_ops = &netcell_port_ops,
569     .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
570     IDE_HFLAG_TRUST_BIOS_FOR_DMA |
571     IDE_HFLAG_OFF_BOARD,
572     diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
573     index 4e6181c..af7860c 100644
574     --- a/drivers/ide/ide-tape.c
575     +++ b/drivers/ide/ide-tape.c
576     @@ -654,12 +654,6 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
577     {
578     idetape_tape_t *tape = drive->driver_data;
579    
580     - if (drive->pc->c[0] == REQUEST_SENSE &&
581     - pc->c[0] == REQUEST_SENSE) {
582     - printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
583     - "Two request sense in serial were issued\n");
584     - }
585     -
586     if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
587     tape->failed_pc = pc;
588    
589     diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
590     index 6d9f810..635c787 100644
591     --- a/drivers/mtd/devices/mtd_dataflash.c
592     +++ b/drivers/mtd/devices/mtd_dataflash.c
593     @@ -184,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
594     /* Calculate flash page address; use block erase (for speed) if
595     * we're at a block boundary and need to erase the whole block.
596     */
597     - pageaddr = div_u64(instr->len, priv->page_size);
598     + pageaddr = div_u64(instr->addr, priv->page_size);
599     do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
600     pageaddr = pageaddr << priv->page_offset;
601    
602     diff --git a/drivers/net/Makefile b/drivers/net/Makefile
603     index 471baaf..ac7b12c 100644
604     --- a/drivers/net/Makefile
605     +++ b/drivers/net/Makefile
606     @@ -100,7 +100,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
607     obj-$(CONFIG_NET) += Space.o loopback.o
608     obj-$(CONFIG_SEEQ8005) += seeq8005.o
609     obj-$(CONFIG_NET_SB1000) += sb1000.o
610     -obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
611     +obj-$(CONFIG_MAC8390) += mac8390.o
612     obj-$(CONFIG_APNE) += apne.o 8390.o
613     obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
614     obj-$(CONFIG_HP100) += hp100.o
615     diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
616     index 6b6530f..a7e688a 100644
617     --- a/drivers/net/bnx2.c
618     +++ b/drivers/net/bnx2.c
619     @@ -2585,6 +2585,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
620     /* Tell compiler that status block fields can change. */
621     barrier();
622     cons = *bnapi->hw_tx_cons_ptr;
623     + barrier();
624     if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
625     cons++;
626     return cons;
627     @@ -2864,6 +2865,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
628     /* Tell compiler that status block fields can change. */
629     barrier();
630     cons = *bnapi->hw_rx_cons_ptr;
631     + barrier();
632     if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
633     cons++;
634     return cons;
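
The bnx2.c change brackets the read of the DMA-updated consumer index with compiler barriers on both sides: the first stops the compiler from reusing a stale pre-read value, the second stops it from sinking a reload of the same field past the index check. A minimal sketch with GCC-style inline assembly standing in for the kernel's barrier():

#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct status_blk {
    uint16_t tx_cons;   /* written by the NIC via DMA */
};

static uint16_t get_hw_tx_cons(struct status_blk *sb)
{
    uint16_t cons;

    barrier();           /* force a fresh load of the status block field */
    cons = sb->tx_cons;
    barrier();           /* and keep later code from re-loading it */
    return cons;
}

int main(void)
{
    struct status_blk sb = { .tx_cons = 7 };
    printf("%u\n", get_hw_tx_cons(&sb));
    return 0;
}
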
635     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
636     index 27fb7f5..77da4c5 100644
637     --- a/drivers/net/bonding/bond_alb.c
638     +++ b/drivers/net/bonding/bond_alb.c
639     @@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
640    
641     if (arp->op_code == htons(ARPOP_REPLY)) {
642     /* update rx hash table for this ARP */
643     - printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
644     - bond_dev->name);
645     bond = netdev_priv(bond_dev);
646     rlb_update_entry_from_arp(bond, arp);
647     pr_debug("Server received an ARP Reply from client\n");
648     @@ -1739,9 +1737,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
649     }
650     }
651    
652     - write_unlock_bh(&bond->curr_slave_lock);
653     - read_unlock(&bond->lock);
654     -
655     if (swap_slave) {
656     alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
657     alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
658     @@ -1749,16 +1744,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
659     alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
660     bond->alb_info.rlb_enabled);
661    
662     + read_lock(&bond->lock);
663     alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
664     if (bond->alb_info.rlb_enabled) {
665     /* inform clients mac address has changed */
666     rlb_req_update_slave_clients(bond, bond->curr_active_slave);
667     }
668     + read_unlock(&bond->lock);
669     }
670    
671     - read_lock(&bond->lock);
672     - write_lock_bh(&bond->curr_slave_lock);
673     -
674     return 0;
675     }
676    
677     diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
678     index d436e27..df6459c 100644
679     --- a/drivers/net/e1000/e1000_main.c
680     +++ b/drivers/net/e1000/e1000_main.c
681     @@ -4009,8 +4009,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
682     PCI_DMA_FROMDEVICE);
683    
684     length = le16_to_cpu(rx_desc->length);
685     -
686     - if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
687     + /* !EOP means multiple descriptors were used to store a single
688     + * packet, also make sure the frame isn't just CRC only */
689     + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
690     /* All receives must fit into a single buffer */
691     E1000_DBG("%s: Receive packet consumed multiple"
692     " buffers\n", netdev->name);
693     diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
694     index 3c831f1..44ceb36 100644
695     --- a/drivers/net/igb/igb_ethtool.c
696     +++ b/drivers/net/igb/igb_ethtool.c
697     @@ -2024,6 +2024,10 @@ static struct ethtool_ops igb_ethtool_ops = {
698     .get_ethtool_stats = igb_get_ethtool_stats,
699     .get_coalesce = igb_get_coalesce,
700     .set_coalesce = igb_set_coalesce,
701     + .get_flags = ethtool_op_get_flags,
702     +#ifdef CONFIG_IGB_LRO
703     + .set_flags = ethtool_op_set_flags,
704     +#endif
705     };
706    
707     void igb_set_ethtool_ops(struct net_device *netdev)
708     diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
709     index 8e88486..f26667d 100644
710     --- a/drivers/net/mac8390.c
711     +++ b/drivers/net/mac8390.c
712     @@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
713     if (!MACH_IS_MAC)
714     return ERR_PTR(-ENODEV);
715    
716     - dev = alloc_ei_netdev();
717     + dev = ____alloc_ei_netdev(0);
718     if (!dev)
719     return ERR_PTR(-ENOMEM);
720    
721     @@ -481,10 +481,10 @@ void cleanup_module(void)
722     static const struct net_device_ops mac8390_netdev_ops = {
723     .ndo_open = mac8390_open,
724     .ndo_stop = mac8390_close,
725     - .ndo_start_xmit = ei_start_xmit,
726     - .ndo_tx_timeout = ei_tx_timeout,
727     - .ndo_get_stats = ei_get_stats,
728     - .ndo_set_multicast_list = ei_set_multicast_list,
729     + .ndo_start_xmit = __ei_start_xmit,
730     + .ndo_tx_timeout = __ei_tx_timeout,
731     + .ndo_get_stats = __ei_get_stats,
732     + .ndo_set_multicast_list = __ei_set_multicast_list,
733     .ndo_validate_addr = eth_validate_addr,
734     .ndo_set_mac_address = eth_mac_addr,
735     .ndo_change_mtu = eth_change_mtu,
736     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
737     index 7e24b50..f5824af 100644
738     --- a/drivers/net/macvlan.c
739     +++ b/drivers/net/macvlan.c
740     @@ -329,7 +329,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
741     const struct macvlan_dev *vlan = netdev_priv(dev);
742     struct net_device *lowerdev = vlan->lowerdev;
743    
744     - if (lowerdev->ethtool_ops->get_rx_csum == NULL)
745     + if (lowerdev->ethtool_ops == NULL ||
746     + lowerdev->ethtool_ops->get_rx_csum == NULL)
747     return 0;
748     return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
749     }
750     @@ -340,7 +341,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
751     const struct macvlan_dev *vlan = netdev_priv(dev);
752     struct net_device *lowerdev = vlan->lowerdev;
753    
754     - if (!lowerdev->ethtool_ops->get_settings)
755     + if (!lowerdev->ethtool_ops ||
756     + !lowerdev->ethtool_ops->get_settings)
757     return -EOPNOTSUPP;
758    
759     return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
760     @@ -351,7 +353,8 @@ static u32 macvlan_ethtool_get_flags(struct net_device *dev)
761     const struct macvlan_dev *vlan = netdev_priv(dev);
762     struct net_device *lowerdev = vlan->lowerdev;
763    
764     - if (!lowerdev->ethtool_ops->get_flags)
765     + if (!lowerdev->ethtool_ops ||
766     + !lowerdev->ethtool_ops->get_flags)
767     return 0;
768     return lowerdev->ethtool_ops->get_flags(lowerdev);
769     }
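
The macvlan fixes all add the same two-level guard: check that the lower device has an ethtool_ops table at all before testing the individual callback, since dereferencing a NULL ops pointer oopses. The pattern in isolation (the struct names are simplified stand-ins, not the kernel types):

#include <stdio.h>

struct ethtool_ops_like { int (*get_rx_csum)(void *dev); };
struct dev_like { const struct ethtool_ops_like *ethtool_ops; };

/* Check the ops table itself before the individual callback: a device
 * with no ethtool support at all leaves the pointer NULL. */
static int get_rx_csum_safe(struct dev_like *dev)
{
    if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_rx_csum == NULL)
        return 0;
    return dev->ethtool_ops->get_rx_csum(dev);
}

int main(void)
{
    struct dev_like bare = { NULL };
    printf("%d\n", get_rx_csum_safe(&bare));   /* 0, no crash */
    return 0;
}
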
770     diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
771     index e9c1296..53ff238 100644
772     --- a/drivers/net/myri10ge/myri10ge.c
773     +++ b/drivers/net/myri10ge/myri10ge.c
774     @@ -2446,6 +2446,7 @@ static int myri10ge_open(struct net_device *dev)
775     lro_mgr->lro_arr = ss->rx_done.lro_desc;
776     lro_mgr->get_frag_header = myri10ge_get_frag_header;
777     lro_mgr->max_aggr = myri10ge_lro_max_pkts;
778     + lro_mgr->frag_align_pad = 2;
779     if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
780     lro_mgr->max_aggr = MAX_SKB_FRAGS;
781    
782     diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
783     index 9201e5a..0b8ec7f 100644
784     --- a/drivers/net/r8169.c
785     +++ b/drivers/net/r8169.c
786     @@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
787     int handled = 0;
788     int status;
789    
790     + /* loop handling interrupts until we have no new ones or
791     + * we hit an invalid/hotplug case.
792     + */
793     status = RTL_R16(IntrStatus);
794     + while (status && status != 0xffff) {
795     + handled = 1;
796    
797     - /* hotplug/major error/no more work/shared irq */
798     - if ((status == 0xffff) || !status)
799     - goto out;
800     -
801     - handled = 1;
802     + /* Handle all of the error cases first. These will reset
803     + * the chip, so just exit the loop.
804     + */
805     + if (unlikely(!netif_running(dev))) {
806     + rtl8169_asic_down(ioaddr);
807     + break;
808     + }
809    
810     - if (unlikely(!netif_running(dev))) {
811     - rtl8169_asic_down(ioaddr);
812     - goto out;
813     - }
814     + /* Work around for rx fifo overflow */
815     + if (unlikely(status & RxFIFOOver) &&
816     + (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
817     + netif_stop_queue(dev);
818     + rtl8169_tx_timeout(dev);
819     + break;
820     + }
821    
822     - status &= tp->intr_mask;
823     - RTL_W16(IntrStatus,
824     - (status & RxFIFOOver) ? (status | RxOverflow) : status);
825     + if (unlikely(status & SYSErr)) {
826     + rtl8169_pcierr_interrupt(dev);
827     + break;
828     + }
829    
830     - if (!(status & tp->intr_event))
831     - goto out;
832     + if (status & LinkChg)
833     + rtl8169_check_link_status(dev, tp, ioaddr);
834    
835     - /* Work around for rx fifo overflow */
836     - if (unlikely(status & RxFIFOOver) &&
837     - (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
838     - netif_stop_queue(dev);
839     - rtl8169_tx_timeout(dev);
840     - goto out;
841     - }
842     + /* We need to see the latest version of tp->intr_mask to
843     + * avoid ignoring an MSI interrupt and having to wait for
844     + * another event which may never come.
845     + */
846     + smp_rmb();
847     + if (status & tp->intr_mask & tp->napi_event) {
848     + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
849     + tp->intr_mask = ~tp->napi_event;
850     +
851     + if (likely(netif_rx_schedule_prep(&tp->napi)))
852     + __netif_rx_schedule(&tp->napi);
853     + else if (netif_msg_intr(tp)) {
854     + printk(KERN_INFO "%s: interrupt %04x in poll\n",
855     + dev->name, status);
856     + }
857     + }
858    
859     - if (unlikely(status & SYSErr)) {
860     - rtl8169_pcierr_interrupt(dev);
861     - goto out;
862     + /* We only get a new MSI interrupt when all active irq
863     + * sources on the chip have been acknowledged. So, ack
864     + * everything we've seen and check if new sources have become
865     + * active to avoid blocking all interrupts from the chip.
866     + */
867     + RTL_W16(IntrStatus,
868     + (status & RxFIFOOver) ? (status | RxOverflow) : status);
869     + status = RTL_R16(IntrStatus);
870     }
871    
872     - if (status & LinkChg)
873     - rtl8169_check_link_status(dev, tp, ioaddr);
874     -
875     - if (status & tp->napi_event) {
876     - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
877     - tp->intr_mask = ~tp->napi_event;
878     -
879     - if (likely(netif_rx_schedule_prep(&tp->napi)))
880     - __netif_rx_schedule(&tp->napi);
881     - else if (netif_msg_intr(tp)) {
882     - printk(KERN_INFO "%s: interrupt %04x in poll\n",
883     - dev->name, status);
884     - }
885     - }
886     -out:
887     return IRQ_RETVAL(handled);
888     }
889    
890     @@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
891    
892     if (work_done < budget) {
893     netif_rx_complete(napi);
894     - tp->intr_mask = 0xffff;
895     - /*
896     - * 20040426: the barrier is not strictly required but the
897     - * behavior of the irq handler could be less predictable
898     - * without it. Btw, the lack of flush for the posted pci
899     - * write is safe - FR
900     +
901     + /* We need for force the visibility of tp->intr_mask
902     + * for other CPUs, as we can lose an MSI interrupt
903     + * and potentially wait for a retransmit timeout if we don't.
904     + * The posted write to IntrMask is safe, as it will
905     + * eventually make it to the chip and we won't lose anything
906     + * until it does.
907     */
908     + tp->intr_mask = 0xffff;
909     smp_wmb();
910     RTL_W16(IntrMask, tp->intr_event);
911     }
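
The r8169 rework turns the handler into an ack-and-recheck loop for MSI and pairs an smp_rmb() in the interrupt path with the smp_wmb() the poll path issues after publishing tp->intr_mask, so a NAPI poll that just completed on another CPU is observed before the handler decides to ignore an event. The barrier pairing, reduced to a sketch (__sync_synchronize() is a stand-in for the kernel barrier macros):

#include <stdint.h>
#include <stdio.h>

#define smp_wmb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()

static volatile uint16_t intr_mask;     /* tp->intr_mask in the driver */
static volatile uint16_t hw_mask_reg;   /* stand-in for RTL_W16(IntrMask, ...) */

/* Poll completion: publish the software mask, then re-enable the chip,
 * so any interrupt raised afterwards sees the updated mask. */
static void napi_complete_path(uint16_t intr_event)
{
    intr_mask = 0xffff;
    smp_wmb();
    hw_mask_reg = intr_event;
}

/* IRQ path: read the mask only after the barrier, so a poll that just
 * finished on another CPU is not missed and the event not dropped. */
static int irq_sees_event(uint16_t status, uint16_t napi_event)
{
    smp_rmb();
    return (status & intr_mask & napi_event) != 0;
}

int main(void)
{
    napi_complete_path(0x00ff);
    printf("%d\n", irq_sees_event(0x0001, 0x0001));   /* 1 */
    return 0;
}
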
912     diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
913     index 70a8b21..ab621b0 100644
914     --- a/drivers/net/wireless/iwlwifi/iwl-sta.c
915     +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
916     @@ -708,6 +708,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
917     {
918     unsigned long flags;
919     int ret = 0;
920     + __le16 key_flags = 0;
921     +
922     + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
923     + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
924     + key_flags &= ~STA_KEY_FLG_INVALID;
925     +
926     + if (sta_id == priv->hw_params.bcast_sta_id)
927     + key_flags |= STA_KEY_MULTICAST_MSK;
928    
929     keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
930     keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
931     @@ -727,6 +735,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
932     WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
933     "no space for new kew");
934    
935     + priv->stations[sta_id].sta.key.key_flags = key_flags;
936     +
937     +
938     /* This copy is acutally not needed: we get the key with each TX */
939     memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
940    
941     @@ -743,9 +754,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
942     {
943     u8 sta_id = IWL_INVALID_STATION;
944     unsigned long flags;
945     - __le16 key_flags = 0;
946     int i;
947     - DECLARE_MAC_BUF(mac);
948    
949     sta_id = iwl_find_station(priv, addr);
950     if (sta_id == IWL_INVALID_STATION) {
951     @@ -760,16 +769,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
952     return;
953     }
954    
955     - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
956     - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
957     - key_flags &= ~STA_KEY_FLG_INVALID;
958     -
959     - if (sta_id == priv->hw_params.bcast_sta_id)
960     - key_flags |= STA_KEY_MULTICAST_MSK;
961     -
962     spin_lock_irqsave(&priv->sta_lock, flags);
963    
964     - priv->stations[sta_id].sta.key.key_flags = key_flags;
965     priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
966    
967     for (i = 0; i < 5; i++)
968     diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
969     index c03f1d2..faa0fcf 100644
970     --- a/drivers/scsi/3w-xxxx.c
971     +++ b/drivers/scsi/3w-xxxx.c
972     @@ -6,7 +6,7 @@
973     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
974     Brad Strand <linux@3ware.com>
975    
976     - Copyright (C) 1999-2007 3ware Inc.
977     + Copyright (C) 1999-2009 3ware Inc.
978    
979     Kernel compatiblity By: Andre Hedrick <andre@suse.com>
980     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
981     @@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
982     {
983     dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
984    
985     - scsi_dma_unmap(cmd);
986     + if (cmd->SCp.phase == TW_PHASE_SGLIST)
987     + scsi_dma_unmap(cmd);
988     } /* End tw_unmap_scsi_data() */
989    
990     /* This function will reset a device extension */
991     diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
992     index 0742e68..e938615 100644
993     --- a/drivers/scsi/3w-xxxx.h
994     +++ b/drivers/scsi/3w-xxxx.h
995     @@ -6,7 +6,7 @@
996     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
997     Brad Strand <linux@3ware.com>
998    
999     - Copyright (C) 1999-2007 3ware Inc.
1000     + Copyright (C) 1999-2009 3ware Inc.
1001    
1002     Kernel compatiblity By: Andre Hedrick <andre@suse.com>
1003     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
1004     diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
1005     index 2b7531d..08eefec 100644
1006     --- a/drivers/serial/icom.c
1007     +++ b/drivers/serial/icom.c
1008     @@ -1482,8 +1482,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
1009    
1010     free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
1011     iounmap(icom_adapter->base_addr);
1012     - icom_free_adapter(icom_adapter);
1013     pci_release_regions(icom_adapter->pci_dev);
1014     + icom_free_adapter(icom_adapter);
1015     }
1016    
1017     static void icom_kref_release(struct kref *kref)
1018     diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
1019     index 28d2c8d..28b07ce 100644
1020     --- a/drivers/serial/mpc52xx_uart.c
1021     +++ b/drivers/serial/mpc52xx_uart.c
1022     @@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
1023     pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
1024     co, co->index, options);
1025    
1026     - if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
1027     + if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
1028     pr_debug("PSC%x out of range\n", co->index);
1029     return -EINVAL;
1030     }
1031     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1032     index b3d5a23..7f2909a 100644
1033     --- a/drivers/usb/class/cdc-acm.c
1034     +++ b/drivers/usb/class/cdc-acm.c
1035     @@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
1036     tty->driver_data = acm;
1037     acm->tty = tty;
1038    
1039     - /* force low_latency on so that our tty_push actually forces the data through,
1040     - otherwise it is scheduled, and with high data rates data can get lost. */
1041     - tty->low_latency = 1;
1042     -
1043     if (usb_autopm_get_interface(acm->control) < 0)
1044     goto early_bail;
1045     else
1046     diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
1047     index b899f1a..f14d74f 100644
1048     --- a/drivers/usb/host/isp1760-hcd.c
1049     +++ b/drivers/usb/host/isp1760-hcd.c
1050     @@ -1644,6 +1644,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1051     u32 reg_base, or_reg, skip_reg;
1052     unsigned long flags;
1053     struct ptd ptd;
1054     + packet_enqueue *pe;
1055    
1056     switch (usb_pipetype(urb->pipe)) {
1057     case PIPE_ISOCHRONOUS:
1058     @@ -1655,6 +1656,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1059     reg_base = INT_REGS_OFFSET;
1060     or_reg = HC_INT_IRQ_MASK_OR_REG;
1061     skip_reg = HC_INT_PTD_SKIPMAP_REG;
1062     + pe = enqueue_an_INT_packet;
1063     break;
1064    
1065     default:
1066     @@ -1662,6 +1664,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1067     reg_base = ATL_REGS_OFFSET;
1068     or_reg = HC_ATL_IRQ_MASK_OR_REG;
1069     skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1070     + pe = enqueue_an_ATL_packet;
1071     break;
1072     }
1073    
1074     @@ -1673,6 +1676,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1075     u32 skip_map;
1076     u32 or_map;
1077     struct isp1760_qtd *qtd;
1078     + struct isp1760_qh *qh = ints->qh;
1079    
1080     skip_map = isp1760_readl(hcd->regs + skip_reg);
1081     skip_map |= 1 << i;
1082     @@ -1685,8 +1689,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1083     priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
1084     + i * sizeof(ptd), sizeof(ptd));
1085     qtd = ints->qtd;
1086     -
1087     - clean_up_qtdlist(qtd);
1088     + qtd = clean_up_qtdlist(qtd);
1089    
1090     free_mem(priv, ints->payload);
1091    
1092     @@ -1697,7 +1700,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1093     ints->payload = 0;
1094    
1095     isp1760_urb_done(priv, urb, status);
1096     + if (qtd)
1097     + pe(hcd, qh, qtd);
1098     break;
1099     +
1100     + } else if (ints->qtd) {
1101     + struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
1102     +
1103     + for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
1104     + if (qtd->urb == urb) {
1105     + prev_qtd->hw_next = clean_up_qtdlist(qtd);
1106     + isp1760_urb_done(priv, urb, status);
1107     + break;
1108     + }
1109     + prev_qtd = qtd;
1110     + }
1111     + /* we found the urb before the end of the list */
1112     + if (qtd)
1113     + break;
1114     }
1115     ints++;
1116     }
1117     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1118     index 5daa517..4423875 100644
1119     --- a/drivers/usb/serial/ftdi_sio.c
1120     +++ b/drivers/usb/serial/ftdi_sio.c
1121     @@ -1485,14 +1485,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
1122    
1123     remove_sysfs_attrs(port);
1124    
1125     - /* all open ports are closed at this point
1126     - * (by usbserial.c:__serial_close, which calls ftdi_close)
1127     - */
1128     -
1129     - if (priv) {
1130     - usb_set_serial_port_data(port, NULL);
1131     - kref_put(&priv->kref, ftdi_sio_priv_release);
1132     - }
1133     + kref_put(&priv->kref, ftdi_sio_priv_release);
1134    
1135     return 0;
1136     }
1137     diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1138     index 4b36d88..61b7d9e 100644
1139     --- a/drivers/usb/serial/usb-serial.c
1140     +++ b/drivers/usb/serial/usb-serial.c
1141     @@ -978,6 +978,7 @@ int usb_serial_probe(struct usb_interface *interface,
1142     if (retval > 0) {
1143     /* quietly accept this device, but don't bind to a
1144     serial port as it's about to disappear */
1145     + serial->num_ports = 0;
1146     goto exit;
1147     }
1148     }
1149     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1150     index 90909f9..a2bd86e 100644
1151     --- a/fs/ext4/ext4.h
1152     +++ b/fs/ext4/ext4.h
1153     @@ -248,6 +248,30 @@ struct flex_groups {
1154     #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
1155     #define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */
1156    
1157     +/* Flags that should be inherited by new inodes from their parent. */
1158     +#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
1159     + EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
1160     + EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
1161     + EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
1162     + EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
1163     +
1164     +/* Flags that are appropriate for regular files (all but dir-specific ones). */
1165     +#define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL))
1166     +
1167     +/* Flags that are appropriate for non-directories/regular files. */
1168     +#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
1169     +
1170     +/* Mask out flags that are inappropriate for the given type of inode. */
1171     +static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
1172     +{
1173     + if (S_ISDIR(mode))
1174     + return flags;
1175     + else if (S_ISREG(mode))
1176     + return flags & EXT4_REG_FLMASK;
1177     + else
1178     + return flags & EXT4_OTHER_FLMASK;
1179     +}
1180     +
1181     /*
1182     * Inode dynamic state flags
1183     */
1184     @@ -255,6 +279,7 @@ struct flex_groups {
1185     #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
1186     #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
1187     #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
1188     +#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
1189    
1190     /* Used to pass group descriptor data when online resize is done */
1191     struct ext4_new_group_input {
1192     @@ -302,7 +327,9 @@ struct ext4_new_group_data {
1193     #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
1194     #define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
1195     #define EXT4_IOC_MIGRATE _IO('f', 9)
1196     + /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
1197     /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
1198     +#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
1199    
1200     /*
1201     * ioctl commands in 32 bit emulation
1202     @@ -530,7 +557,7 @@ do { \
1203     #define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */
1204     #define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */
1205     #define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
1206     -#define EXT4_MOUNT_RESERVATION 0x10000 /* Preallocation */
1207     +#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
1208     #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
1209     #define EXT4_MOUNT_NOBH 0x40000 /* No bufferheads */
1210     #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
1211     @@ -1091,6 +1118,7 @@ extern int ext4_can_truncate(struct inode *inode);
1212     extern void ext4_truncate(struct inode *);
1213     extern void ext4_set_inode_flags(struct inode *);
1214     extern void ext4_get_inode_flags(struct ext4_inode_info *);
1215     +extern int ext4_alloc_da_blocks(struct inode *inode);
1216     extern void ext4_set_aops(struct inode *inode);
1217     extern int ext4_writepage_trans_blocks(struct inode *);
1218     extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
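
The ext4_mask_flags() helper added above encodes a simple policy: directories inherit everything in EXT4_FL_INHERITED, regular files drop the directory-only bits, and everything else keeps only NODUMP and NOATIME. A userspace rendering of the same policy; the flag values mirror the ext4 on-disk definitions, and the regular-file mask here drops only DIRSYNC for brevity where the kernel's also drops TOPDIR_FL:

#include <stdio.h>
#include <sys/stat.h>

#define FL_SYNC    0x00000008u
#define FL_NODUMP  0x00000040u
#define FL_NOATIME 0x00000080u
#define FL_DIRSYNC 0x00010000u

#define REG_FLMASK   (~FL_DIRSYNC)              /* dir-only bits dropped */
#define OTHER_FLMASK (FL_NODUMP | FL_NOATIME)   /* devices, fifos, ... */

static unsigned mask_flags(mode_t mode, unsigned flags)
{
    if (S_ISDIR(mode))
        return flags;
    if (S_ISREG(mode))
        return flags & REG_FLMASK;
    return flags & OTHER_FLMASK;
}

int main(void)
{
    unsigned inherited = FL_DIRSYNC | FL_NOATIME | FL_SYNC;

    printf("dir:  %#x\n", mask_flags(S_IFDIR, inherited));
    printf("file: %#x\n", mask_flags(S_IFREG, inherited));
    printf("fifo: %#x\n", mask_flags(S_IFIFO, inherited));
    return 0;
}
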
1219     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1220     index e0aa4fe..8723d0b 100644
1221     --- a/fs/ext4/extents.c
1222     +++ b/fs/ext4/extents.c
1223     @@ -1740,11 +1740,13 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1224     {
1225     struct ext4_ext_cache *cex;
1226     BUG_ON(len == 0);
1227     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1228     cex = &EXT4_I(inode)->i_cached_extent;
1229     cex->ec_type = type;
1230     cex->ec_block = block;
1231     cex->ec_len = len;
1232     cex->ec_start = start;
1233     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1234     }
1235    
1236     /*
1237     @@ -1801,12 +1803,17 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1238     struct ext4_extent *ex)
1239     {
1240     struct ext4_ext_cache *cex;
1241     + int ret = EXT4_EXT_CACHE_NO;
1242    
1243     + /*
1244     + * We borrow i_block_reservation_lock to protect i_cached_extent
1245     + */
1246     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1247     cex = &EXT4_I(inode)->i_cached_extent;
1248    
1249     /* has cache valid data? */
1250     if (cex->ec_type == EXT4_EXT_CACHE_NO)
1251     - return EXT4_EXT_CACHE_NO;
1252     + goto errout;
1253    
1254     BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1255     cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1256     @@ -1817,11 +1824,11 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1257     ext_debug("%u cached by %u:%u:%llu\n",
1258     block,
1259     cex->ec_block, cex->ec_len, cex->ec_start);
1260     - return cex->ec_type;
1261     + ret = cex->ec_type;
1262     }
1263     -
1264     - /* not in cache */
1265     - return EXT4_EXT_CACHE_NO;
1266     +errout:
1267     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1268     + return ret;
1269     }
1270    
1271     /*
1272     @@ -2776,6 +2783,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1273     if (allocated > max_blocks)
1274     allocated = max_blocks;
1275     set_buffer_unwritten(bh_result);
1276     + bh_result->b_bdev = inode->i_sb->s_bdev;
1277     + bh_result->b_blocknr = newblock;
1278     goto out2;
1279     }
1280    
1281     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1282     index f731cb5..588af8c 100644
1283     --- a/fs/ext4/file.c
1284     +++ b/fs/ext4/file.c
1285     @@ -33,9 +33,14 @@
1286     */
1287     static int ext4_release_file(struct inode *inode, struct file *filp)
1288     {
1289     + if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
1290     + ext4_alloc_da_blocks(inode);
1291     + EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
1292     + }
1293     /* if we are the last writer on the inode, drop the block reservation */
1294     if ((filp->f_mode & FMODE_WRITE) &&
1295     - (atomic_read(&inode->i_writecount) == 1))
1296     + (atomic_read(&inode->i_writecount) == 1) &&
1297     + !EXT4_I(inode)->i_reserved_data_blocks)
1298     {
1299     down_write(&EXT4_I(inode)->i_data_sem);
1300     ext4_discard_preallocations(inode);
1301     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1302     index 2d2b358..345cba1 100644
1303     --- a/fs/ext4/ialloc.c
1304     +++ b/fs/ext4/ialloc.c
1305     @@ -720,11 +720,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
1306     ret2 = find_group_flex(sb, dir, &group);
1307     if (ret2 == -1) {
1308     ret2 = find_group_other(sb, dir, &group);
1309     - if (ret2 == 0 && once)
1310     + if (ret2 == 0 && once) {
1311     once = 0;
1312     printk(KERN_NOTICE "ext4: find_group_flex "
1313     "failed, fallback succeeded dir %lu\n",
1314     dir->i_ino);
1315     + }
1316     }
1317     goto got_group;
1318     }
1319     @@ -885,16 +886,12 @@ got:
1320     ei->i_disksize = 0;
1321    
1322     /*
1323     - * Don't inherit extent flag from directory. We set extent flag on
1324     - * newly created directory and file only if -o extent mount option is
1325     - * specified
1326     + * Don't inherit extent flag from directory, amongst others. We set
1327     + * extent flag on newly created directory and file only if -o extent
1328     + * mount option is specified
1329     */
1330     - ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
1331     - if (S_ISLNK(mode))
1332     - ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
1333     - /* dirsync only applies to directories */
1334     - if (!S_ISDIR(mode))
1335     - ei->i_flags &= ~EXT4_DIRSYNC_FL;
1336     + ei->i_flags =
1337     + ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1338     ei->i_file_acl = 0;
1339     ei->i_dtime = 0;
1340     ei->i_block_group = group;
1341     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1342     index 2c0439d..725f20a 100644
1343     --- a/fs/ext4/inode.c
1344     +++ b/fs/ext4/inode.c
1345     @@ -1036,8 +1036,15 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1346     /* update per-inode reservations */
1347     BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
1348     EXT4_I(inode)->i_reserved_data_blocks -= used;
1349     -
1350     spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1351     +
1352     + /*
1353     + * If we have done all the pending block allocations and if
1354     + * there aren't any writers on the inode, we can discard the
1355     + * inode's preallocations.
1356     + */
1357     + if (!total && (atomic_read(&inode->i_writecount) == 0))
1358     + ext4_discard_preallocations(inode);
1359     }
1360    
1361     /*
1362     @@ -1069,6 +1076,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1363     int retval;
1364    
1365     clear_buffer_mapped(bh);
1366     + clear_buffer_unwritten(bh);
1367    
1368     /*
1369     * Try to see if we can get the block without requesting
1370     @@ -1099,6 +1107,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1371     return retval;
1372    
1373     /*
1374     + * When we call get_blocks without the create flag, the
1375     + * BH_Unwritten flag could have gotten set if the blocks
1376     + * requested were part of a uninitialized extent. We need to
1377     + * clear this flag now that we are committed to convert all or
1378     + * part of the uninitialized extent to be an initialized
1379     + * extent. This is because we need to avoid the combination
1380     + * of BH_Unwritten and BH_Mapped flags being simultaneously
1381     + * set on the buffer_head.
1382     + */
1383     + clear_buffer_unwritten(bh);
1384     +
1385     + /*
1386     * New blocks allocate and/or writing to uninitialized extent
1387     * will possibly result in updating i_data, so we take
1388     * the write lock of i_data_sem, and call get_blocks()
1389     @@ -2213,6 +2233,10 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1390     struct buffer_head *bh_result, int create)
1391     {
1392     int ret = 0;
1393     + sector_t invalid_block = ~((sector_t) 0xffff);
1394     +
1395     + if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1396     + invalid_block = ~0;
1397    
1398     BUG_ON(create == 0);
1399     BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1400     @@ -2234,11 +2258,18 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1401     /* not enough space to reserve */
1402     return ret;
1403    
1404     - map_bh(bh_result, inode->i_sb, 0);
1405     + map_bh(bh_result, inode->i_sb, invalid_block);
1406     set_buffer_new(bh_result);
1407     set_buffer_delay(bh_result);
1408     } else if (ret > 0) {
1409     bh_result->b_size = (ret << inode->i_blkbits);
1410     + /*
1411     + * With sub-block writes into unwritten extents
1412     + * we also need to mark the buffer as new so that
1413     + * the unwritten parts of the buffer get correctly zeroed.
1414     + */
1415     + if (buffer_unwritten(bh_result))
1416     + set_buffer_new(bh_result);
1417     ret = 0;
1418     }
1419    
1420     @@ -2816,6 +2847,48 @@ out:
1421     return;
1422     }
1423    
1424     +/*
1425     + * Force all delayed allocation blocks to be allocated for a given inode.
1426     + */
1427     +int ext4_alloc_da_blocks(struct inode *inode)
1428     +{
1429     + if (!EXT4_I(inode)->i_reserved_data_blocks &&
1430     + !EXT4_I(inode)->i_reserved_meta_blocks)
1431     + return 0;
1432     +
1433     + /*
1434     + * We do something simple for now. The filemap_flush() will
1435     + * also start triggering a write of the data blocks, which is
1436     + * not strictly speaking necessary (and for users of
1437     + * laptop_mode, not even desirable). However, to do otherwise
1438     + * would require replicating code paths in:
1439     + *
1440     + * ext4_da_writepages() ->
1441     + * write_cache_pages() ---> (via passed in callback function)
1442     + * __mpage_da_writepage() -->
1443     + * mpage_add_bh_to_extent()
1444     + * mpage_da_map_blocks()
1445     + *
1446     + * The problem is that write_cache_pages(), located in
1447     + * mm/page-writeback.c, marks pages clean in preparation for
1448     + * doing I/O, which is not desirable if we're not planning on
1449     + * doing I/O at all.
1450     + *
1451     + * We could call write_cache_pages(), and then redirty all of
1452     + * the pages by calling redirty_page_for_writeback() but that
1453     + * would be ugly in the extreme. So instead we would need to
1454     + * replicate parts of the code in the above functions,
1455     + * simplifying them because we wouldn't actually intend to
1456     + * write out the pages, but rather only collect contiguous
1457     + * logical block extents, call the multi-block allocator, and
1458     + * then update the buffer heads with the block allocations.
1459     + *
1460     + * For now, though, we'll cheat by calling filemap_flush(),
1461     + * which will map the blocks, and start the I/O, but not
1462     + * actually wait for the I/O to complete.
1463     + */
1464     + return filemap_flush(inode->i_mapping);
1465     +}
1466    
1467     /*
1468     * bmap() is special. It gets used by applications such as lilo and by
1469     @@ -3838,6 +3911,9 @@ void ext4_truncate(struct inode *inode)
1470     if (!ext4_can_truncate(inode))
1471     return;
1472    
1473     + if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
1474     + ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
1475     +
1476     if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1477     ext4_ext_truncate(inode);
1478     return;
1479     @@ -4248,11 +4324,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1480     ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1481     inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
1482     ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
1483     - if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
1484     - cpu_to_le32(EXT4_OS_HURD)) {
1485     + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
1486     ei->i_file_acl |=
1487     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
1488     - }
1489     inode->i_size = ext4_isize(raw_inode);
1490     ei->i_disksize = inode->i_size;
1491     inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1492     @@ -4299,6 +4373,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1493     (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
1494     }
1495    
1496     + if (ei->i_file_acl &&
1497     + ((ei->i_file_acl <
1498     + (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
1499     + EXT4_SB(sb)->s_gdb_count)) ||
1500     + (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
1501     + ext4_error(sb, __func__,
1502     + "bad extended attribute block %llu in inode #%lu",
1503     + ei->i_file_acl, inode->i_ino);
1504     + ret = -EIO;
1505     + goto bad_inode;
1506     + }
1507     +
1508     if (S_ISREG(inode->i_mode)) {
1509     inode->i_op = &ext4_file_inode_operations;
1510     inode->i_fop = &ext4_file_operations;
1511     @@ -4315,7 +4401,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1512     inode->i_op = &ext4_symlink_inode_operations;
1513     ext4_set_aops(inode);
1514     }
1515     - } else {
1516     + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
1517     + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1518     inode->i_op = &ext4_special_inode_operations;
1519     if (raw_inode->i_block[0])
1520     init_special_inode(inode, inode->i_mode,
1521     @@ -4323,6 +4410,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1522     else
1523     init_special_inode(inode, inode->i_mode,
1524     new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1525     + } else {
1526     + brelse(bh);
1527     + ret = -EIO;
1528     + ext4_error(inode->i_sb, __func__,
1529     + "bogus i_mode (%o) for inode=%lu",
1530     + inode->i_mode, inode->i_ino);
1531     + goto bad_inode;
1532     }
1533     brelse(iloc.bh);
1534     ext4_set_inode_flags(inode);
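
One detail in the inode.c hunks above deserves a note: ext4_da_get_block_prep() now maps not-yet-allocated delalloc buffers to a sentinel block number instead of block 0, so a stray read of such a buffer can no longer hit the start of the device. The sentinel is chosen to lie beyond the end of any plausible filesystem, with a fallback for truly huge ones. A small standalone sketch of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;  /* assuming a 64-bit sector_t config */

    int main(void)
    {
        sector_t blocks_count = 1ULL << 32;  /* hypothetical fs size */
        sector_t invalid_block = ~((sector_t)0xffff);

        /* Mirrors the hunk: if the filesystem is so large that the
         * sentinel could be a real block, fall back to the very last
         * representable block number instead. */
        if (invalid_block < blocks_count)
            invalid_block = ~(sector_t)0;

        printf("sentinel = %#llx\n", (unsigned long long)invalid_block);
        return 0;
    }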
1535     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1536     index 42dc83f..91e75f7 100644
1537     --- a/fs/ext4/ioctl.c
1538     +++ b/fs/ext4/ioctl.c
1539     @@ -48,8 +48,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1540     if (err)
1541     return err;
1542    
1543     - if (!S_ISDIR(inode->i_mode))
1544     - flags &= ~EXT4_DIRSYNC_FL;
1545     + flags = ext4_mask_flags(inode->i_mode, flags);
1546    
1547     err = -EPERM;
1548     mutex_lock(&inode->i_mutex);
1549     @@ -263,6 +262,20 @@ setversion_out:
1550     return err;
1551     }
1552    
1553     + case EXT4_IOC_ALLOC_DA_BLKS:
1554     + {
1555     + int err;
1556     + if (!is_owner_or_cap(inode))
1557     + return -EACCES;
1558     +
1559     + err = mnt_want_write(filp->f_path.mnt);
1560     + if (err)
1561     + return err;
1562     + err = ext4_alloc_da_blocks(inode);
1563     + mnt_drop_write(filp->f_path.mnt);
1564     + return err;
1565     + }
1566     +
1567     default:
1568     return -ENOTTY;
1569     }
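
The new EXT4_IOC_ALLOC_DA_BLKS ioctl exposes ext4_alloc_da_blocks() to userspace, letting an application force allocation of a file's delayed-allocation blocks (and start, but not wait for, writeback) without paying for a full fsync(). Per the hunk, the caller must own the file or hold CAP_FOWNER. A hedged userspace sketch; the ioctl number is taken from my reading of ext4.h in this patch series and should be treated as an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #ifndef EXT4_IOC_ALLOC_DA_BLKS
    #define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)  /* assumed, from ext4.h */
    #endif

    int main(int argc, char **argv)
    {
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file-on-ext4>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) { perror("open"); return 1; }

        /* Kicks off filemap_flush(): blocks are allocated and writeback
         * is started, but not waited for -- cheaper than fsync(). */
        if (ioctl(fd, EXT4_IOC_ALLOC_DA_BLKS) < 0)
            perror("EXT4_IOC_ALLOC_DA_BLKS");

        close(fd);
        return 0;
    }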
1570     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1571     index ba702bd..8977e60 100644
1572     --- a/fs/ext4/namei.c
1573     +++ b/fs/ext4/namei.c
1574     @@ -1052,8 +1052,16 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1575     return ERR_PTR(-EIO);
1576     }
1577     inode = ext4_iget(dir->i_sb, ino);
1578     - if (IS_ERR(inode))
1579     - return ERR_CAST(inode);
1580     + if (unlikely(IS_ERR(inode))) {
1581     + if (PTR_ERR(inode) == -ESTALE) {
1582     + ext4_error(dir->i_sb, __func__,
1583     + "deleted inode referenced: %u",
1584     + ino);
1585     + return ERR_PTR(-EIO);
1586     + } else {
1587     + return ERR_CAST(inode);
1588     + }
1589     + }
1590     }
1591     return d_splice_alias(inode, dentry);
1592     }
1593     @@ -2311,7 +2319,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1594     struct inode *old_inode, *new_inode;
1595     struct buffer_head *old_bh, *new_bh, *dir_bh;
1596     struct ext4_dir_entry_2 *old_de, *new_de;
1597     - int retval;
1598     + int retval, force_da_alloc = 0;
1599    
1600     old_bh = new_bh = dir_bh = NULL;
1601    
1602     @@ -2449,6 +2457,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1603     ext4_mark_inode_dirty(handle, new_inode);
1604     if (!new_inode->i_nlink)
1605     ext4_orphan_add(handle, new_inode);
1606     + if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
1607     + force_da_alloc = 1;
1608     }
1609     retval = 0;
1610    
1611     @@ -2457,6 +2467,8 @@ end_rename:
1612     brelse(old_bh);
1613     brelse(new_bh);
1614     ext4_journal_stop(handle);
1615     + if (retval == 0 && force_da_alloc)
1616     + ext4_alloc_da_blocks(old_inode);
1617     return retval;
1618     }
1619    
1620     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1621     index 39d1993..1ad3c20 100644
1622     --- a/fs/ext4/super.c
1623     +++ b/fs/ext4/super.c
1624     @@ -803,8 +803,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1625     if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
1626     seq_puts(seq, ",noacl");
1627     #endif
1628     - if (!test_opt(sb, RESERVATION))
1629     - seq_puts(seq, ",noreservation");
1630     if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
1631     seq_printf(seq, ",commit=%u",
1632     (unsigned) (sbi->s_commit_interval / HZ));
1633     @@ -855,6 +853,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1634     if (test_opt(sb, DATA_ERR_ABORT))
1635     seq_puts(seq, ",data_err=abort");
1636    
1637     + if (test_opt(sb, NO_AUTO_DA_ALLOC))
1638     + seq_puts(seq, ",auto_da_alloc=0");
1639     +
1640     ext4_show_quota_options(seq, sb);
1641     return 0;
1642     }
1643     @@ -1002,7 +1003,7 @@ enum {
1644     Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1645     Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
1646     Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1647     - Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
1648     + Opt_auto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
1649     Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
1650     Opt_journal_update, Opt_journal_dev,
1651     Opt_journal_checksum, Opt_journal_async_commit,
1652     @@ -1037,8 +1038,6 @@ static const match_table_t tokens = {
1653     {Opt_nouser_xattr, "nouser_xattr"},
1654     {Opt_acl, "acl"},
1655     {Opt_noacl, "noacl"},
1656     - {Opt_reservation, "reservation"},
1657     - {Opt_noreservation, "noreservation"},
1658     {Opt_noload, "noload"},
1659     {Opt_nobh, "nobh"},
1660     {Opt_bh, "bh"},
1661     @@ -1073,6 +1072,7 @@ static const match_table_t tokens = {
1662     {Opt_nodelalloc, "nodelalloc"},
1663     {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1664     {Opt_journal_ioprio, "journal_ioprio=%u"},
1665     + {Opt_auto_da_alloc, "auto_da_alloc=%u"},
1666     {Opt_err, NULL},
1667     };
1668    
1669     @@ -1205,12 +1205,6 @@ static int parse_options(char *options, struct super_block *sb,
1670     "not supported\n");
1671     break;
1672     #endif
1673     - case Opt_reservation:
1674     - set_opt(sbi->s_mount_opt, RESERVATION);
1675     - break;
1676     - case Opt_noreservation:
1677     - clear_opt(sbi->s_mount_opt, RESERVATION);
1678     - break;
1679     case Opt_journal_update:
1680     /* @@@ FIXME */
1681     /* Eventually we will want to be able to create
1682     @@ -1471,6 +1465,14 @@ set_qf_format:
1683     *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
1684     option);
1685     break;
1686     + case Opt_auto_da_alloc:
1687     + if (match_int(&args[0], &option))
1688     + return 0;
1689     + if (option)
1690     + clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1691     + else
1692     + set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1693     + break;
1694     default:
1695     printk(KERN_ERR
1696     "EXT4-fs: Unrecognized mount option \"%s\" "
1697     @@ -2099,7 +2101,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1698     sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
1699     sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
1700    
1701     - set_opt(sbi->s_mount_opt, RESERVATION);
1702     set_opt(sbi->s_mount_opt, BARRIER);
1703    
1704     /*
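Taken together with the namei.c hunks above, the superblock changes retire the old reservation/noreservation options, which no longer do anything now that the legacy block-reservation allocator has been superseded by ext4's multi-block allocator, and add auto_da_alloc, which defaults to on: replace-via-rename and replace-via-truncate patterns now force delayed-allocation blocks to disk, so a crash shortly after the rename cannot leave a zero-length file. Mounting with auto_da_alloc=0 (for example, mount -o remount,auto_da_alloc=0 /mnt) restores pure delayed-allocation behaviour for workloads that fsync() properly.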
1705     diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
1706     index 257ff26..bbe6d59 100644
1707     --- a/fs/jbd2/revoke.c
1708     +++ b/fs/jbd2/revoke.c
1709     @@ -55,6 +55,25 @@
1710     * need do nothing.
1711     * RevokeValid set, Revoked set:
1712     * buffer has been revoked.
1713     + *
1714     + * Locking rules:
1715     + * We keep two hash tables of revoke records. One hashtable belongs to the
1716     + * running transaction (is pointed to by journal->j_revoke), the other one
1717     + * belongs to the committing transaction. Accesses to the second hash table
1718     + * happen only from the kjournald and no other thread touches this table. Also
1719     + * journal_switch_revoke_table() which switches which hashtable belongs to the
1720     + * running and which to the committing transaction is called only from
1721     + * kjournald. Therefore we need no locks when accessing the hashtable belonging
1722     + * to the committing transaction.
1723     + *
1724     + * All users operating on the hash table belonging to the running transaction
1725     + * have a handle to the transaction. Therefore they are safe from kjournald
1726     + * switching hash tables under them. For operations on the lists of entries in
1727     + * the hash table j_revoke_lock is used.
1728     + *
1729     + * Finally, the replay code also uses the hash tables, but at this moment no one
1730     + * can touch them (filesystem isn't mounted yet) and hence no locking is
1731     + * needed.
1732     */
1733    
1734     #ifndef __KERNEL__
1735     @@ -401,8 +420,6 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
1736     * the second time we would still have a pending revoke to cancel. So,
1737     * do not trust the Revoked bit on buffers unless RevokeValid is also
1738     * set.
1739     - *
1740     - * The caller must have the journal locked.
1741     */
1742     int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
1743     {
1744     @@ -480,10 +497,7 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
1745     /*
1746     * Write revoke records to the journal for all entries in the current
1747     * revoke hash, deleting the entries as we go.
1748     - *
1749     - * Called with the journal lock held.
1750     */
1751     -
1752     void jbd2_journal_write_revoke_records(journal_t *journal,
1753     transaction_t *transaction)
1754     {
1755     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1756     index 3b2f697..5dd75c0 100644
1757     --- a/fs/nfs/dir.c
1758     +++ b/fs/nfs/dir.c
1759     @@ -1943,7 +1943,8 @@ int nfs_permission(struct inode *inode, int mask)
1760     case S_IFREG:
1761     /* NFSv4 has atomic_open... */
1762     if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
1763     - && (mask & MAY_OPEN))
1764     + && (mask & MAY_OPEN)
1765     + && !(mask & MAY_EXEC))
1766     goto out;
1767     break;
1768     case S_IFDIR:
1769     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
1770     index 98b93ca..1a2b0cb 100644
1771     --- a/include/linux/ptrace.h
1772     +++ b/include/linux/ptrace.h
1773     @@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code);
1774     extern void __ptrace_link(struct task_struct *child,
1775     struct task_struct *new_parent);
1776     extern void __ptrace_unlink(struct task_struct *child);
1777     +extern int __ptrace_detach(struct task_struct *tracer, struct task_struct *p);
1778     extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
1779     #define PTRACE_MODE_READ 1
1780     #define PTRACE_MODE_ATTACH 2
1781     diff --git a/kernel/exit.c b/kernel/exit.c
1782     index 467ffcd..a33f399 100644
1783     --- a/kernel/exit.c
1784     +++ b/kernel/exit.c
1785     @@ -703,22 +703,50 @@ static void exit_mm(struct task_struct * tsk)
1786     }
1787    
1788     /*
1789     - * Return nonzero if @parent's children should reap themselves.
1790     - *
1791     - * Called with write_lock_irq(&tasklist_lock) held.
1792     + * Called with irqs disabled, returns true if children should reap themselves.
1793     */
1794     -static int ignoring_children(struct task_struct *parent)
1795     +static int ignoring_children(struct sighand_struct *sigh)
1796     {
1797     int ret;
1798     - struct sighand_struct *psig = parent->sighand;
1799     - unsigned long flags;
1800     - spin_lock_irqsave(&psig->siglock, flags);
1801     - ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1802     - (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
1803     - spin_unlock_irqrestore(&psig->siglock, flags);
1804     + spin_lock(&sigh->siglock);
1805     + ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
1806     + (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
1807     + spin_unlock(&sigh->siglock);
1808     return ret;
1809     }
1810    
1811     +/* Returns nonzero if the tracee should be released. */
1812     +int __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
1813     +{
1814     + __ptrace_unlink(p);
1815     +
1816     + if (p->exit_state != EXIT_ZOMBIE)
1817     + return 0;
1818     + /*
1819     + * If it's a zombie, our attachedness prevented normal
1820     + * parent notification or self-reaping. Do notification
1821     + * now if it would have happened earlier. If it should
1822     + * reap itself we return true.
1823     + *
1824     + * If it's our own child, there is no notification to do.
1825     + * But if our normal children self-reap, then this child
1826     + * was prevented by ptrace and we must reap it now.
1827     + */
1828     + if (!task_detached(p) && thread_group_empty(p)) {
1829     + if (!same_thread_group(p->real_parent, tracer))
1830     + do_notify_parent(p, p->exit_signal);
1831     + else if (ignoring_children(tracer->sighand))
1832     + p->exit_signal = -1;
1833     + }
1834     +
1835     + if (!task_detached(p))
1836     + return 0;
1837     +
1838     + /* Mark it as in the process of being reaped. */
1839     + p->exit_state = EXIT_DEAD;
1840     + return 1;
1841     +}
1842     +
1843     /*
1844     * Detach all tasks we were using ptrace on.
1845     * Any that need to be release_task'd are put on the @dead list.
1846     @@ -728,43 +756,10 @@ static int ignoring_children(struct task_struct *parent)
1847     static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
1848     {
1849     struct task_struct *p, *n;
1850     - int ign = -1;
1851    
1852     list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
1853     - __ptrace_unlink(p);
1854     -
1855     - if (p->exit_state != EXIT_ZOMBIE)
1856     - continue;
1857     -
1858     - /*
1859     - * If it's a zombie, our attachedness prevented normal
1860     - * parent notification or self-reaping. Do notification
1861     - * now if it would have happened earlier. If it should
1862     - * reap itself, add it to the @dead list. We can't call
1863     - * release_task() here because we already hold tasklist_lock.
1864     - *
1865     - * If it's our own child, there is no notification to do.
1866     - * But if our normal children self-reap, then this child
1867     - * was prevented by ptrace and we must reap it now.
1868     - */
1869     - if (!task_detached(p) && thread_group_empty(p)) {
1870     - if (!same_thread_group(p->real_parent, parent))
1871     - do_notify_parent(p, p->exit_signal);
1872     - else {
1873     - if (ign < 0)
1874     - ign = ignoring_children(parent);
1875     - if (ign)
1876     - p->exit_signal = -1;
1877     - }
1878     - }
1879     -
1880     - if (task_detached(p)) {
1881     - /*
1882     - * Mark it as in the process of being reaped.
1883     - */
1884     - p->exit_state = EXIT_DEAD;
1885     + if (__ptrace_detach(parent, p))
1886     list_add(&p->ptrace_entry, dead);
1887     - }
1888     }
1889     }
1890    
1891     diff --git a/kernel/futex.c b/kernel/futex.c
1892     index 438701a..2844297 100644
1893     --- a/kernel/futex.c
1894     +++ b/kernel/futex.c
1895     @@ -192,6 +192,7 @@ static void drop_futex_key_refs(union futex_key *key)
1896     * @shared: NULL for a PROCESS_PRIVATE futex,
1897     * &current->mm->mmap_sem for a PROCESS_SHARED futex
1898     * @key: address where result is stored.
1899     + * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
1900     *
1901     * Returns a negative error code or 0
1902     * The key words are stored in *key on success.
1903     @@ -204,7 +205,8 @@ static void drop_futex_key_refs(union futex_key *key)
1904     * For other futexes, it points to &current->mm->mmap_sem and
1905     * caller must have taken the reader lock. but NOT any spinlocks.
1906     */
1907     -static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1908     +static int
1909     +get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
1910     {
1911     unsigned long address = (unsigned long)uaddr;
1912     struct mm_struct *mm = current->mm;
1913     @@ -227,7 +229,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1914     * but access_ok() should be faster than find_vma()
1915     */
1916     if (!fshared) {
1917     - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
1918     + if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
1919     return -EFAULT;
1920     key->private.mm = mm;
1921     key->private.address = address;
1922     @@ -236,7 +238,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1923     }
1924    
1925     again:
1926     - err = get_user_pages_fast(address, 1, 0, &page);
1927     + err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
1928     if (err < 0)
1929     return err;
1930    
1931     @@ -707,7 +709,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
1932     if (!bitset)
1933     return -EINVAL;
1934    
1935     - ret = get_futex_key(uaddr, fshared, &key);
1936     + ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
1937     if (unlikely(ret != 0))
1938     goto out;
1939    
1940     @@ -753,10 +755,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1941     int ret, op_ret, attempt = 0;
1942    
1943     retryfull:
1944     - ret = get_futex_key(uaddr1, fshared, &key1);
1945     + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1946     if (unlikely(ret != 0))
1947     goto out;
1948     - ret = get_futex_key(uaddr2, fshared, &key2);
1949     + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
1950     if (unlikely(ret != 0))
1951     goto out_put_key1;
1952    
1953     @@ -859,10 +861,10 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1954     int ret, drop_count = 0;
1955    
1956     retry:
1957     - ret = get_futex_key(uaddr1, fshared, &key1);
1958     + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1959     if (unlikely(ret != 0))
1960     goto out;
1961     - ret = get_futex_key(uaddr2, fshared, &key2);
1962     + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
1963     if (unlikely(ret != 0))
1964     goto out_put_key1;
1965    
1966     @@ -1181,7 +1183,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1967     q.bitset = bitset;
1968     retry:
1969     q.key = FUTEX_KEY_INIT;
1970     - ret = get_futex_key(uaddr, fshared, &q.key);
1971     + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
1972     if (unlikely(ret != 0))
1973     goto out;
1974    
1975     @@ -1370,7 +1372,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1976     q.pi_state = NULL;
1977     retry:
1978     q.key = FUTEX_KEY_INIT;
1979     - ret = get_futex_key(uaddr, fshared, &q.key);
1980     + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
1981     if (unlikely(ret != 0))
1982     goto out;
1983    
1984     @@ -1630,7 +1632,7 @@ retry:
1985     if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1986     return -EPERM;
1987    
1988     - ret = get_futex_key(uaddr, fshared, &key);
1989     + ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
1990     if (unlikely(ret != 0))
1991     goto out;
1992    
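The futex changes thread an rw argument through get_futex_key() so that operations which only read the futex word (FUTEX_WAIT, FUTEX_WAKE) no longer demand write access to the mapping. A minimal userspace sketch of the case this enables, waiting on a futex word in a read-only shared mapping; the file path is a made-up example and the file is assumed to exist with a zeroed first word:

    #include <fcntl.h>
    #include <linux/futex.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical file, prepared elsewhere, at least one page long. */
        int fd = open("/dev/shm/futex-demo", O_RDONLY);
        unsigned int *uaddr;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

        if (fd < 0) { perror("open"); return 1; }
        uaddr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (uaddr == MAP_FAILED) { perror("mmap"); return 1; }

        /* FUTEX_WAIT only loads *uaddr; with the VERIFY_READ key this
         * works on a read-only mapping instead of failing with EFAULT. */
        if (syscall(SYS_futex, uaddr, FUTEX_WAIT, 0, &ts, NULL, 0) < 0)
            perror("futex");        /* ETIMEDOUT is the expected outcome */

        munmap(uaddr, 4096);
        close(fd);
        return 0;
    }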
1993     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
1994     index dc3b98e..893c2c7 100644
1995     --- a/kernel/ptrace.c
1996     +++ b/kernel/ptrace.c
1997     @@ -235,18 +235,10 @@ out:
1998     return retval;
1999     }
2000    
2001     -static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
2002     -{
2003     - child->exit_code = data;
2004     - /* .. re-parent .. */
2005     - __ptrace_unlink(child);
2006     - /* .. and wake it up. */
2007     - if (child->exit_state != EXIT_ZOMBIE)
2008     - wake_up_process(child);
2009     -}
2010     -
2011     int ptrace_detach(struct task_struct *child, unsigned int data)
2012     {
2013     + int dead = 0;
2014     +
2015     if (!valid_signal(data))
2016     return -EIO;
2017    
2018     @@ -256,10 +248,19 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
2019    
2020     write_lock_irq(&tasklist_lock);
2021     /* protect against de_thread()->release_task() */
2022     - if (child->ptrace)
2023     - __ptrace_detach(child, data);
2024     + if (child->ptrace) {
2025     + child->exit_code = data;
2026     +
2027     + dead = __ptrace_detach(current, child);
2028     +
2029     + if (!child->exit_state)
2030     + wake_up_process(child);
2031     + }
2032     write_unlock_irq(&tasklist_lock);
2033    
2034     + if (unlikely(dead))
2035     + release_task(child);
2036     +
2037     return 0;
2038     }
2039    
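Note the ordering in the reworked ptrace_detach(): __ptrace_detach() only marks a self-reaping zombie EXIT_DEAD while tasklist_lock is held, and the actual release_task() call happens after write_unlock_irq(), since release_task() takes tasklist_lock itself and calling it under the lock would deadlock. The exit.c and ptrace.c paths now share one detach helper instead of two divergent copies.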
2040     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2041     index 107da3d..2a9129b 100644
2042     --- a/mm/hugetlb.c
2043     +++ b/mm/hugetlb.c
2044     @@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
2045     static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
2046     {
2047     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2048     - if (!(vma->vm_flags & VM_SHARED))
2049     + if (!(vma->vm_flags & VM_MAYSHARE))
2050     return (struct resv_map *)(get_vma_private_data(vma) &
2051     ~HPAGE_RESV_MASK);
2052     return NULL;
2053     @@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
2054     static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
2055     {
2056     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2057     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
2058     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
2059    
2060     set_vma_private_data(vma, (get_vma_private_data(vma) &
2061     HPAGE_RESV_MASK) | (unsigned long)map);
2062     @@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
2063     static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
2064     {
2065     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2066     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
2067     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
2068    
2069     set_vma_private_data(vma, get_vma_private_data(vma) | flags);
2070     }
2071     @@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
2072     if (vma->vm_flags & VM_NORESERVE)
2073     return;
2074    
2075     - if (vma->vm_flags & VM_SHARED) {
2076     + if (vma->vm_flags & VM_MAYSHARE) {
2077     /* Shared mappings always use reserves */
2078     h->resv_huge_pages--;
2079     } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
2080     @@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
2081     void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
2082     {
2083     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2084     - if (!(vma->vm_flags & VM_SHARED))
2085     + if (!(vma->vm_flags & VM_MAYSHARE))
2086     vma->vm_private_data = (void *)0;
2087     }
2088    
2089     /* Returns true if the VMA has associated reserve pages */
2090     static int vma_has_reserves(struct vm_area_struct *vma)
2091     {
2092     - if (vma->vm_flags & VM_SHARED)
2093     + if (vma->vm_flags & VM_MAYSHARE)
2094     return 1;
2095     if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2096     return 1;
2097     @@ -924,7 +924,7 @@ static int vma_needs_reservation(struct hstate *h,
2098     struct address_space *mapping = vma->vm_file->f_mapping;
2099     struct inode *inode = mapping->host;
2100    
2101     - if (vma->vm_flags & VM_SHARED) {
2102     + if (vma->vm_flags & VM_MAYSHARE) {
2103     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
2104     return region_chg(&inode->i_mapping->private_list,
2105     idx, idx + 1);
2106     @@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
2107     struct address_space *mapping = vma->vm_file->f_mapping;
2108     struct inode *inode = mapping->host;
2109    
2110     - if (vma->vm_flags & VM_SHARED) {
2111     + if (vma->vm_flags & VM_MAYSHARE) {
2112     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
2113     region_add(&inode->i_mapping->private_list, idx, idx + 1);
2114    
2115     @@ -1893,7 +1893,7 @@ retry_avoidcopy:
2116     * at the time of fork() could consume its reserves on COW instead
2117     * of the full address range.
2118     */
2119     - if (!(vma->vm_flags & VM_SHARED) &&
2120     + if (!(vma->vm_flags & VM_MAYSHARE) &&
2121     is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2122     old_page != pagecache_page)
2123     outside_reserve = 1;
2124     @@ -2000,7 +2000,7 @@ retry:
2125     clear_huge_page(page, address, huge_page_size(h));
2126     __SetPageUptodate(page);
2127    
2128     - if (vma->vm_flags & VM_SHARED) {
2129     + if (vma->vm_flags & VM_MAYSHARE) {
2130     int err;
2131     struct inode *inode = mapping->host;
2132    
2133     @@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2134     goto out_mutex;
2135     }
2136    
2137     - if (!(vma->vm_flags & VM_SHARED))
2138     + if (!(vma->vm_flags & VM_MAYSHARE))
2139     pagecache_page = hugetlbfs_pagecache_page(h,
2140     vma, address);
2141     }
2142     @@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2143     * to reserve the full area even if read-only as mprotect() may be
2144     * called to make the mapping read-write. Assume !vma is a shm mapping
2145     */
2146     - if (!vma || vma->vm_flags & VM_SHARED)
2147     + if (!vma || vma->vm_flags & VM_MAYSHARE)
2148     chg = region_chg(&inode->i_mapping->private_list, from, to);
2149     else {
2150     struct resv_map *resv_map = resv_map_alloc();
2151     @@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2152     * consumed reservations are stored in the map. Hence, nothing
2153     * else has to be done for private mappings here
2154     */
2155     - if (!vma || vma->vm_flags & VM_SHARED)
2156     + if (!vma || vma->vm_flags & VM_MAYSHARE)
2157     region_add(&inode->i_mapping->private_list, from, to);
2158     return 0;
2159     }
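
The blanket VM_SHARED to VM_MAYSHARE conversion matters because a MAP_SHARED mapping of a file opened read-only keeps VM_MAYSHARE but loses VM_SHARED, so testing VM_SHARED misclassified such hugetlb mappings as private and corrupted the reservation accounting. A self-contained model of the relevant mmap() flag logic; the flag values are copied from my reading of the headers and should be treated as assumptions:

    #include <stdio.h>

    /* Assumed values, per include/linux/mm.h and include/linux/fs.h. */
    #define VM_SHARED   0x00000008u
    #define VM_MAYWRITE 0x00000020u
    #define VM_MAYSHARE 0x00000080u
    #define FMODE_WRITE 0x2u

    /* Simplified model of the MAP_SHARED branch of mmap(): when the
     * backing file was not opened for writing, the kernel drops
     * VM_SHARED (and VM_MAYWRITE) but deliberately keeps VM_MAYSHARE. */
    static unsigned int shared_mmap_vm_flags(unsigned int f_mode)
    {
        unsigned int vm_flags = VM_SHARED | VM_MAYSHARE | VM_MAYWRITE;

        if (!(f_mode & FMODE_WRITE))
            vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
        return vm_flags;
    }

    int main(void)
    {
        printf("rw mapping: %#x\n", shared_mmap_vm_flags(FMODE_WRITE));
        /* read-only: VM_MAYSHARE only -- the case the fix targets */
        printf("ro mapping: %#x\n", shared_mmap_vm_flags(0));
        return 0;
    }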
2160     diff --git a/mm/slub.c b/mm/slub.c
2161     index 0280eee..5663da0 100644
2162     --- a/mm/slub.c
2163     +++ b/mm/slub.c
2164     @@ -9,6 +9,7 @@
2165     */
2166    
2167     #include <linux/mm.h>
2168     +#include <linux/swap.h> /* struct reclaim_state */
2169     #include <linux/module.h>
2170     #include <linux/bit_spinlock.h>
2171     #include <linux/interrupt.h>
2172     @@ -1175,6 +1176,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
2173    
2174     __ClearPageSlab(page);
2175     reset_page_mapcount(page);
2176     + if (current->reclaim_state)
2177     + current->reclaim_state->reclaimed_slab += pages;
2178     __free_pages(page, order);
2179     }
2180    
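The slub.c hunk brings SLUB in line with SLAB: when slab pages are freed while direct reclaim is running, the freed page count is credited to current->reclaim_state->reclaimed_slab, so the VM's reclaim loop can see progress from shrinking slab caches instead of overreclaiming page cache.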
2181     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2182     index 1b34135..6b09213 100644
2183     --- a/net/8021q/vlan_dev.c
2184     +++ b/net/8021q/vlan_dev.c
2185     @@ -668,7 +668,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
2186     const struct vlan_dev_info *vlan = vlan_dev_info(dev);
2187     struct net_device *real_dev = vlan->real_dev;
2188    
2189     - if (!real_dev->ethtool_ops->get_settings)
2190     + if (!real_dev->ethtool_ops ||
2191     + !real_dev->ethtool_ops->get_settings)
2192     return -EOPNOTSUPP;
2193    
2194     return real_dev->ethtool_ops->get_settings(real_dev, cmd);
2195     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
2196     index 6549848..c4070ec 100644
2197     --- a/net/core/pktgen.c
2198     +++ b/net/core/pktgen.c
2199     @@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
2200     if (pkt_dev->cflows) {
2201     /* let go of the SAs if we have them */
2202     int i = 0;
2203     - for (; i < pkt_dev->nflows; i++){
2204     + for (; i < pkt_dev->cflows; i++) {
2205     struct xfrm_state *x = pkt_dev->flows[i].x;
2206     if (x) {
2207     xfrm_state_put(x);
2208     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2209     index eae6954..f4bbc98 100644
2210     --- a/net/core/skbuff.c
2211     +++ b/net/core/skbuff.c
2212     @@ -2206,7 +2206,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2213     next_skb:
2214     block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2215    
2216     - if (abs_offset < block_limit) {
2217     + if (abs_offset < block_limit && !st->frag_data) {
2218     *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2219     return block_limit - abs_offset;
2220     }
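In skb_seq_read(), the added !st->frag_data test keeps the function from returning linear-buffer data while a fragment page from the previous call is still kmapped; without it, sequential reads that crossed the head/fragment boundary could report a wrong offset or length for page-fragment data.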
2221     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2222     index 97f7115..4e302d1 100644
2223     --- a/net/ipv4/route.c
2224     +++ b/net/ipv4/route.c
2225     @@ -784,8 +784,8 @@ static void rt_check_expire(void)
2226     {
2227     static unsigned int rover;
2228     unsigned int i = rover, goal;
2229     - struct rtable *rth, **rthp;
2230     - unsigned long length = 0, samples = 0;
2231     + struct rtable *rth, *aux, **rthp;
2232     + unsigned long samples = 0;
2233     unsigned long sum = 0, sum2 = 0;
2234     u64 mult;
2235    
2236     @@ -795,9 +795,9 @@ static void rt_check_expire(void)
2237     goal = (unsigned int)mult;
2238     if (goal > rt_hash_mask)
2239     goal = rt_hash_mask + 1;
2240     - length = 0;
2241     for (; goal > 0; goal--) {
2242     unsigned long tmo = ip_rt_gc_timeout;
2243     + unsigned long length;
2244    
2245     i = (i + 1) & rt_hash_mask;
2246     rthp = &rt_hash_table[i].chain;
2247     @@ -809,8 +809,10 @@ static void rt_check_expire(void)
2248    
2249     if (*rthp == NULL)
2250     continue;
2251     + length = 0;
2252     spin_lock_bh(rt_hash_lock_addr(i));
2253     while ((rth = *rthp) != NULL) {
2254     + prefetch(rth->u.dst.rt_next);
2255     if (rt_is_expired(rth)) {
2256     *rthp = rth->u.dst.rt_next;
2257     rt_free(rth);
2258     @@ -819,33 +821,30 @@ static void rt_check_expire(void)
2259     if (rth->u.dst.expires) {
2260     /* Entry is expired even if it is in use */
2261     if (time_before_eq(jiffies, rth->u.dst.expires)) {
2262     +nofree:
2263     tmo >>= 1;
2264     rthp = &rth->u.dst.rt_next;
2265     /*
2266     - * Only bump our length if the hash
2267     - * inputs on entries n and n+1 are not
2268     - * the same, we only count entries on
2269     + * We only count entries on
2270     * a chain with equal hash inputs once
2271     * so that entries for different QOS
2272     * levels, and other non-hash input
2273     * attributes don't unfairly skew
2274     * the length computation
2275     */
2276     - if ((*rthp == NULL) ||
2277     - !compare_hash_inputs(&(*rthp)->fl,
2278     - &rth->fl))
2279     - length += ONE;
2280     + for (aux = rt_hash_table[i].chain;;) {
2281     + if (aux == rth) {
2282     + length += ONE;
2283     + break;
2284     + }
2285     + if (compare_hash_inputs(&aux->fl, &rth->fl))
2286     + break;
2287     + aux = aux->u.dst.rt_next;
2288     + }
2289     continue;
2290     }
2291     - } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
2292     - tmo >>= 1;
2293     - rthp = &rth->u.dst.rt_next;
2294     - if ((*rthp == NULL) ||
2295     - !compare_hash_inputs(&(*rthp)->fl,
2296     - &rth->fl))
2297     - length += ONE;
2298     - continue;
2299     - }
2300     + } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
2301     + goto nofree;
2302    
2303     /* Cleanup aged off entries. */
2304     *rthp = rth->u.dst.rt_next;
2305     @@ -1068,7 +1067,6 @@ out: return 0;
2306     static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
2307     {
2308     struct rtable *rth, **rthp;
2309     - struct rtable *rthi;
2310     unsigned long now;
2311     struct rtable *cand, **candp;
2312     u32 min_score;
2313     @@ -1088,7 +1086,6 @@ restart:
2314     }
2315    
2316     rthp = &rt_hash_table[hash].chain;
2317     - rthi = NULL;
2318    
2319     spin_lock_bh(rt_hash_lock_addr(hash));
2320     while ((rth = *rthp) != NULL) {
2321     @@ -1134,17 +1131,6 @@ restart:
2322     chain_length++;
2323    
2324     rthp = &rth->u.dst.rt_next;
2325     -
2326     - /*
2327     - * check to see if the next entry in the chain
2328     - * contains the same hash input values as rt. If it does
2329     - * This is where we will insert into the list, instead of
2330     - * at the head. This groups entries that differ by aspects not
2331     - * relvant to the hash function together, which we use to adjust
2332     - * our chain length
2333     - */
2334     - if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
2335     - rthi = rth;
2336     }
2337    
2338     if (cand) {
2339     @@ -1205,10 +1191,7 @@ restart:
2340     }
2341     }
2342    
2343     - if (rthi)
2344     - rt->u.dst.rt_next = rthi->u.dst.rt_next;
2345     - else
2346     - rt->u.dst.rt_next = rt_hash_table[hash].chain;
2347     + rt->u.dst.rt_next = rt_hash_table[hash].chain;
2348    
2349     #if RT_CACHE_DEBUG >= 2
2350     if (rt->u.dst.rt_next) {
2351     @@ -1224,10 +1207,7 @@ restart:
2352     * previous writes to rt are comitted to memory
2353     * before making rt visible to other CPUS.
2354     */
2355     - if (rthi)
2356     - rcu_assign_pointer(rthi->u.dst.rt_next, rt);
2357     - else
2358     - rcu_assign_pointer(rt_hash_table[hash].chain, rt);
2359     + rcu_assign_pointer(rt_hash_table[hash].chain, rt);
2360    
2361     spin_unlock_bh(rt_hash_lock_addr(hash));
2362     *rp = rt;
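
The rt_check_expire()/rt_intern_hash() changes drop the scheme of grouping same-hash-input entries at insertion time. Chain length is instead sampled during expiry by walking each chain from the head and counting only the first entry of every distinct hash-input group, which keeps QOS-level duplicates from skewing the average while letting new routes always be inserted at the chain head.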
2363     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2364     index 76b148b..b16dc84 100644
2365     --- a/net/ipv4/tcp.c
2366     +++ b/net/ipv4/tcp.c
2367     @@ -1286,6 +1286,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2368     struct task_struct *user_recv = NULL;
2369     int copied_early = 0;
2370     struct sk_buff *skb;
2371     + u32 urg_hole = 0;
2372    
2373     lock_sock(sk);
2374    
2375     @@ -1497,7 +1498,8 @@ do_prequeue:
2376     }
2377     }
2378     }
2379     - if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
2380     + if ((flags & MSG_PEEK) &&
2381     + (peek_seq - copied - urg_hole != tp->copied_seq)) {
2382     if (net_ratelimit())
2383     printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
2384     current->comm, task_pid_nr(current));
2385     @@ -1518,6 +1520,7 @@ do_prequeue:
2386     if (!urg_offset) {
2387     if (!sock_flag(sk, SOCK_URGINLINE)) {
2388     ++*seq;
2389     + urg_hole++;
2390     offset++;
2391     used--;
2392     if (!used)
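The urg_hole counter closes a false positive in the MSG_PEEK race check: when an urgent byte is skipped (SOCK_URGINLINE off), *seq advances by one without anything being copied, so peek_seq legitimately drifts ahead of copied_seq. After peeking 100 bytes across one such hole, peek_seq is copied_seq + 101; the check now compares peek_seq - copied - urg_hole against copied_seq instead of flagging an application bug.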
2393     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2394     index c28976a..fd4317e 100644
2395     --- a/net/ipv4/tcp_input.c
2396     +++ b/net/ipv4/tcp_input.c
2397     @@ -927,6 +927,8 @@ static void tcp_init_metrics(struct sock *sk)
2398     tcp_set_rto(sk);
2399     if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
2400     goto reset;
2401     +
2402     +cwnd:
2403     tp->snd_cwnd = tcp_init_cwnd(tp, dst);
2404     tp->snd_cwnd_stamp = tcp_time_stamp;
2405     return;
2406     @@ -941,6 +943,7 @@ reset:
2407     tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
2408     inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2409     }
2410     + goto cwnd;
2411     }
2412    
2413     static void tcp_update_reordering(struct sock *sk, const int metric,
2414     diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
2415     index 3824990..d9233ec 100644
2416     --- a/net/mac80211/rc80211_minstrel.c
2417     +++ b/net/mac80211/rc80211_minstrel.c
2418     @@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
2419     return NULL;
2420    
2421     for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
2422     - sband = hw->wiphy->bands[hw->conf.channel->band];
2423     - if (sband->n_bitrates > max_rates)
2424     + sband = hw->wiphy->bands[i];
2425     + if (sband && sband->n_bitrates > max_rates)
2426     max_rates = sband->n_bitrates;
2427     }
2428    
2429     diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
2430     index b16801c..8bef9a1 100644
2431     --- a/net/mac80211/rc80211_pid_algo.c
2432     +++ b/net/mac80211/rc80211_pid_algo.c
2433     @@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
2434     struct ieee80211_sta *sta, void *priv_sta)
2435     {
2436     struct rc_pid_sta_info *spinfo = priv_sta;
2437     + struct rc_pid_info *pinfo = priv;
2438     + struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
2439     struct sta_info *si;
2440     + int i, j, tmp;
2441     + bool s;
2442    
2443     /* TODO: This routine should consider using RSSI from previous packets
2444     * as we need to have IEEE 802.1X auth succeed immediately after assoc..
2445     * Until that method is implemented, we will use the lowest supported
2446     * rate as a workaround. */
2447    
2448     + /* Sort the rates. This is optimized for the most common case (i.e.
2449     + * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
2450     + * mapping too. */
2451     + for (i = 0; i < sband->n_bitrates; i++) {
2452     + rinfo[i].index = i;
2453     + rinfo[i].rev_index = i;
2454     + if (RC_PID_FAST_START)
2455     + rinfo[i].diff = 0;
2456     + else
2457     + rinfo[i].diff = i * pinfo->norm_offset;
2458     + }
2459     + for (i = 1; i < sband->n_bitrates; i++) {
2460     + s = 0;
2461     + for (j = 0; j < sband->n_bitrates - i; j++)
2462     + if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
2463     + sband->bitrates[rinfo[j + 1].index].bitrate)) {
2464     + tmp = rinfo[j].index;
2465     + rinfo[j].index = rinfo[j + 1].index;
2466     + rinfo[j + 1].index = tmp;
2467     + rinfo[rinfo[j].index].rev_index = j;
2468     + rinfo[rinfo[j + 1].index].rev_index = j + 1;
2469     + s = 1;
2470     + }
2471     + if (!s)
2472     + break;
2473     + }
2474     +
2475     spinfo->txrate_idx = rate_lowest_index(sband, sta);
2476     /* HACK */
2477     si = container_of(sta, struct sta_info, sta);
2478     @@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
2479     struct rc_pid_info *pinfo;
2480     struct rc_pid_rateinfo *rinfo;
2481     struct ieee80211_supported_band *sband;
2482     - int i, j, tmp;
2483     - bool s;
2484     + int i, max_rates = 0;
2485     #ifdef CONFIG_MAC80211_DEBUGFS
2486     struct rc_pid_debugfs_entries *de;
2487     #endif
2488    
2489     - sband = hw->wiphy->bands[hw->conf.channel->band];
2490     -
2491     pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
2492     if (!pinfo)
2493     return NULL;
2494    
2495     - /* We can safely assume that sband won't change unless we get
2496     - * reinitialized. */
2497     - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
2498     + for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
2499     + sband = hw->wiphy->bands[i];
2500     + if (sband && sband->n_bitrates > max_rates)
2501     + max_rates = sband->n_bitrates;
2502     + }
2503     +
2504     + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
2505     if (!rinfo) {
2506     kfree(pinfo);
2507     return NULL;
2508     @@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
2509     pinfo->rinfo = rinfo;
2510     pinfo->oldrate = 0;
2511    
2512     - /* Sort the rates. This is optimized for the most common case (i.e.
2513     - * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
2514     - * mapping too. */
2515     - for (i = 0; i < sband->n_bitrates; i++) {
2516     - rinfo[i].index = i;
2517     - rinfo[i].rev_index = i;
2518     - if (RC_PID_FAST_START)
2519     - rinfo[i].diff = 0;
2520     - else
2521     - rinfo[i].diff = i * pinfo->norm_offset;
2522     - }
2523     - for (i = 1; i < sband->n_bitrates; i++) {
2524     - s = 0;
2525     - for (j = 0; j < sband->n_bitrates - i; j++)
2526     - if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
2527     - sband->bitrates[rinfo[j + 1].index].bitrate)) {
2528     - tmp = rinfo[j].index;
2529     - rinfo[j].index = rinfo[j + 1].index;
2530     - rinfo[j + 1].index = tmp;
2531     - rinfo[rinfo[j].index].rev_index = j;
2532     - rinfo[rinfo[j + 1].index].rev_index = j + 1;
2533     - s = 1;
2534     - }
2535     - if (!s)
2536     - break;
2537     - }
2538     -
2539     #ifdef CONFIG_MAC80211_DEBUGFS
2540     de = &pinfo->dentries;
2541     de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
2542     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
2543     index 5e75bbf..6c4846a 100644
2544     --- a/net/rose/af_rose.c
2545     +++ b/net/rose/af_rose.c
2546     @@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
2547     unsigned char *asmptr;
2548     int n, size, qbit = 0;
2549    
2550     - /* ROSE empty frame has no meaning : don't send */
2551     - if (len == 0)
2552     - return 0;
2553     -
2554     if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
2555     return -EINVAL;
2556    
2557     @@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
2558     skb_reset_transport_header(skb);
2559     copied = skb->len;
2560    
2561     - /* ROSE empty frame has no meaning : ignore it */
2562     - if (copied == 0) {
2563     - skb_free_datagram(sk, skb);
2564     - return copied;
2565     - }
2566     -
2567     if (copied > size) {
2568     copied = size;
2569     msg->msg_flags |= MSG_TRUNC;
2570     diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
2571     index ec697ce..3b64182 100644
2572     --- a/net/sched/sch_teql.c
2573     +++ b/net/sched/sch_teql.c
2574     @@ -303,6 +303,8 @@ restart:
2575     switch (teql_resolve(skb, skb_res, slave)) {
2576     case 0:
2577     if (__netif_tx_trylock(slave_txq)) {
2578     + unsigned int length = qdisc_pkt_len(skb);
2579     +
2580     if (!netif_tx_queue_stopped(slave_txq) &&
2581     !netif_tx_queue_frozen(slave_txq) &&
2582     slave_ops->ndo_start_xmit(skb, slave) == 0) {
2583     @@ -310,8 +312,7 @@ restart:
2584     master->slaves = NEXT_SLAVE(q);
2585     netif_wake_queue(dev);
2586     master->stats.tx_packets++;
2587     - master->stats.tx_bytes +=
2588     - qdisc_pkt_len(skb);
2589     + master->stats.tx_bytes += length;
2590     return 0;
2591     }
2592     __netif_tx_unlock(slave_txq);
2593     diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
2594     index cb3b4ad..c229de3 100644
2595     --- a/net/wimax/op-msg.c
2596     +++ b/net/wimax/op-msg.c
2597     @@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
2598     }
2599     result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
2600     if (result < 0) {
2601     - dev_err(dev, "no memory to add payload in attribute\n");
2602     + dev_err(dev, "no memory to add payload (msg %p size %zu) in "
2603     + "attribute: %d\n", msg, size, result);
2604     goto error_nla_put;
2605     }
2606     genlmsg_end(skb, genl_msg);
2607     @@ -302,10 +303,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
2608     struct sk_buff *skb;
2609    
2610     skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
2611     - if (skb == NULL)
2612     - goto error_msg_new;
2613     - result = wimax_msg_send(wimax_dev, skb);
2614     -error_msg_new:
2615     + if (IS_ERR(skb))
2616     + result = PTR_ERR(skb);
2617     + else
2618     + result = wimax_msg_send(wimax_dev, skb);
2619     return result;
2620     }
2621     EXPORT_SYMBOL_GPL(wimax_msg);
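wimax_msg_alloc() reports failure with ERR_PTR() codes, never NULL, so the old skb == NULL test in wimax_msg() could not catch allocation errors and the subsequent wimax_msg_send() would dereference the error pointer; the IS_ERR()/PTR_ERR() form propagates the real errno instead.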
2622     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2623     index 4f9ff2a..66cebb5 100644
2624     --- a/net/wireless/reg.c
2625     +++ b/net/wireless/reg.c
2626     @@ -1497,6 +1497,13 @@ int regulatory_init(void)
2627     "using static definition\n");
2628     #endif
2629    
2630     + /*
2631     + * This ensures last_request is populated once modules
2632     + * come swinging in and calling regulatory hints and
2633     + * wiphy_apply_custom_regulatory().
2634     + */
2635     + flush_scheduled_work();
2636     +
2637     return 0;
2638     }
2639    
2640     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2641     index 8227172..5f1f865 100644
2642     --- a/net/xfrm/xfrm_state.c
2643     +++ b/net/xfrm/xfrm_state.c
2644     @@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
2645     {
2646     static xfrm_address_t saddr_wildcard = { };
2647     struct net *net = xp_net(pol);
2648     - unsigned int h;
2649     + unsigned int h, h_wildcard;
2650     struct hlist_node *entry;
2651     struct xfrm_state *x, *x0, *to_put;
2652     int acquire_in_progress = 0;
2653     @@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
2654     if (best)
2655     goto found;
2656    
2657     - h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
2658     - hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
2659     + h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
2660     + hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
2661     if (x->props.family == family &&
2662     x->props.reqid == tmpl->reqid &&
2663     !(x->props.flags & XFRM_STATE_WILDRECV) &&
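Using a separate h_wildcard for the wildcard-source lookup keeps h intact: h is computed from the template's real source address and is reused further down in xfrm_state_find() when a newly created acquire state is linked into the bydst hash, so clobbering it linked new states into the wrong bucket.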
2664     diff --git a/security/keys/request_key.c b/security/keys/request_key.c
2665     index 0e04f72..ab70cab 100644
2666     --- a/security/keys/request_key.c
2667     +++ b/security/keys/request_key.c
2668     @@ -311,7 +311,8 @@ static int construct_alloc_key(struct key_type *type,
2669    
2670     set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
2671    
2672     - down_write(&dest_keyring->sem);
2673     + if (dest_keyring)
2674     + down_write(&dest_keyring->sem);
2675    
2676     /* attach the key to the destination keyring under lock, but we do need
2677     * to do another check just in case someone beat us to it whilst we
2678     @@ -322,10 +323,12 @@ static int construct_alloc_key(struct key_type *type,
2679     if (!IS_ERR(key_ref))
2680     goto key_already_present;
2681    
2682     - __key_link(dest_keyring, key);
2683     + if (dest_keyring)
2684     + __key_link(dest_keyring, key);
2685    
2686     mutex_unlock(&key_construction_mutex);
2687     - up_write(&dest_keyring->sem);
2688     + if (dest_keyring)
2689     + up_write(&dest_keyring->sem);
2690     mutex_unlock(&user->cons_lock);
2691     *_key = key;
2692     kleave(" = 0 [%d]", key_serial(key));
2693     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2694     index 8d24c91..9d62f29 100644
2695     --- a/security/selinux/hooks.c
2696     +++ b/security/selinux/hooks.c
2697     @@ -4648,7 +4648,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
2698     if (err)
2699     return err;
2700    
2701     - if (send_perm != 0)
2702     + if (!send_perm)
2703     return 0;
2704    
2705     err = sel_netport_sid(sk->sk_protocol,
2706     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2707     index 6c26afc..d76fe8f 100644
2708     --- a/sound/pci/hda/patch_realtek.c
2709     +++ b/sound/pci/hda/patch_realtek.c
2710     @@ -11710,6 +11710,7 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
2711     SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
2712     SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
2713     SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA),
2714     + SND_PCI_QUIRK(0x103c, 0x30f1, "HP TX25xx series", ALC268_TOSHIBA),
2715     SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
2716     SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA),
2717     SND_PCI_QUIRK(0x1179, 0xff50, "TOSHIBA A305", ALC268_TOSHIBA),
2718     diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
2719     index 19e3745..ee379cf 100644
2720     --- a/sound/usb/usbaudio.c
2721     +++ b/sound/usb/usbaudio.c
2722     @@ -3375,7 +3375,7 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip,
2723     [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface,
2724     [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface,
2725     [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface,
2726     - [QUIRK_MIDI_RAW] = snd_usb_create_midi_interface,
2727     + [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface,
2728     [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface,
2729     [QUIRK_MIDI_CME] = snd_usb_create_midi_interface,
2730     [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
2731     diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
2732     index 36e4f7a..8e7f789 100644
2733     --- a/sound/usb/usbaudio.h
2734     +++ b/sound/usb/usbaudio.h
2735     @@ -153,7 +153,7 @@ enum quirk_type {
2736     QUIRK_MIDI_YAMAHA,
2737     QUIRK_MIDI_MIDIMAN,
2738     QUIRK_MIDI_NOVATION,
2739     - QUIRK_MIDI_RAW,
2740     + QUIRK_MIDI_FASTLANE,
2741     QUIRK_MIDI_EMAGIC,
2742     QUIRK_MIDI_CME,
2743     QUIRK_MIDI_US122L,
2744     diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
2745     index 26bad37..2fb35cc 100644
2746     --- a/sound/usb/usbmidi.c
2747     +++ b/sound/usb/usbmidi.c
2748     @@ -1778,8 +1778,18 @@ int snd_usb_create_midi_interface(struct snd_usb_audio* chip,
2749     umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
2750     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2751     break;
2752     - case QUIRK_MIDI_RAW:
2753     + case QUIRK_MIDI_FASTLANE:
2754     umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
2755     + /*
2756     + * Interface 1 contains isochronous endpoints, but with the same
2757     + * numbers as in interface 0. Since it is interface 1 that the
2758     + * USB core has most recently seen, these descriptors are now
2759     + * associated with the endpoint numbers. This will foul up our
2760     + * attempts to submit bulk/interrupt URBs to the endpoints in
2761     + * interface 0, so we have to make sure that the USB core looks
2762     + * again at interface 0 by calling usb_set_interface() on it.
2763     + */
2764     + usb_set_interface(umidi->chip->dev, 0, 0);
2765     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2766     break;
2767     case QUIRK_MIDI_EMAGIC:
2768     diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
2769     index 5d8ef09..25162f6 100644
2770     --- a/sound/usb/usbquirks.h
2771     +++ b/sound/usb/usbquirks.h
2772     @@ -1858,7 +1858,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2773     .data = & (const struct snd_usb_audio_quirk[]) {
2774     {
2775     .ifnum = 0,
2776     - .type = QUIRK_MIDI_RAW
2777     + .type = QUIRK_MIDI_FASTLANE
2778     },
2779     {
2780     .ifnum = 1,
2781     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2782     index d85642e..9138d0d 100644
2783     --- a/virt/kvm/kvm_main.c
2784     +++ b/virt/kvm/kvm_main.c
2785     @@ -2315,6 +2315,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
2786     r = -ENOMEM;
2787     goto out_free_0;
2788     }
2789     + cpumask_clear(cpus_hardware_enabled);
2790    
2791     r = kvm_arch_hardware_setup();
2792     if (r < 0)