Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.29-r7/0104-2.6.29.5-all-fixes.patch



Revision 857
Tue Jun 30 20:01:52 2009 UTC by niro
File size: 93587 bytes
-2.6.29-magellan-r7: updated to linux-2.6.29.5

1 niro 857 diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
2     index cec829b..5c484ae 100644
3     --- a/Documentation/filesystems/ext4.txt
4     +++ b/Documentation/filesystems/ext4.txt
5     @@ -85,7 +85,7 @@ Note: More extensive information for getting started with ext4 can be
6     * extent format more robust in face of on-disk corruption due to magics,
7     * internal redundancy in tree
8     * improved file allocation (multi-block alloc)
9     -* fix 32000 subdirectory limit
10     +* lift 32000 subdirectory limit imposed by i_links_count[1]
11     * nsec timestamps for mtime, atime, ctime, create time
12     * inode version field on disk (NFSv4, Lustre)
13     * reduced e2fsck time via uninit_bg feature
14     @@ -100,6 +100,9 @@ Note: More extensive information for getting started with ext4 can be
15     * efficent new ordered mode in JBD2 and ext4(avoid using buffer head to force
16     the ordering)
17    
18     +[1] Filesystems with a block size of 1k may see a limit imposed by the
19     +directory hash tree having a maximum depth of two.
20     +
21     2.2 Candidate features for future inclusion
22    
23     * Online defrag (patches available but not well tested)
24     diff --git a/Makefile b/Makefile
25     index 1095142..330e8de 100644
26     --- a/Makefile
27     +++ b/Makefile
28     @@ -1,7 +1,7 @@
29     VERSION = 2
30     PATCHLEVEL = 6
31     SUBLEVEL = 29
32     -EXTRAVERSION = .4
33     +EXTRAVERSION = .5
34     NAME = Temporary Tasmanian Devil
35    
36     # *DOCUMENTATION*
37     diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
38     index f900a39..3bc41c3 100644
39     --- a/arch/powerpc/mm/tlb_nohash_low.S
40     +++ b/arch/powerpc/mm/tlb_nohash_low.S
41     @@ -161,6 +161,6 @@ _GLOBAL(_tlbil_va)
42     isync
43     1: wrtee r10
44     blr
45     -#elif
46     +#else
47     #error Unsupported processor type !
48     #endif
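
The one-character powerpc fix above deserves a note: #elif expects an expression, and a bare #elif is only diagnosed when the preprocessor actually has to evaluate it. On supported processors the dead branch is skipped silently, but on an unsupported configuration the build fails with a confusing "#elif with no expression" message instead of the intended one; #else restores the catch-all. A minimal standalone C sketch of the pattern (CONFIG_A is a made-up symbol, not from the patch):

/* With CONFIG_A defined both variants build; without it, the bare "#elif"
 * variant dies with "#elif with no expression" rather than the intended
 * #error text. "#else" is the correct catch-all. */
#include <stdio.h>

#define CONFIG_A 1

#if defined(CONFIG_A)
static const char *impl = "variant A";
#else  /* catch-all for unsupported configurations */
#error Unsupported configuration
#endif

int main(void)
{
    printf("%s\n", impl);  /* prints "variant A" */
    return 0;
}
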
49     diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
50     index 0a83bd7..c8f14c1 100644
51     --- a/arch/sparc/kernel/of_device_32.c
52     +++ b/arch/sparc/kernel/of_device_32.c
53     @@ -246,8 +246,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
54    
55     static int of_bus_sbus_match(struct device_node *np)
56     {
57     - return !strcmp(np->name, "sbus") ||
58     - !strcmp(np->name, "sbi");
59     + struct device_node *dp = np;
60     +
61     + while (dp) {
62     + if (!strcmp(dp->name, "sbus") ||
63     + !strcmp(dp->name, "sbi"))
64     + return 1;
65     +
66     + /* Have a look at use_1to1_mapping(). We're trying
67     + * to match SBUS if that's the top-level bus and we
68     + * don't have some intervening real bus that provides
69     + * ranges based translations.
70     + */
71     + if (of_find_property(dp, "ranges", NULL) != NULL)
72     + break;
73     +
74     + dp = dp->parent;
75     + }
76     +
77     + return 0;
78     }
79    
80     static void of_bus_sbus_count_cells(struct device_node *child,
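
Both sparc variants of of_bus_sbus_match() now walk up the device tree instead of testing only the node itself, giving up as soon as an intervening bus supplies its own "ranges" translation. A hypothetical userspace miniature of that walk, where struct node and has_ranges stand in for struct device_node and of_find_property():

#include <stdio.h>
#include <string.h>

struct node {
    const char  *name;
    int          has_ranges;   /* stand-in for a "ranges" property */
    struct node *parent;
};

/* Climb toward the root looking for an sbus/sbi ancestor, but stop if a
 * real bus in between provides ranges-based translation. */
static int sbus_match(struct node *np)
{
    struct node *dp;

    for (dp = np; dp; dp = dp->parent) {
        if (!strcmp(dp->name, "sbus") || !strcmp(dp->name, "sbi"))
            return 1;
        if (dp->has_ranges)
            break;
    }
    return 0;
}

int main(void)
{
    struct node root = { "sbus", 0, NULL };
    struct node leaf = { "ledma", 0, &root };

    printf("%d\n", sbus_match(&leaf));  /* 1: top-level bus is SBUS */
    return 0;
}
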
81     diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
82     index b4a12c9..9013c4b 100644
83     --- a/arch/sparc/kernel/of_device_64.c
84     +++ b/arch/sparc/kernel/of_device_64.c
85     @@ -301,8 +301,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
86    
87     static int of_bus_sbus_match(struct device_node *np)
88     {
89     - return !strcmp(np->name, "sbus") ||
90     - !strcmp(np->name, "sbi");
91     + struct device_node *dp = np;
92     +
93     + while (dp) {
94     + if (!strcmp(dp->name, "sbus") ||
95     + !strcmp(dp->name, "sbi"))
96     + return 1;
97     +
98     + /* Have a look at use_1to1_mapping(). We're trying
99     + * to match SBUS if that's the top-level bus and we
100     + * don't have some intervening real bus that provides
101     + * ranges based translations.
102     + */
103     + if (of_find_property(dp, "ranges", NULL) != NULL)
104     + break;
105     +
106     + dp = dp->parent;
107     + }
108     +
109     + return 0;
110     }
111    
112     static void of_bus_sbus_count_cells(struct device_node *child,
113     diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
114     index 79457f6..a35fef7 100644
115     --- a/arch/sparc/kernel/smp_64.c
116     +++ b/arch/sparc/kernel/smp_64.c
117     @@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
118     while (!cpu_isset(cpuid, smp_commenced_mask))
119     rmb();
120    
121     - ipi_call_lock();
122     + ipi_call_lock_irq();
123     cpu_set(cpuid, cpu_online_map);
124     - ipi_call_unlock();
125     + ipi_call_unlock_irq();
126    
127     /* idle thread is expected to have preempt disabled */
128     preempt_disable();
129     diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
130     index a00545f..db3134f 100644
131     --- a/arch/x86/kernel/hpet.c
132     +++ b/arch/x86/kernel/hpet.c
133     @@ -191,27 +191,42 @@ static struct clock_event_device hpet_clockevent = {
134     .rating = 50,
135     };
136    
137     -static void hpet_start_counter(void)
138     +static void hpet_stop_counter(void)
139     {
140     unsigned long cfg = hpet_readl(HPET_CFG);
141     -
142     cfg &= ~HPET_CFG_ENABLE;
143     hpet_writel(cfg, HPET_CFG);
144     +}
145     +
146     +static void hpet_reset_counter(void)
147     +{
148     hpet_writel(0, HPET_COUNTER);
149     hpet_writel(0, HPET_COUNTER + 4);
150     +}
151     +
152     +static void hpet_start_counter(void)
153     +{
154     + unsigned long cfg = hpet_readl(HPET_CFG);
155     cfg |= HPET_CFG_ENABLE;
156     hpet_writel(cfg, HPET_CFG);
157     }
158    
159     +static void hpet_restart_counter(void)
160     +{
161     + hpet_stop_counter();
162     + hpet_reset_counter();
163     + hpet_start_counter();
164     +}
165     +
166     static void hpet_resume_device(void)
167     {
168     force_hpet_resume();
169     }
170    
171     -static void hpet_restart_counter(void)
172     +static void hpet_resume_counter(void)
173     {
174     hpet_resume_device();
175     - hpet_start_counter();
176     + hpet_restart_counter();
177     }
178    
179     static void hpet_enable_legacy_int(void)
180     @@ -264,6 +279,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
181    
182     switch (mode) {
183     case CLOCK_EVT_MODE_PERIODIC:
184     + hpet_stop_counter();
185     delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
186     delta >>= evt->shift;
187     now = hpet_readl(HPET_COUNTER);
188     @@ -274,14 +290,17 @@ static void hpet_set_mode(enum clock_event_mode mode,
189     cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
190     HPET_TN_SETVAL | HPET_TN_32BIT;
191     hpet_writel(cfg, HPET_Tn_CFG(timer));
192     - /*
193     - * The first write after writing TN_SETVAL to the
194     - * config register sets the counter value, the second
195     - * write sets the period.
196     - */
197     hpet_writel(cmp, HPET_Tn_CMP(timer));
198     udelay(1);
199     + /*
200     + * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
201     + * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
202     + * bit is automatically cleared after the first write.
203     + * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
204     + * Publication # 24674)
205     + */
206     hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
207     + hpet_start_counter();
208     break;
209    
210     case CLOCK_EVT_MODE_ONESHOT:
211     @@ -695,7 +714,7 @@ static struct clocksource clocksource_hpet = {
212     .mask = HPET_MASK,
213     .shift = HPET_SHIFT,
214     .flags = CLOCK_SOURCE_IS_CONTINUOUS,
215     - .resume = hpet_restart_counter,
216     + .resume = hpet_resume_counter,
217     #ifdef CONFIG_X86_64
218     .vread = vread_hpet,
219     #endif
220     @@ -707,7 +726,7 @@ static int hpet_clocksource_register(void)
221     cycle_t t1;
222    
223     /* Start the counter */
224     - hpet_start_counter();
225     + hpet_restart_counter();
226    
227     /* Verify whether hpet counter works */
228     t1 = read_hpet();
229     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
230     index 8005da2..2d2affd 100644
231     --- a/arch/x86/kvm/mmu.c
232     +++ b/arch/x86/kvm/mmu.c
233     @@ -2906,8 +2906,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
234    
235     static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
236     {
237     - kvm_x86_ops->tlb_flush(vcpu);
238     - set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
239     + kvm_set_cr3(vcpu, vcpu->arch.cr3);
240     return 1;
241     }
242    
243     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
244     index 92f0457..570184a 100644
245     --- a/arch/x86/kvm/x86.c
246     +++ b/arch/x86/kvm/x86.c
247     @@ -334,6 +334,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
248    
249     void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
250     {
251     + unsigned long old_cr4 = vcpu->arch.cr4;
252     + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
253     +
254     if (cr4 & CR4_RESERVED_BITS) {
255     printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
256     kvm_inject_gp(vcpu, 0);
257     @@ -347,7 +350,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
258     kvm_inject_gp(vcpu, 0);
259     return;
260     }
261     - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
262     + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
263     + && ((cr4 ^ old_cr4) & pdptr_bits)
264     && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
265     printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
266     kvm_inject_gp(vcpu, 0);
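
The kvm_set_cr4() change leans on the (new ^ old) & mask idiom: XOR exposes exactly the bits that flipped, and the mask keeps only the PDPTR-affecting ones, so the expensive PDPTE reload is skipped when nothing relevant changed. A tiny self-contained sketch with made-up flag values:

#include <stdio.h>

#define FLAG_PGE 0x80u
#define FLAG_PSE 0x10u
#define FLAG_PAE 0x20u

/* Nonzero iff any bit we care about differs between old and new. */
static int relevant_bits_changed(unsigned old_v, unsigned new_v)
{
    const unsigned mask = FLAG_PGE | FLAG_PSE | FLAG_PAE;

    return ((old_v ^ new_v) & mask) != 0;
}

int main(void)
{
    /* unrelated bit flipped: 0; PGE flipped: 1 */
    printf("%d\n", relevant_bits_changed(FLAG_PAE, FLAG_PAE | 0x1));
    printf("%d\n", relevant_bits_changed(FLAG_PAE, FLAG_PAE | FLAG_PGE));
    return 0;
}
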
267     diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
268     index 8f307d9..f46c340 100644
269     --- a/arch/x86/mm/hugetlbpage.c
270     +++ b/arch/x86/mm/hugetlbpage.c
271     @@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
272     unsigned long sbase = saddr & PUD_MASK;
273     unsigned long s_end = sbase + PUD_SIZE;
274    
275     + /* Allow segments to share if only one is marked locked */
276     + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
277     + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
278     +
279     /*
280     * match the virtual addresses, permission and the alignment of the
281     * page table page.
282     */
283     if (pmd_index(addr) != pmd_index(saddr) ||
284     - vma->vm_flags != svma->vm_flags ||
285     + vm_flags != svm_flags ||
286     sbase < svma->vm_start || svma->vm_end < s_end)
287     return 0;
288    
289     diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
290     index 9136946..d6033b8 100644
291     --- a/arch/x86/pci/mmconfig-shared.c
292     +++ b/arch/x86/pci/mmconfig-shared.c
293     @@ -254,7 +254,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
294     if (!fixmem32)
295     return AE_OK;
296     if ((mcfg_res->start >= fixmem32->address) &&
297     - (mcfg_res->end <= (fixmem32->address +
298     + (mcfg_res->end < (fixmem32->address +
299     fixmem32->address_length))) {
300     mcfg_res->flags = 1;
301     return AE_CTRL_TERMINATE;
302     @@ -271,7 +271,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
303     return AE_OK;
304    
305     if ((mcfg_res->start >= address.minimum) &&
306     - (mcfg_res->end <= (address.minimum + address.address_length))) {
307     + (mcfg_res->end < (address.minimum + address.address_length))) {
308     mcfg_res->flags = 1;
309     return AE_CTRL_TERMINATE;
310     }
311     @@ -297,7 +297,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
312     struct resource mcfg_res;
313    
314     mcfg_res.start = start;
315     - mcfg_res.end = end;
316     + mcfg_res.end = end - 1;
317     mcfg_res.flags = 0;
318    
319     acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
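
The three mmconfig hunks are all the same off-by-one: struct resource keeps an inclusive end (end == start + size - 1), so containment against a region of address_length bytes must use a strict '<', and a caller holding an exclusive end has to convert it first (end - 1). A small sketch with made-up addresses:

#include <stdio.h>

struct res { unsigned long start, end; };  /* end is INCLUSIVE */

static int contains(unsigned long base, unsigned long len, struct res r)
{
    return r.start >= base && r.end < base + len;
}

int main(void)
{
    /* A 0x1000-byte window at 0xe0000000 holds inclusive ends up to
     * 0xe0000fff; an exclusive end of 0xe0001000 is converted with -1. */
    struct res mcfg = { 0xe0000000ul, 0xe0001000ul - 1 };

    printf("%d\n", contains(0xe0000000ul, 0x1000ul, mcfg));  /* 1 */
    return 0;
}
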
320     diff --git a/crypto/api.c b/crypto/api.c
321     index 38a2bc0..e7aa72d 100644
322     --- a/crypto/api.c
323     +++ b/crypto/api.c
324     @@ -221,7 +221,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
325    
326     request_module(name);
327    
328     - if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
329     + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
330     + CRYPTO_ALG_NEED_FALLBACK) &&
331     snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
332     request_module(tmp);
333    
334     diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
335     index 7bc22a4..61f6b7a 100644
336     --- a/drivers/acpi/processor_idle.c
337     +++ b/drivers/acpi/processor_idle.c
338     @@ -212,6 +212,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
339     struct acpi_processor_power *pwr = &pr->power;
340     u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
341    
342     + if (boot_cpu_has(X86_FEATURE_AMDC1E))
343     + type = ACPI_STATE_C1;
344     +
345     /*
346     * Check, if one of the previous states already marked the lapic
347     * unstable
348     @@ -648,6 +651,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
349     switch (cx->type) {
350     case ACPI_STATE_C1:
351     cx->valid = 1;
352     + acpi_timer_check_state(i, pr, cx);
353     break;
354    
355     case ACPI_STATE_C2:
356     @@ -866,17 +870,19 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
357    
358     /* Do not access any ACPI IO ports in suspend path */
359     if (acpi_idle_suspend) {
360     - acpi_safe_halt();
361     local_irq_enable();
362     + cpu_relax();
363     return 0;
364     }
365    
366     + acpi_state_timer_broadcast(pr, cx, 1);
367     t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
368     acpi_idle_do_entry(cx);
369     t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
370    
371     local_irq_enable();
372     cx->usage++;
373     + acpi_state_timer_broadcast(pr, cx, 0);
374    
375     return ticks_elapsed_in_us(t1, t2);
376     }
377     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
378     index 8f90508..6971a12 100644
379     --- a/drivers/block/xen-blkfront.c
380     +++ b/drivers/block/xen-blkfront.c
381     @@ -977,8 +977,10 @@ static void backend_changed(struct xenbus_device *dev,
382     break;
383    
384     case XenbusStateClosing:
385     - if (info->gd == NULL)
386     - xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
387     + if (info->gd == NULL) {
388     + xenbus_frontend_closed(dev);
389     + break;
390     + }
391     bd = bdget_disk(info->gd, 0);
392     if (bd == NULL)
393     xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
394     diff --git a/drivers/char/random.c b/drivers/char/random.c
395     index 7c13581..1801245 100644
396     --- a/drivers/char/random.c
397     +++ b/drivers/char/random.c
398     @@ -1660,15 +1660,20 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
399     * value is not cryptographically secure but for several uses the cost of
400     * depleting entropy is too high
401     */
402     +DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
403     unsigned int get_random_int(void)
404     {
405     - /*
406     - * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
407     - * every second, from the entropy pool (and thus creates a limited
408     - * drain on it), and uses halfMD4Transform within the second. We
409     - * also mix it with jiffies and the PID:
410     - */
411     - return secure_ip_id((__force __be32)(current->pid + jiffies));
412     + struct keydata *keyptr;
413     + __u32 *hash = get_cpu_var(get_random_int_hash);
414     + int ret;
415     +
416     + keyptr = get_keyptr();
417     + hash[0] += current->pid + jiffies + get_cycles();
418     +
419     + ret = half_md4_transform(hash, keyptr->secret);
420     + put_cpu_var(get_random_int_hash);
421     +
422     + return ret;
423     }
424    
425     /*
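
The get_random_int() rewrite stops borrowing secure_ip_id()'s keyed generator and instead keeps per-CPU hash state, stirs in cheap per-call values (pid, jiffies, cycle counter) and folds them through half_md4_transform(). A loose, non-cryptographic userspace caricature of that shape, where mix32() is an arbitrary integer finalizer standing in for half_md4_transform():

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static unsigned int hash_state[4];  /* persistent state, like the per-CPU hash */

/* A common Wang/Jenkins-style finalizer; NOT cryptographically secure. */
static unsigned int mix32(unsigned int x)
{
    x = (x ^ 61) ^ (x >> 16);
    x += x << 3;
    x ^= x >> 4;
    x *= 0x27d4eb2d;
    x ^= x >> 15;
    return x;
}

static unsigned int get_random_int_sketch(void)
{
    /* stir in cheap per-call entropy, then run one mixing step */
    hash_state[0] += (unsigned int)getpid()
                   + (unsigned int)time(NULL)
                   + (unsigned int)clock();
    return mix32(hash_state[0] ^ hash_state[1]);
}

int main(void)
{
    printf("%08x %08x\n", get_random_int_sketch(), get_random_int_sketch());
    return 0;
}
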
426     diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
427     index ed306eb..0c2f55a 100644
428     --- a/drivers/char/tpm/tpm_bios.c
429     +++ b/drivers/char/tpm/tpm_bios.c
430     @@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
431     unsigned char * event_entry)
432     {
433     const char *name = "";
434     - char data[40] = "";
435     + /* 41 so there is room for 40 data and 1 nul */
436     + char data[41] = "";
437     int i, n_len = 0, d_len = 0;
438     struct tcpa_pc_event *pc_event;
439    
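
The tpm_bios fix is the classic NUL-terminator off-by-one: a string of up to 40 visible characters needs a 41-byte buffer. A standalone sketch of the safe pattern (sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_CHARS 40

int main(void)
{
    char data[MAX_CHARS + 1] = "";  /* 40 characters plus the '\0' */

    /* snprintf never writes more than sizeof(data) bytes, NUL included;
     * the %.*s precision caps the copied characters at MAX_CHARS. */
    snprintf(data, sizeof(data), "%.*s", MAX_CHARS,
             "0123456789012345678901234567890123456789XXXX");
    printf("%zu\n", strlen(data));  /* 40, safely terminated */
    return 0;
}
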
440     diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
441     index 3f0fdd1..856b3cc 100644
442     --- a/drivers/crypto/padlock-aes.c
443     +++ b/drivers/crypto/padlock-aes.c
444     @@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
445     MODULE_LICENSE("GPL");
446     MODULE_AUTHOR("Michal Ludvig");
447    
448     -MODULE_ALIAS("aes-all");
449     +MODULE_ALIAS("aes");
450     diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
451     index c31afbd..c41f339 100644
452     --- a/drivers/gpu/drm/r128/r128_cce.c
453     +++ b/drivers/gpu/drm/r128/r128_cce.c
454     @@ -511,9 +511,9 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
455    
456     #if __OS_HAS_AGP
457     if (!dev_priv->is_pci) {
458     - drm_core_ioremap(dev_priv->cce_ring, dev);
459     - drm_core_ioremap(dev_priv->ring_rptr, dev);
460     - drm_core_ioremap(dev->agp_buffer_map, dev);
461     + drm_core_ioremap_wc(dev_priv->cce_ring, dev);
462     + drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
463     + drm_core_ioremap_wc(dev->agp_buffer_map, dev);
464     if (!dev_priv->cce_ring->handle ||
465     !dev_priv->ring_rptr->handle ||
466     !dev->agp_buffer_map->handle) {
467     diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
468     index f0a0f72..5e60131 100644
469     --- a/drivers/hid/usbhid/hid-core.c
470     +++ b/drivers/hid/usbhid/hid-core.c
471     @@ -765,7 +765,7 @@ static int usbhid_parse(struct hid_device *hid)
472     goto err;
473     }
474    
475     - hid->quirks = quirks;
476     + hid->quirks |= quirks;
477    
478     return 0;
479     err:
480     diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
481     index b5e3b28..a1787fd 100644
482     --- a/drivers/hwmon/lm78.c
483     +++ b/drivers/hwmon/lm78.c
484     @@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
485     .name = "lm78",
486     },
487     .probe = lm78_isa_probe,
488     - .remove = lm78_isa_remove,
489     + .remove = __devexit_p(lm78_isa_remove),
490     };
491    
492    
493     diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
494     index af70777..ff8229c 100644
495     --- a/drivers/ide/ide-io.c
496     +++ b/drivers/ide/ide-io.c
497     @@ -967,7 +967,7 @@ void ide_timer_expiry (unsigned long data)
498     }
499     spin_lock_irq(&hwif->lock);
500     enable_irq(hwif->irq);
501     - if (startstop == ide_stopped) {
502     + if (startstop == ide_stopped && hwif->polling == 0) {
503     ide_unlock_port(hwif);
504     plug_device = 1;
505     }
506     @@ -1145,7 +1145,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
507     * same irq as is currently being serviced here, and Linux
508     * won't allow another of the same (on any CPU) until we return.
509     */
510     - if (startstop == ide_stopped) {
511     + if (startstop == ide_stopped && hwif->polling == 0) {
512     BUG_ON(hwif->handler);
513     ide_unlock_port(hwif);
514     plug_device = 1;
515     diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
516     index b1892bd..5defb72 100644
517     --- a/drivers/ide/ide-iops.c
518     +++ b/drivers/ide/ide-iops.c
519     @@ -587,8 +587,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
520    
521     /*
522     * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
523     - * We list them here and depend on the device side cable detection for them.
524     - *
525     * Some optical devices with the buggy firmwares have the same problem.
526     */
527     static const struct drive_list_entry ivb_list[] = {
528     @@ -632,10 +630,25 @@ u8 eighty_ninty_three (ide_drive_t *drive)
529     * - force bit13 (80c cable present) check also for !ivb devices
530     * (unless the slave device is pre-ATA3)
531     */
532     - if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
533     - (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
534     + if (id[ATA_ID_HW_CONFIG] & 0x4000)
535     return 1;
536    
537     + if (ivb) {
538     + const char *model = (char *)&id[ATA_ID_PROD];
539     +
540     + if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
541     + /*
542     + * These ATAPI devices always report 80c cable
543     + * so we have to depend on the host in this case.
544     + */
545     + if (hwif->cbl == ATA_CBL_PATA80)
546     + return 1;
547     + } else {
548     + /* Depend on the device side cable detection. */
549     + if (id[ATA_ID_HW_CONFIG] & 0x2000)
550     + return 1;
551     + }
552     + }
553     no_80w:
554     if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
555     return 0;
556     diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
557     index bddae2b..515fd4e 100644
558     --- a/drivers/ide/ide-pci-generic.c
559     +++ b/drivers/ide/ide-pci-generic.c
560     @@ -35,6 +35,16 @@ MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE st
561    
562     #define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
563    
564     +static void netcell_quirkproc(ide_drive_t *drive)
565     +{
566     + /* mark words 85-87 as valid */
567     + drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
568     +}
569     +
570     +static const struct ide_port_ops netcell_port_ops = {
571     + .quirkproc = netcell_quirkproc,
572     +};
573     +
574     #define DECLARE_GENERIC_PCI_DEV(extra_flags) \
575     { \
576     .name = DRV_NAME, \
577     @@ -76,6 +86,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
578    
579     { /* 6: Revolution */
580     .name = DRV_NAME,
581     + .port_ops = &netcell_port_ops,
582     .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
583     IDE_HFLAG_TRUST_BIOS_FOR_DMA |
584     IDE_HFLAG_OFF_BOARD,
585     diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
586     index 4e6181c..af7860c 100644
587     --- a/drivers/ide/ide-tape.c
588     +++ b/drivers/ide/ide-tape.c
589     @@ -654,12 +654,6 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
590     {
591     idetape_tape_t *tape = drive->driver_data;
592    
593     - if (drive->pc->c[0] == REQUEST_SENSE &&
594     - pc->c[0] == REQUEST_SENSE) {
595     - printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
596     - "Two request sense in serial were issued\n");
597     - }
598     -
599     if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
600     tape->failed_pc = pc;
601    
602     diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
603     index 6d9f810..635c787 100644
604     --- a/drivers/mtd/devices/mtd_dataflash.c
605     +++ b/drivers/mtd/devices/mtd_dataflash.c
606     @@ -184,7 +184,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
607     /* Calculate flash page address; use block erase (for speed) if
608     * we're at a block boundary and need to erase the whole block.
609     */
610     - pageaddr = div_u64(instr->len, priv->page_size);
611     + pageaddr = div_u64(instr->addr, priv->page_size);
612     do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
613     pageaddr = pageaddr << priv->page_offset;
614    
615     diff --git a/drivers/net/Makefile b/drivers/net/Makefile
616     index 471baaf..ac7b12c 100644
617     --- a/drivers/net/Makefile
618     +++ b/drivers/net/Makefile
619     @@ -100,7 +100,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
620     obj-$(CONFIG_NET) += Space.o loopback.o
621     obj-$(CONFIG_SEEQ8005) += seeq8005.o
622     obj-$(CONFIG_NET_SB1000) += sb1000.o
623     -obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
624     +obj-$(CONFIG_MAC8390) += mac8390.o
625     obj-$(CONFIG_APNE) += apne.o 8390.o
626     obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
627     obj-$(CONFIG_HP100) += hp100.o
628     diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
629     index 6b6530f..a7e688a 100644
630     --- a/drivers/net/bnx2.c
631     +++ b/drivers/net/bnx2.c
632     @@ -2585,6 +2585,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
633     /* Tell compiler that status block fields can change. */
634     barrier();
635     cons = *bnapi->hw_tx_cons_ptr;
636     + barrier();
637     if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
638     cons++;
639     return cons;
640     @@ -2864,6 +2865,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
641     /* Tell compiler that status block fields can change. */
642     barrier();
643     cons = *bnapi->hw_rx_cons_ptr;
644     + barrier();
645     if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
646     cons++;
647     return cons;
648     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
649     index 27fb7f5..77da4c5 100644
650     --- a/drivers/net/bonding/bond_alb.c
651     +++ b/drivers/net/bonding/bond_alb.c
652     @@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
653    
654     if (arp->op_code == htons(ARPOP_REPLY)) {
655     /* update rx hash table for this ARP */
656     - printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
657     - bond_dev->name);
658     bond = netdev_priv(bond_dev);
659     rlb_update_entry_from_arp(bond, arp);
660     pr_debug("Server received an ARP Reply from client\n");
661     @@ -1739,9 +1737,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
662     }
663     }
664    
665     - write_unlock_bh(&bond->curr_slave_lock);
666     - read_unlock(&bond->lock);
667     -
668     if (swap_slave) {
669     alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
670     alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
671     @@ -1749,16 +1744,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
672     alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
673     bond->alb_info.rlb_enabled);
674    
675     + read_lock(&bond->lock);
676     alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
677     if (bond->alb_info.rlb_enabled) {
678     /* inform clients mac address has changed */
679     rlb_req_update_slave_clients(bond, bond->curr_active_slave);
680     }
681     + read_unlock(&bond->lock);
682     }
683    
684     - read_lock(&bond->lock);
685     - write_lock_bh(&bond->curr_slave_lock);
686     -
687     return 0;
688     }
689    
690     diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
691     index d436e27..df6459c 100644
692     --- a/drivers/net/e1000/e1000_main.c
693     +++ b/drivers/net/e1000/e1000_main.c
694     @@ -4009,8 +4009,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
695     PCI_DMA_FROMDEVICE);
696    
697     length = le16_to_cpu(rx_desc->length);
698     -
699     - if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
700     + /* !EOP means multiple descriptors were used to store a single
701     + * packet, also make sure the frame isn't just CRC only */
702     + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
703     /* All receives must fit into a single buffer */
704     E1000_DBG("%s: Receive packet consumed multiple"
705     " buffers\n", netdev->name);
706     diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
707     index 3c831f1..44ceb36 100644
708     --- a/drivers/net/igb/igb_ethtool.c
709     +++ b/drivers/net/igb/igb_ethtool.c
710     @@ -2024,6 +2024,10 @@ static struct ethtool_ops igb_ethtool_ops = {
711     .get_ethtool_stats = igb_get_ethtool_stats,
712     .get_coalesce = igb_get_coalesce,
713     .set_coalesce = igb_set_coalesce,
714     + .get_flags = ethtool_op_get_flags,
715     +#ifdef CONFIG_IGB_LRO
716     + .set_flags = ethtool_op_set_flags,
717     +#endif
718     };
719    
720     void igb_set_ethtool_ops(struct net_device *netdev)
721     diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
722     index 8e88486..f26667d 100644
723     --- a/drivers/net/mac8390.c
724     +++ b/drivers/net/mac8390.c
725     @@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
726     if (!MACH_IS_MAC)
727     return ERR_PTR(-ENODEV);
728    
729     - dev = alloc_ei_netdev();
730     + dev = ____alloc_ei_netdev(0);
731     if (!dev)
732     return ERR_PTR(-ENOMEM);
733    
734     @@ -481,10 +481,10 @@ void cleanup_module(void)
735     static const struct net_device_ops mac8390_netdev_ops = {
736     .ndo_open = mac8390_open,
737     .ndo_stop = mac8390_close,
738     - .ndo_start_xmit = ei_start_xmit,
739     - .ndo_tx_timeout = ei_tx_timeout,
740     - .ndo_get_stats = ei_get_stats,
741     - .ndo_set_multicast_list = ei_set_multicast_list,
742     + .ndo_start_xmit = __ei_start_xmit,
743     + .ndo_tx_timeout = __ei_tx_timeout,
744     + .ndo_get_stats = __ei_get_stats,
745     + .ndo_set_multicast_list = __ei_set_multicast_list,
746     .ndo_validate_addr = eth_validate_addr,
747     .ndo_set_mac_address = eth_mac_addr,
748     .ndo_change_mtu = eth_change_mtu,
749     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
750     index 7e24b50..f5824af 100644
751     --- a/drivers/net/macvlan.c
752     +++ b/drivers/net/macvlan.c
753     @@ -329,7 +329,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
754     const struct macvlan_dev *vlan = netdev_priv(dev);
755     struct net_device *lowerdev = vlan->lowerdev;
756    
757     - if (lowerdev->ethtool_ops->get_rx_csum == NULL)
758     + if (lowerdev->ethtool_ops == NULL ||
759     + lowerdev->ethtool_ops->get_rx_csum == NULL)
760     return 0;
761     return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
762     }
763     @@ -340,7 +341,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
764     const struct macvlan_dev *vlan = netdev_priv(dev);
765     struct net_device *lowerdev = vlan->lowerdev;
766    
767     - if (!lowerdev->ethtool_ops->get_settings)
768     + if (!lowerdev->ethtool_ops ||
769     + !lowerdev->ethtool_ops->get_settings)
770     return -EOPNOTSUPP;
771    
772     return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
773     @@ -351,7 +353,8 @@ static u32 macvlan_ethtool_get_flags(struct net_device *dev)
774     const struct macvlan_dev *vlan = netdev_priv(dev);
775     struct net_device *lowerdev = vlan->lowerdev;
776    
777     - if (!lowerdev->ethtool_ops->get_flags)
778     + if (!lowerdev->ethtool_ops ||
779     + !lowerdev->ethtool_ops->get_flags)
780     return 0;
781     return lowerdev->ethtool_ops->get_flags(lowerdev);
782     }
783     diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
784     index e9c1296..53ff238 100644
785     --- a/drivers/net/myri10ge/myri10ge.c
786     +++ b/drivers/net/myri10ge/myri10ge.c
787     @@ -2446,6 +2446,7 @@ static int myri10ge_open(struct net_device *dev)
788     lro_mgr->lro_arr = ss->rx_done.lro_desc;
789     lro_mgr->get_frag_header = myri10ge_get_frag_header;
790     lro_mgr->max_aggr = myri10ge_lro_max_pkts;
791     + lro_mgr->frag_align_pad = 2;
792     if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
793     lro_mgr->max_aggr = MAX_SKB_FRAGS;
794    
795     diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
796     index 9201e5a..0b8ec7f 100644
797     --- a/drivers/net/r8169.c
798     +++ b/drivers/net/r8169.c
799     @@ -3554,54 +3554,64 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
800     int handled = 0;
801     int status;
802    
803     + /* loop handling interrupts until we have no new ones or
804     + * we hit a invalid/hotplug case.
805     + */
806     status = RTL_R16(IntrStatus);
807     + while (status && status != 0xffff) {
808     + handled = 1;
809    
810     - /* hotplug/major error/no more work/shared irq */
811     - if ((status == 0xffff) || !status)
812     - goto out;
813     -
814     - handled = 1;
815     + /* Handle all of the error cases first. These will reset
816     + * the chip, so just exit the loop.
817     + */
818     + if (unlikely(!netif_running(dev))) {
819     + rtl8169_asic_down(ioaddr);
820     + break;
821     + }
822    
823     - if (unlikely(!netif_running(dev))) {
824     - rtl8169_asic_down(ioaddr);
825     - goto out;
826     - }
827     + /* Work around for rx fifo overflow */
828     + if (unlikely(status & RxFIFOOver) &&
829     + (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
830     + netif_stop_queue(dev);
831     + rtl8169_tx_timeout(dev);
832     + break;
833     + }
834    
835     - status &= tp->intr_mask;
836     - RTL_W16(IntrStatus,
837     - (status & RxFIFOOver) ? (status | RxOverflow) : status);
838     + if (unlikely(status & SYSErr)) {
839     + rtl8169_pcierr_interrupt(dev);
840     + break;
841     + }
842    
843     - if (!(status & tp->intr_event))
844     - goto out;
845     + if (status & LinkChg)
846     + rtl8169_check_link_status(dev, tp, ioaddr);
847    
848     - /* Work around for rx fifo overflow */
849     - if (unlikely(status & RxFIFOOver) &&
850     - (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
851     - netif_stop_queue(dev);
852     - rtl8169_tx_timeout(dev);
853     - goto out;
854     - }
855     + /* We need to see the lastest version of tp->intr_mask to
856     + * avoid ignoring an MSI interrupt and having to wait for
857     + * another event which may never come.
858     + */
859     + smp_rmb();
860     + if (status & tp->intr_mask & tp->napi_event) {
861     + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
862     + tp->intr_mask = ~tp->napi_event;
863     +
864     + if (likely(netif_rx_schedule_prep(&tp->napi)))
865     + __netif_rx_schedule(&tp->napi);
866     + else if (netif_msg_intr(tp)) {
867     + printk(KERN_INFO "%s: interrupt %04x in poll\n",
868     + dev->name, status);
869     + }
870     + }
871    
872     - if (unlikely(status & SYSErr)) {
873     - rtl8169_pcierr_interrupt(dev);
874     - goto out;
875     + /* We only get a new MSI interrupt when all active irq
876     + * sources on the chip have been acknowledged. So, ack
877     + * everything we've seen and check if new sources have become
878     + * active to avoid blocking all interrupts from the chip.
879     + */
880     + RTL_W16(IntrStatus,
881     + (status & RxFIFOOver) ? (status | RxOverflow) : status);
882     + status = RTL_R16(IntrStatus);
883     }
884    
885     - if (status & LinkChg)
886     - rtl8169_check_link_status(dev, tp, ioaddr);
887     -
888     - if (status & tp->napi_event) {
889     - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
890     - tp->intr_mask = ~tp->napi_event;
891     -
892     - if (likely(netif_rx_schedule_prep(&tp->napi)))
893     - __netif_rx_schedule(&tp->napi);
894     - else if (netif_msg_intr(tp)) {
895     - printk(KERN_INFO "%s: interrupt %04x in poll\n",
896     - dev->name, status);
897     - }
898     - }
899     -out:
900     return IRQ_RETVAL(handled);
901     }
902    
903     @@ -3617,13 +3627,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
904    
905     if (work_done < budget) {
906     netif_rx_complete(napi);
907     - tp->intr_mask = 0xffff;
908     - /*
909     - * 20040426: the barrier is not strictly required but the
910     - * behavior of the irq handler could be less predictable
911     - * without it. Btw, the lack of flush for the posted pci
912     - * write is safe - FR
913     +
914     + /* We need for force the visibility of tp->intr_mask
915     + * for other CPUs, as we can loose an MSI interrupt
916     + * and potentially wait for a retransmit timeout if we don't.
917     + * The posted write to IntrMask is safe, as it will
918     + * eventually make it to the chip and we won't loose anything
919     + * until it does.
920     */
921     + tp->intr_mask = 0xffff;
922     smp_wmb();
923     RTL_W16(IntrMask, tp->intr_event);
924     }
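
The reworked rtl8169_interrupt() loops because, with MSI, a new interrupt message is only generated once every active source has been acknowledged; handling one batch, acking it, and re-reading status avoids losing events. A device-free caricature of that loop, where read_status()/ack() fake the chip's IntrStatus register and 0xffff means the device vanished:

#include <stdio.h>

static unsigned short pending = 0x0005;  /* two fake event bits raised */

static unsigned short read_status(void) { return pending; }
static void ack(unsigned short bits)    { pending &= ~bits; }

int main(void)
{
    int handled = 0;
    unsigned short status = read_status();

    while (status && status != 0xffff) {  /* 0xffff: hot-unplugged */
        handled = 1;
        /* ... dispatch per-bit work here ... */
        ack(status);                      /* ack everything we saw */
        status = read_status();           /* anything newly raised? */
    }
    printf("handled=%d\n", handled);      /* handled=1 */
    return 0;
}
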
925     diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
926     index 70a8b21..ab621b0 100644
927     --- a/drivers/net/wireless/iwlwifi/iwl-sta.c
928     +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
929     @@ -708,6 +708,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
930     {
931     unsigned long flags;
932     int ret = 0;
933     + __le16 key_flags = 0;
934     +
935     + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
936     + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
937     + key_flags &= ~STA_KEY_FLG_INVALID;
938     +
939     + if (sta_id == priv->hw_params.bcast_sta_id)
940     + key_flags |= STA_KEY_MULTICAST_MSK;
941    
942     keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
943     keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
944     @@ -727,6 +735,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
945     WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
946     "no space for new kew");
947    
948     + priv->stations[sta_id].sta.key.key_flags = key_flags;
949     +
950     +
951     /* This copy is acutally not needed: we get the key with each TX */
952     memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
953    
954     @@ -743,9 +754,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
955     {
956     u8 sta_id = IWL_INVALID_STATION;
957     unsigned long flags;
958     - __le16 key_flags = 0;
959     int i;
960     - DECLARE_MAC_BUF(mac);
961    
962     sta_id = iwl_find_station(priv, addr);
963     if (sta_id == IWL_INVALID_STATION) {
964     @@ -760,16 +769,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
965     return;
966     }
967    
968     - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
969     - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
970     - key_flags &= ~STA_KEY_FLG_INVALID;
971     -
972     - if (sta_id == priv->hw_params.bcast_sta_id)
973     - key_flags |= STA_KEY_MULTICAST_MSK;
974     -
975     spin_lock_irqsave(&priv->sta_lock, flags);
976    
977     - priv->stations[sta_id].sta.key.key_flags = key_flags;
978     priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
979    
980     for (i = 0; i < 5; i++)
981     diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
982     index c03f1d2..faa0fcf 100644
983     --- a/drivers/scsi/3w-xxxx.c
984     +++ b/drivers/scsi/3w-xxxx.c
985     @@ -6,7 +6,7 @@
986     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
987     Brad Strand <linux@3ware.com>
988    
989     - Copyright (C) 1999-2007 3ware Inc.
990     + Copyright (C) 1999-2009 3ware Inc.
991    
992     Kernel compatiblity By: Andre Hedrick <andre@suse.com>
993     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
994     @@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
995     {
996     dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
997    
998     - scsi_dma_unmap(cmd);
999     + if (cmd->SCp.phase == TW_PHASE_SGLIST)
1000     + scsi_dma_unmap(cmd);
1001     } /* End tw_unmap_scsi_data() */
1002    
1003     /* This function will reset a device extension */
1004     diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
1005     index 0742e68..e938615 100644
1006     --- a/drivers/scsi/3w-xxxx.h
1007     +++ b/drivers/scsi/3w-xxxx.h
1008     @@ -6,7 +6,7 @@
1009     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
1010     Brad Strand <linux@3ware.com>
1011    
1012     - Copyright (C) 1999-2007 3ware Inc.
1013     + Copyright (C) 1999-2009 3ware Inc.
1014    
1015     Kernel compatiblity By: Andre Hedrick <andre@suse.com>
1016     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
1017     diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
1018     index 2b7531d..08eefec 100644
1019     --- a/drivers/serial/icom.c
1020     +++ b/drivers/serial/icom.c
1021     @@ -1482,8 +1482,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
1022    
1023     free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
1024     iounmap(icom_adapter->base_addr);
1025     - icom_free_adapter(icom_adapter);
1026     pci_release_regions(icom_adapter->pci_dev);
1027     + icom_free_adapter(icom_adapter);
1028     }
1029    
1030     static void icom_kref_release(struct kref *kref)
1031     diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
1032     index 28d2c8d..28b07ce 100644
1033     --- a/drivers/serial/mpc52xx_uart.c
1034     +++ b/drivers/serial/mpc52xx_uart.c
1035     @@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
1036     pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
1037     co, co->index, options);
1038    
1039     - if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
1040     + if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
1041     pr_debug("PSC%x out of range\n", co->index);
1042     return -EINVAL;
1043     }
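
The mpc52xx_console_setup() change is a plain bounds-check off-by-one: with MPC52xx_PSC_MAXNUM ports the valid indices run 0..MAXNUM-1, so the reject test needs '>=' rather than '>'. In miniature:

#include <stdio.h>

#define MAXNUM 3

static int index_ok(int idx)
{
    return idx >= 0 && idx < MAXNUM;  /* idx == MAXNUM is one past the end */
}

int main(void)
{
    printf("%d %d\n", index_ok(2), index_ok(MAXNUM));  /* 1 0 */
    return 0;
}
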
1044     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1045     index b3d5a23..7f2909a 100644
1046     --- a/drivers/usb/class/cdc-acm.c
1047     +++ b/drivers/usb/class/cdc-acm.c
1048     @@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
1049     tty->driver_data = acm;
1050     acm->tty = tty;
1051    
1052     - /* force low_latency on so that our tty_push actually forces the data through,
1053     - otherwise it is scheduled, and with high data rates data can get lost. */
1054     - tty->low_latency = 1;
1055     -
1056     if (usb_autopm_get_interface(acm->control) < 0)
1057     goto early_bail;
1058     else
1059     diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
1060     index b899f1a..f14d74f 100644
1061     --- a/drivers/usb/host/isp1760-hcd.c
1062     +++ b/drivers/usb/host/isp1760-hcd.c
1063     @@ -1644,6 +1644,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1064     u32 reg_base, or_reg, skip_reg;
1065     unsigned long flags;
1066     struct ptd ptd;
1067     + packet_enqueue *pe;
1068    
1069     switch (usb_pipetype(urb->pipe)) {
1070     case PIPE_ISOCHRONOUS:
1071     @@ -1655,6 +1656,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1072     reg_base = INT_REGS_OFFSET;
1073     or_reg = HC_INT_IRQ_MASK_OR_REG;
1074     skip_reg = HC_INT_PTD_SKIPMAP_REG;
1075     + pe = enqueue_an_INT_packet;
1076     break;
1077    
1078     default:
1079     @@ -1662,6 +1664,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1080     reg_base = ATL_REGS_OFFSET;
1081     or_reg = HC_ATL_IRQ_MASK_OR_REG;
1082     skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1083     + pe = enqueue_an_ATL_packet;
1084     break;
1085     }
1086    
1087     @@ -1673,6 +1676,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1088     u32 skip_map;
1089     u32 or_map;
1090     struct isp1760_qtd *qtd;
1091     + struct isp1760_qh *qh = ints->qh;
1092    
1093     skip_map = isp1760_readl(hcd->regs + skip_reg);
1094     skip_map |= 1 << i;
1095     @@ -1685,8 +1689,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1096     priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
1097     + i * sizeof(ptd), sizeof(ptd));
1098     qtd = ints->qtd;
1099     -
1100     - clean_up_qtdlist(qtd);
1101     + qtd = clean_up_qtdlist(qtd);
1102    
1103     free_mem(priv, ints->payload);
1104    
1105     @@ -1697,7 +1700,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1106     ints->payload = 0;
1107    
1108     isp1760_urb_done(priv, urb, status);
1109     + if (qtd)
1110     + pe(hcd, qh, qtd);
1111     break;
1112     +
1113     + } else if (ints->qtd) {
1114     + struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
1115     +
1116     + for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
1117     + if (qtd->urb == urb) {
1118     + prev_qtd->hw_next = clean_up_qtdlist(qtd);
1119     + isp1760_urb_done(priv, urb, status);
1120     + break;
1121     + }
1122     + prev_qtd = qtd;
1123     + }
1124     + /* we found the urb before the end of the list */
1125     + if (qtd)
1126     + break;
1127     }
1128     ints++;
1129     }
1130     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1131     index 5daa517..4423875 100644
1132     --- a/drivers/usb/serial/ftdi_sio.c
1133     +++ b/drivers/usb/serial/ftdi_sio.c
1134     @@ -1485,14 +1485,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
1135    
1136     remove_sysfs_attrs(port);
1137    
1138     - /* all open ports are closed at this point
1139     - * (by usbserial.c:__serial_close, which calls ftdi_close)
1140     - */
1141     -
1142     - if (priv) {
1143     - usb_set_serial_port_data(port, NULL);
1144     - kref_put(&priv->kref, ftdi_sio_priv_release);
1145     - }
1146     + kref_put(&priv->kref, ftdi_sio_priv_release);
1147    
1148     return 0;
1149     }
1150     diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1151     index 4b36d88..61b7d9e 100644
1152     --- a/drivers/usb/serial/usb-serial.c
1153     +++ b/drivers/usb/serial/usb-serial.c
1154     @@ -978,6 +978,7 @@ int usb_serial_probe(struct usb_interface *interface,
1155     if (retval > 0) {
1156     /* quietly accept this device, but don't bind to a
1157     serial port as it's about to disappear */
1158     + serial->num_ports = 0;
1159     goto exit;
1160     }
1161     }
1162     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1163     index 90909f9..a2bd86e 100644
1164     --- a/fs/ext4/ext4.h
1165     +++ b/fs/ext4/ext4.h
1166     @@ -248,6 +248,30 @@ struct flex_groups {
1167     #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
1168     #define EXT4_FL_USER_MODIFIABLE 0x000B80FF /* User modifiable flags */
1169    
1170     +/* Flags that should be inherited by new inodes from their parent. */
1171     +#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
1172     + EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
1173     + EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
1174     + EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
1175     + EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
1176     +
1177     +/* Flags that are appropriate for regular files (all but dir-specific ones). */
1178     +#define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL))
1179     +
1180     +/* Flags that are appropriate for non-directories/regular files. */
1181     +#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
1182     +
1183     +/* Mask out flags that are inappropriate for the given type of inode. */
1184     +static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
1185     +{
1186     + if (S_ISDIR(mode))
1187     + return flags;
1188     + else if (S_ISREG(mode))
1189     + return flags & EXT4_REG_FLMASK;
1190     + else
1191     + return flags & EXT4_OTHER_FLMASK;
1192     +}
1193     +
1194     /*
1195     * Inode dynamic state flags
1196     */
1197     @@ -255,6 +279,7 @@ struct flex_groups {
1198     #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
1199     #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
1200     #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
1201     +#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
1202    
1203     /* Used to pass group descriptor data when online resize is done */
1204     struct ext4_new_group_input {
1205     @@ -302,7 +327,9 @@ struct ext4_new_group_data {
1206     #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
1207     #define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
1208     #define EXT4_IOC_MIGRATE _IO('f', 9)
1209     + /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
1210     /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
1211     +#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
1212    
1213     /*
1214     * ioctl commands in 32 bit emulation
1215     @@ -530,7 +557,7 @@ do { \
1216     #define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */
1217     #define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */
1218     #define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
1219     -#define EXT4_MOUNT_RESERVATION 0x10000 /* Preallocation */
1220     +#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
1221     #define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
1222     #define EXT4_MOUNT_NOBH 0x40000 /* No bufferheads */
1223     #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
1224     @@ -1091,6 +1118,7 @@ extern int ext4_can_truncate(struct inode *inode);
1225     extern void ext4_truncate(struct inode *);
1226     extern void ext4_set_inode_flags(struct inode *);
1227     extern void ext4_get_inode_flags(struct ext4_inode_info *);
1228     +extern int ext4_alloc_da_blocks(struct inode *inode);
1229     extern void ext4_set_aops(struct inode *inode);
1230     extern int ext4_writepage_trans_blocks(struct inode *);
1231     extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
1232     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1233     index e0aa4fe..8723d0b 100644
1234     --- a/fs/ext4/extents.c
1235     +++ b/fs/ext4/extents.c
1236     @@ -1740,11 +1740,13 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1237     {
1238     struct ext4_ext_cache *cex;
1239     BUG_ON(len == 0);
1240     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1241     cex = &EXT4_I(inode)->i_cached_extent;
1242     cex->ec_type = type;
1243     cex->ec_block = block;
1244     cex->ec_len = len;
1245     cex->ec_start = start;
1246     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1247     }
1248    
1249     /*
1250     @@ -1801,12 +1803,17 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1251     struct ext4_extent *ex)
1252     {
1253     struct ext4_ext_cache *cex;
1254     + int ret = EXT4_EXT_CACHE_NO;
1255    
1256     + /*
1257     + * We borrow i_block_reservation_lock to protect i_cached_extent
1258     + */
1259     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1260     cex = &EXT4_I(inode)->i_cached_extent;
1261    
1262     /* has cache valid data? */
1263     if (cex->ec_type == EXT4_EXT_CACHE_NO)
1264     - return EXT4_EXT_CACHE_NO;
1265     + goto errout;
1266    
1267     BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1268     cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1269     @@ -1817,11 +1824,11 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1270     ext_debug("%u cached by %u:%u:%llu\n",
1271     block,
1272     cex->ec_block, cex->ec_len, cex->ec_start);
1273     - return cex->ec_type;
1274     + ret = cex->ec_type;
1275     }
1276     -
1277     - /* not in cache */
1278     - return EXT4_EXT_CACHE_NO;
1279     +errout:
1280     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1281     + return ret;
1282     }
1283    
1284     /*
1285     @@ -2776,6 +2783,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
1286     if (allocated > max_blocks)
1287     allocated = max_blocks;
1288     set_buffer_unwritten(bh_result);
1289     + bh_result->b_bdev = inode->i_sb->s_bdev;
1290     + bh_result->b_blocknr = newblock;
1291     goto out2;
1292     }
1293    
1294     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1295     index f731cb5..588af8c 100644
1296     --- a/fs/ext4/file.c
1297     +++ b/fs/ext4/file.c
1298     @@ -33,9 +33,14 @@
1299     */
1300     static int ext4_release_file(struct inode *inode, struct file *filp)
1301     {
1302     + if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
1303     + ext4_alloc_da_blocks(inode);
1304     + EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
1305     + }
1306     /* if we are the last writer on the inode, drop the block reservation */
1307     if ((filp->f_mode & FMODE_WRITE) &&
1308     - (atomic_read(&inode->i_writecount) == 1))
1309     + (atomic_read(&inode->i_writecount) == 1) &&
1310     + !EXT4_I(inode)->i_reserved_data_blocks)
1311     {
1312     down_write(&EXT4_I(inode)->i_data_sem);
1313     ext4_discard_preallocations(inode);
1314     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1315     index 2d2b358..345cba1 100644
1316     --- a/fs/ext4/ialloc.c
1317     +++ b/fs/ext4/ialloc.c
1318     @@ -720,11 +720,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
1319     ret2 = find_group_flex(sb, dir, &group);
1320     if (ret2 == -1) {
1321     ret2 = find_group_other(sb, dir, &group);
1322     - if (ret2 == 0 && once)
1323     + if (ret2 == 0 && once) {
1324     once = 0;
1325     printk(KERN_NOTICE "ext4: find_group_flex "
1326     "failed, fallback succeeded dir %lu\n",
1327     dir->i_ino);
1328     + }
1329     }
1330     goto got_group;
1331     }
1332     @@ -885,16 +886,12 @@ got:
1333     ei->i_disksize = 0;
1334    
1335     /*
1336     - * Don't inherit extent flag from directory. We set extent flag on
1337     - * newly created directory and file only if -o extent mount option is
1338     - * specified
1339     + * Don't inherit extent flag from directory, amongst others. We set
1340     + * extent flag on newly created directory and file only if -o extent
1341     + * mount option is specified
1342     */
1343     - ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
1344     - if (S_ISLNK(mode))
1345     - ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
1346     - /* dirsync only applies to directories */
1347     - if (!S_ISDIR(mode))
1348     - ei->i_flags &= ~EXT4_DIRSYNC_FL;
1349     + ei->i_flags =
1350     + ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1351     ei->i_file_acl = 0;
1352     ei->i_dtime = 0;
1353     ei->i_block_group = group;
1354     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1355     index 2c0439d..725f20a 100644
1356     --- a/fs/ext4/inode.c
1357     +++ b/fs/ext4/inode.c
1358     @@ -1036,8 +1036,15 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1359     /* update per-inode reservations */
1360     BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
1361     EXT4_I(inode)->i_reserved_data_blocks -= used;
1362     -
1363     spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1364     +
1365     + /*
1366     + * If we have done all the pending block allocations and if
1367     + * there aren't any writers on the inode, we can discard the
1368     + * inode's preallocations.
1369     + */
1370     + if (!total && (atomic_read(&inode->i_writecount) == 0))
1371     + ext4_discard_preallocations(inode);
1372     }
1373    
1374     /*
1375     @@ -1069,6 +1076,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1376     int retval;
1377    
1378     clear_buffer_mapped(bh);
1379     + clear_buffer_unwritten(bh);
1380    
1381     /*
1382     * Try to see if we can get the block without requesting
1383     @@ -1099,6 +1107,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1384     return retval;
1385    
1386     /*
1387     + * When we call get_blocks without the create flag, the
1388     + * BH_Unwritten flag could have gotten set if the blocks
1389     + * requested were part of a uninitialized extent. We need to
1390     + * clear this flag now that we are committed to convert all or
1391     + * part of the uninitialized extent to be an initialized
1392     + * extent. This is because we need to avoid the combination
1393     + * of BH_Unwritten and BH_Mapped flags being simultaneously
1394     + * set on the buffer_head.
1395     + */
1396     + clear_buffer_unwritten(bh);
1397     +
1398     + /*
1399     * New blocks allocate and/or writing to uninitialized extent
1400     * will possibly result in updating i_data, so we take
1401     * the write lock of i_data_sem, and call get_blocks()
1402     @@ -2213,6 +2233,10 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1403     struct buffer_head *bh_result, int create)
1404     {
1405     int ret = 0;
1406     + sector_t invalid_block = ~((sector_t) 0xffff);
1407     +
1408     + if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1409     + invalid_block = ~0;
1410    
1411     BUG_ON(create == 0);
1412     BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1413     @@ -2234,11 +2258,18 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1414     /* not enough space to reserve */
1415     return ret;
1416    
1417     - map_bh(bh_result, inode->i_sb, 0);
1418     + map_bh(bh_result, inode->i_sb, invalid_block);
1419     set_buffer_new(bh_result);
1420     set_buffer_delay(bh_result);
1421     } else if (ret > 0) {
1422     bh_result->b_size = (ret << inode->i_blkbits);
1423     + /*
1424     + * With sub-block writes into unwritten extents
1425     + * we also need to mark the buffer as new so that
1426     + * the unwritten parts of the buffer get correctly zeroed.
1427     + */
1428     + if (buffer_unwritten(bh_result))
1429     + set_buffer_new(bh_result);
1430     ret = 0;
1431     }
1432    
1433     @@ -2816,6 +2847,48 @@ out:
1434     return;
1435     }
1436    
1437     +/*
1438     + * Force all delayed allocation blocks to be allocated for a given inode.
1439     + */
1440     +int ext4_alloc_da_blocks(struct inode *inode)
1441     +{
1442     + if (!EXT4_I(inode)->i_reserved_data_blocks &&
1443     + !EXT4_I(inode)->i_reserved_meta_blocks)
1444     + return 0;
1445     +
1446     + /*
1447     + * We do something simple for now. The filemap_flush() will
1448     + * also start triggering a write of the data blocks, which is
1449     + * not strictly speaking necessary (and for users of
1450     + * laptop_mode, not even desirable). However, to do otherwise
1451     + * would require replicating code paths in:
1452     + *
1453     + * ext4_da_writepages() ->
1454     + * write_cache_pages() ---> (via passed in callback function)
1455     + * __mpage_da_writepage() -->
1456     + * mpage_add_bh_to_extent()
1457     + * mpage_da_map_blocks()
1458     + *
1459     + * The problem is that write_cache_pages(), located in
1460     + * mm/page-writeback.c, marks pages clean in preparation for
1461     + * doing I/O, which is not desirable if we're not planning on
1462     + * doing I/O at all.
1463     + *
1464     + * We could call write_cache_pages(), and then redirty all of
1465     + * the pages by calling redirty_page_for_writepage() but that
1466     + * would be ugly in the extreme. So instead we would need to
1467     + * replicate parts of the code in the above functions,
1468     + * simplifying them because we wouldn't actually intend to
1469     + * write out the pages, but rather only collect contiguous
1470     + * logical block extents, call the multi-block allocator, and
1471     + * then update the buffer heads with the block allocations.
1472     + *
1473     + * For now, though, we'll cheat by calling filemap_flush(),
1474     + * which will map the blocks, and start the I/O, but not
1475     + * actually wait for the I/O to complete.
1476     + */
1477     + return filemap_flush(inode->i_mapping);
1478     +}
1479    
1480     /*
1481     * bmap() is special. It gets used by applications such as lilo and by
1482     @@ -3838,6 +3911,9 @@ void ext4_truncate(struct inode *inode)
1483     if (!ext4_can_truncate(inode))
1484     return;
1485    
1486     + if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
1487     + ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
1488     +
1489     if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1490     ext4_ext_truncate(inode);
1491     return;
1492     @@ -4248,11 +4324,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1493     ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1494     inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
1495     ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
1496     - if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
1497     - cpu_to_le32(EXT4_OS_HURD)) {
1498     + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
1499     ei->i_file_acl |=
1500     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
1501     - }
1502     inode->i_size = ext4_isize(raw_inode);
1503     ei->i_disksize = inode->i_size;
1504     inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1505     @@ -4299,6 +4373,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1506     (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
1507     }
1508    
1509     + if (ei->i_file_acl &&
1510     + ((ei->i_file_acl <
1511     + (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
1512     + EXT4_SB(sb)->s_gdb_count)) ||
1513     + (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
1514     + ext4_error(sb, __func__,
1515     + "bad extended attribute block %llu in inode #%lu",
1516     + ei->i_file_acl, inode->i_ino);
1517     + ret = -EIO;
1518     + goto bad_inode;
1519     + }
1520     +
1521     if (S_ISREG(inode->i_mode)) {
1522     inode->i_op = &ext4_file_inode_operations;
1523     inode->i_fop = &ext4_file_operations;
1524     @@ -4315,7 +4401,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1525     inode->i_op = &ext4_symlink_inode_operations;
1526     ext4_set_aops(inode);
1527     }
1528     - } else {
1529     + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
1530     + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1531     inode->i_op = &ext4_special_inode_operations;
1532     if (raw_inode->i_block[0])
1533     init_special_inode(inode, inode->i_mode,
1534     @@ -4323,6 +4410,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1535     else
1536     init_special_inode(inode, inode->i_mode,
1537     new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1538     + } else {
1539     + brelse(bh);
1540     + ret = -EIO;
1541     + ext4_error(inode->i_sb, __func__,
1542     + "bogus i_mode (%o) for inode=%lu",
1543     + inode->i_mode, inode->i_ino);
1544     + goto bad_inode;
1545     }
1546     brelse(iloc.bh);
1547     ext4_set_inode_flags(inode);
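
The delayed-allocation hunk above stops mapping not-yet-allocated buffers to block 0, which is a perfectly valid on-disk block, and instead uses a sentinel block number guaranteed to lie beyond the end of the filesystem. A minimal user-space sketch of that sentinel choice, assuming a 64-bit sector_t; names outside the copied logic are illustrative:

    #include <stdio.h>

    typedef unsigned long long sector_t;    /* assumed 64-bit */

    /* Mirrors the hunk: pick a block number that cannot be a real block,
     * so a delayed (unallocated) buffer never aliases block 0. */
    static sector_t pick_invalid_block(sector_t fs_blocks_count)
    {
        sector_t invalid_block = ~((sector_t) 0xffff);

        /* Fall back to all-ones on an implausibly huge filesystem. */
        if (invalid_block < fs_blocks_count)
            invalid_block = ~(sector_t) 0;
        return invalid_block;
    }

    int main(void)
    {
        /* 1 TiB filesystem with 4 KiB blocks = 2^28 blocks */
        printf("sentinel: 0x%llx\n", pick_invalid_block(1ULL << 28));
        return 0;
    }
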
1548     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1549     index 42dc83f..91e75f7 100644
1550     --- a/fs/ext4/ioctl.c
1551     +++ b/fs/ext4/ioctl.c
1552     @@ -48,8 +48,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1553     if (err)
1554     return err;
1555    
1556     - if (!S_ISDIR(inode->i_mode))
1557     - flags &= ~EXT4_DIRSYNC_FL;
1558     + flags = ext4_mask_flags(inode->i_mode, flags);
1559    
1560     err = -EPERM;
1561     mutex_lock(&inode->i_mutex);
1562     @@ -263,6 +262,20 @@ setversion_out:
1563     return err;
1564     }
1565    
1566     + case EXT4_IOC_ALLOC_DA_BLKS:
1567     + {
1568     + int err;
1569     + if (!is_owner_or_cap(inode))
1570     + return -EACCES;
1571     +
1572     + err = mnt_want_write(filp->f_path.mnt);
1573     + if (err)
1574     + return err;
1575     + err = ext4_alloc_da_blocks(inode);
1576     + mnt_drop_write(filp->f_path.mnt);
1577     + return err;
1578     + }
1579     +
1580     default:
1581     return -ENOTTY;
1582     }
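
For reference, the new ioctl is callable from user space roughly as follows. This is only a sketch: the request value is assumed to match the EXT4_IOC_ALLOC_DA_BLKS definition added to fs/ext4/ext4.h elsewhere in this patch, as it is not in an exported header at this point.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* Assumed to match fs/ext4/ext4.h in this series. */
    #ifndef EXT4_IOC_ALLOC_DA_BLKS
    #define EXT4_IOC_ALLOC_DA_BLKS  _IO('f', 12)
    #endif

    int main(int argc, char **argv)
    {
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file-on-ext4>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Force allocation of any delalloc blocks without an fsync. */
        if (ioctl(fd, EXT4_IOC_ALLOC_DA_BLKS) < 0)
            perror("EXT4_IOC_ALLOC_DA_BLKS");
        close(fd);
        return 0;
    }

Note the kernel side requires ownership (or CAP_FOWNER) and a writable mount, so EACCES and EROFS are possible outcomes.
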
1583     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1584     index ba702bd..8977e60 100644
1585     --- a/fs/ext4/namei.c
1586     +++ b/fs/ext4/namei.c
1587     @@ -1052,8 +1052,16 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1588     return ERR_PTR(-EIO);
1589     }
1590     inode = ext4_iget(dir->i_sb, ino);
1591     - if (IS_ERR(inode))
1592     - return ERR_CAST(inode);
1593     + if (unlikely(IS_ERR(inode))) {
1594     + if (PTR_ERR(inode) == -ESTALE) {
1595     + ext4_error(dir->i_sb, __func__,
1596     + "deleted inode referenced: %u",
1597     + ino);
1598     + return ERR_PTR(-EIO);
1599     + } else {
1600     + return ERR_CAST(inode);
1601     + }
1602     + }
1603     }
1604     return d_splice_alias(inode, dentry);
1605     }
1606     @@ -2311,7 +2319,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1607     struct inode *old_inode, *new_inode;
1608     struct buffer_head *old_bh, *new_bh, *dir_bh;
1609     struct ext4_dir_entry_2 *old_de, *new_de;
1610     - int retval;
1611     + int retval, force_da_alloc = 0;
1612    
1613     old_bh = new_bh = dir_bh = NULL;
1614    
1615     @@ -2449,6 +2457,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1616     ext4_mark_inode_dirty(handle, new_inode);
1617     if (!new_inode->i_nlink)
1618     ext4_orphan_add(handle, new_inode);
1619     + if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
1620     + force_da_alloc = 1;
1621     }
1622     retval = 0;
1623    
1624     @@ -2457,6 +2467,8 @@ end_rename:
1625     brelse(old_bh);
1626     brelse(new_bh);
1627     ext4_journal_stop(handle);
1628     + if (retval == 0 && force_da_alloc)
1629     + ext4_alloc_da_blocks(old_inode);
1630     return retval;
1631     }
1632    
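
The rename hook above targets the common replace-via-rename idiom, where an application rewrites a file without fsync() and, under delayed allocation, could be left with an empty target after a crash. A minimal sketch of the pattern that the auto_da_alloc behaviour protects (paths are illustrative):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <string.h>

    /* "Write temp file, rename over target", deliberately with no fsync.
     * With auto_da_alloc (the default), ext4 now forces block allocation
     * on config.tmp when the rename replaces an existing config. */
    int main(void)
    {
        const char *data = "setting=1\n";
        int fd = open("config.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, data, strlen(data)) < 0) {
            perror("write");
            return 1;
        }
        close(fd);                      /* note: no fsync(fd) */
        if (rename("config.tmp", "config") < 0) {
            perror("rename");
            return 1;
        }
        return 0;
    }
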
1633     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1634     index 39d1993..1ad3c20 100644
1635     --- a/fs/ext4/super.c
1636     +++ b/fs/ext4/super.c
1637     @@ -803,8 +803,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1638     if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
1639     seq_puts(seq, ",noacl");
1640     #endif
1641     - if (!test_opt(sb, RESERVATION))
1642     - seq_puts(seq, ",noreservation");
1643     if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
1644     seq_printf(seq, ",commit=%u",
1645     (unsigned) (sbi->s_commit_interval / HZ));
1646     @@ -855,6 +853,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1647     if (test_opt(sb, DATA_ERR_ABORT))
1648     seq_puts(seq, ",data_err=abort");
1649    
1650     + if (test_opt(sb, NO_AUTO_DA_ALLOC))
1651     + seq_puts(seq, ",auto_da_alloc=0");
1652     +
1653     ext4_show_quota_options(seq, sb);
1654     return 0;
1655     }
1656     @@ -1002,7 +1003,7 @@ enum {
1657     Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1658     Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
1659     Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1660     - Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
1661     + Opt_auto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
1662     Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
1663     Opt_journal_update, Opt_journal_dev,
1664     Opt_journal_checksum, Opt_journal_async_commit,
1665     @@ -1037,8 +1038,6 @@ static const match_table_t tokens = {
1666     {Opt_nouser_xattr, "nouser_xattr"},
1667     {Opt_acl, "acl"},
1668     {Opt_noacl, "noacl"},
1669     - {Opt_reservation, "reservation"},
1670     - {Opt_noreservation, "noreservation"},
1671     {Opt_noload, "noload"},
1672     {Opt_nobh, "nobh"},
1673     {Opt_bh, "bh"},
1674     @@ -1073,6 +1072,7 @@ static const match_table_t tokens = {
1675     {Opt_nodelalloc, "nodelalloc"},
1676     {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1677     {Opt_journal_ioprio, "journal_ioprio=%u"},
1678     + {Opt_auto_da_alloc, "auto_da_alloc=%u"},
1679     {Opt_err, NULL},
1680     };
1681    
1682     @@ -1205,12 +1205,6 @@ static int parse_options(char *options, struct super_block *sb,
1683     "not supported\n");
1684     break;
1685     #endif
1686     - case Opt_reservation:
1687     - set_opt(sbi->s_mount_opt, RESERVATION);
1688     - break;
1689     - case Opt_noreservation:
1690     - clear_opt(sbi->s_mount_opt, RESERVATION);
1691     - break;
1692     case Opt_journal_update:
1693     /* @@@ FIXME */
1694     /* Eventually we will want to be able to create
1695     @@ -1471,6 +1465,14 @@ set_qf_format:
1696     *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
1697     option);
1698     break;
1699     + case Opt_auto_da_alloc:
1700     + if (match_int(&args[0], &option))
1701     + return 0;
1702     + if (option)
1703     + clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1704     + else
1705     + set_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1706     + break;
1707     default:
1708     printk(KERN_ERR
1709     "EXT4-fs: Unrecognized mount option \"%s\" "
1710     @@ -2099,7 +2101,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1711     sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
1712     sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
1713    
1714     - set_opt(sbi->s_mount_opt, RESERVATION);
1715     set_opt(sbi->s_mount_opt, BARRIER);
1716    
1717     /*
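
Note the inverted sense of the parsing above: auto_da_alloc=0 (e.g. mount -o auto_da_alloc=0) sets the NO_AUTO_DA_ALLOC bit, since the behaviour is on by default, and the same bit drives the ",auto_da_alloc=0" output in ext4_show_options(). A toy restatement of that bitmask idiom, with an illustrative flag value rather than the real one:

    #include <stdio.h>

    #define NO_AUTO_DA_ALLOC    0x0001  /* illustrative value only */

    #define set_opt(opt, flag)   ((opt) |= (flag))
    #define clear_opt(opt, flag) ((opt) &= ~(flag))
    #define test_opt(opt, flag)  ((opt) & (flag))

    int main(void)
    {
        unsigned long s_mount_opt = 0;
        int option = 0;                 /* parsed from "auto_da_alloc=%u" */

        if (option)
            clear_opt(s_mount_opt, NO_AUTO_DA_ALLOC);
        else
            set_opt(s_mount_opt, NO_AUTO_DA_ALLOC);

        if (test_opt(s_mount_opt, NO_AUTO_DA_ALLOC))
            printf(",auto_da_alloc=0\n");  /* what show_options emits */
        return 0;
    }
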
1718     diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
1719     index 257ff26..bbe6d59 100644
1720     --- a/fs/jbd2/revoke.c
1721     +++ b/fs/jbd2/revoke.c
1722     @@ -55,6 +55,25 @@
1723     * need do nothing.
1724     * RevokeValid set, Revoked set:
1725     * buffer has been revoked.
1726     + *
1727     + * Locking rules:
1728     + * We keep two hash tables of revoke records. One hashtable belongs to the
1729     + * running transaction (is pointed to by journal->j_revoke), the other one
1730     + * belongs to the committing transaction. Accesses to the second hash table
1731     + * happen only from kjournald and no other thread touches this table. Also,
1732     + * journal_switch_revoke_table(), which switches which hashtable belongs to the
1733     + * running and which to the committing transaction, is called only from
1734     + * kjournald. Therefore we need no locks when accessing the hashtable belonging
1735     + * to the committing transaction.
1736     + *
1737     + * All users operating on the hash table belonging to the running transaction
1738     + * have a handle to the transaction. Therefore they are safe from kjournald
1739     + * switching hash tables under them. For operations on the lists of entries in
1740     + * the hash table j_revoke_lock is used.
1741     + *
1742     + * Finally, the replay code also uses the hash tables but at this moment no one else
1743     + * can touch them (filesystem isn't mounted yet) and hence no locking is
1744     + * needed.
1745     */
1746    
1747     #ifndef __KERNEL__
1748     @@ -401,8 +420,6 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
1749     * the second time we would still have a pending revoke to cancel. So,
1750     * do not trust the Revoked bit on buffers unless RevokeValid is also
1751     * set.
1752     - *
1753     - * The caller must have the journal locked.
1754     */
1755     int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
1756     {
1757     @@ -480,10 +497,7 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
1758     /*
1759     * Write revoke records to the journal for all entries in the current
1760     * revoke hash, deleting the entries as we go.
1761     - *
1762     - * Called with the journal lock held.
1763     */
1764     -
1765     void jbd2_journal_write_revoke_records(journal_t *journal,
1766     transaction_t *transaction)
1767     {
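
The locking rules added above boil down to a common pattern: a lock covering only list work inside the running table, plus a single switcher thread that may swap tables unlocked. A toy user-space restatement with invented names; the real code relies on transaction handles, not a mutex, to keep the switch single-threaded:

    #include <pthread.h>
    #include <stdio.h>

    static int table_a, table_b;        /* stand-ins for the hash tables */
    static int *running = &table_a;     /* journal->j_revoke analogue */
    static int *committing = &table_b;
    static pthread_mutex_t revoke_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Any handle holder: lock only the list operations. */
    static void record_revoke(void)
    {
        pthread_mutex_lock(&revoke_lock);   /* j_revoke_lock analogue */
        (*running)++;                       /* "insert" a revoke record */
        pthread_mutex_unlock(&revoke_lock);
    }

    /* kjournald only: sole switcher, so no lock taken here. */
    static void switch_revoke_table(void)
    {
        int *tmp = running;

        running = committing;
        committing = tmp;
    }

    int main(void)
    {
        record_revoke();
        switch_revoke_table();
        printf("entries in committing table: %d\n", *committing);
        return 0;
    }
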
1768     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1769     index 3b2f697..5dd75c0 100644
1770     --- a/fs/nfs/dir.c
1771     +++ b/fs/nfs/dir.c
1772     @@ -1943,7 +1943,8 @@ int nfs_permission(struct inode *inode, int mask)
1773     case S_IFREG:
1774     /* NFSv4 has atomic_open... */
1775     if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
1776     - && (mask & MAY_OPEN))
1777     + && (mask & MAY_OPEN)
1778     + && !(mask & MAY_EXEC))
1779     goto out;
1780     break;
1781     case S_IFDIR:
1782     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
1783     index 98b93ca..1a2b0cb 100644
1784     --- a/include/linux/ptrace.h
1785     +++ b/include/linux/ptrace.h
1786     @@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code);
1787     extern void __ptrace_link(struct task_struct *child,
1788     struct task_struct *new_parent);
1789     extern void __ptrace_unlink(struct task_struct *child);
1790     +extern int __ptrace_detach(struct task_struct *tracer, struct task_struct *p);
1791     extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
1792     #define PTRACE_MODE_READ 1
1793     #define PTRACE_MODE_ATTACH 2
1794     diff --git a/kernel/exit.c b/kernel/exit.c
1795     index 467ffcd..a33f399 100644
1796     --- a/kernel/exit.c
1797     +++ b/kernel/exit.c
1798     @@ -703,22 +703,50 @@ static void exit_mm(struct task_struct * tsk)
1799     }
1800    
1801     /*
1802     - * Return nonzero if @parent's children should reap themselves.
1803     - *
1804     - * Called with write_lock_irq(&tasklist_lock) held.
1805     + * Called with irqs disabled; returns true if children should reap themselves.
1806     */
1807     -static int ignoring_children(struct task_struct *parent)
1808     +static int ignoring_children(struct sighand_struct *sigh)
1809     {
1810     int ret;
1811     - struct sighand_struct *psig = parent->sighand;
1812     - unsigned long flags;
1813     - spin_lock_irqsave(&psig->siglock, flags);
1814     - ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1815     - (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
1816     - spin_unlock_irqrestore(&psig->siglock, flags);
1817     + spin_lock(&sigh->siglock);
1818     + ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
1819     + (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
1820     + spin_unlock(&sigh->siglock);
1821     return ret;
1822     }
1823    
1824     +/* Returns nonzero if the tracee should be released. */
1825     +int __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
1826     +{
1827     + __ptrace_unlink(p);
1828     +
1829     + if (p->exit_state != EXIT_ZOMBIE)
1830     + return 0;
1831     + /*
1832     + * If it's a zombie, our attachedness prevented normal
1833     + * parent notification or self-reaping. Do notification
1834     + * now if it would have happened earlier. If it should
1835     + * reap itself we return true.
1836     + *
1837     + * If it's our own child, there is no notification to do.
1838     + * But if our normal children self-reap, then this child
1839     + * was prevented by ptrace and we must reap it now.
1840     + */
1841     + if (!task_detached(p) && thread_group_empty(p)) {
1842     + if (!same_thread_group(p->real_parent, tracer))
1843     + do_notify_parent(p, p->exit_signal);
1844     + else if (ignoring_children(tracer->sighand))
1845     + p->exit_signal = -1;
1846     + }
1847     +
1848     + if (!task_detached(p))
1849     + return 0;
1850     +
1851     + /* Mark it as in the process of being reaped. */
1852     + p->exit_state = EXIT_DEAD;
1853     + return 1;
1854     +}
1855     +
1856     /*
1857     * Detach all tasks we were using ptrace on.
1858     * Any that need to be release_task'd are put on the @dead list.
1859     @@ -728,43 +756,10 @@ static int ignoring_children(struct task_struct *parent)
1860     static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
1861     {
1862     struct task_struct *p, *n;
1863     - int ign = -1;
1864    
1865     list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
1866     - __ptrace_unlink(p);
1867     -
1868     - if (p->exit_state != EXIT_ZOMBIE)
1869     - continue;
1870     -
1871     - /*
1872     - * If it's a zombie, our attachedness prevented normal
1873     - * parent notification or self-reaping. Do notification
1874     - * now if it would have happened earlier. If it should
1875     - * reap itself, add it to the @dead list. We can't call
1876     - * release_task() here because we already hold tasklist_lock.
1877     - *
1878     - * If it's our own child, there is no notification to do.
1879     - * But if our normal children self-reap, then this child
1880     - * was prevented by ptrace and we must reap it now.
1881     - */
1882     - if (!task_detached(p) && thread_group_empty(p)) {
1883     - if (!same_thread_group(p->real_parent, parent))
1884     - do_notify_parent(p, p->exit_signal);
1885     - else {
1886     - if (ign < 0)
1887     - ign = ignoring_children(parent);
1888     - if (ign)
1889     - p->exit_signal = -1;
1890     - }
1891     - }
1892     -
1893     - if (task_detached(p)) {
1894     - /*
1895     - * Mark it as in the process of being reaped.
1896     - */
1897     - p->exit_state = EXIT_DEAD;
1898     + if (__ptrace_detach(parent, p))
1899     list_add(&p->ptrace_entry, dead);
1900     - }
1901     }
1902     }
1903    
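
ignoring_children() now takes the sighand directly, but the condition it tests is unchanged: SIGCHLD set to SIG_IGN, or SA_NOCLDWAIT set, meaning the parent's children self-reap. That behaviour is visible from user space, and it is exactly what an active ptrace attachment suppresses and what __ptrace_detach() must compensate for. A small demonstration of the non-ptrace case:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        signal(SIGCHLD, SIG_IGN);   /* ask the kernel to auto-reap */
        if (fork() == 0)
            _exit(0);               /* child exits immediately */
        sleep(1);                   /* child is gone, no zombie left */
        if (waitpid(-1, NULL, 0) < 0 && errno == ECHILD)
            printf("child was auto-reaped, nothing to wait for\n");
        return 0;
    }
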
1904     diff --git a/kernel/futex.c b/kernel/futex.c
1905     index 438701a..2844297 100644
1906     --- a/kernel/futex.c
1907     +++ b/kernel/futex.c
1908     @@ -192,6 +192,7 @@ static void drop_futex_key_refs(union futex_key *key)
1909     * @shared: NULL for a PROCESS_PRIVATE futex,
1910     * &current->mm->mmap_sem for a PROCESS_SHARED futex
1911     * @key: address where result is stored.
1912     + * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
1913     *
1914     * Returns a negative error code or 0
1915     * The key words are stored in *key on success.
1916     @@ -204,7 +205,8 @@ static void drop_futex_key_refs(union futex_key *key)
1917     * For other futexes, it points to &current->mm->mmap_sem and
1918     * caller must have taken the reader lock. but NOT any spinlocks.
1919     */
1920     -static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1921     +static int
1922     +get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
1923     {
1924     unsigned long address = (unsigned long)uaddr;
1925     struct mm_struct *mm = current->mm;
1926     @@ -227,7 +229,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1927     * but access_ok() should be faster than find_vma()
1928     */
1929     if (!fshared) {
1930     - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
1931     + if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
1932     return -EFAULT;
1933     key->private.mm = mm;
1934     key->private.address = address;
1935     @@ -236,7 +238,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
1936     }
1937    
1938     again:
1939     - err = get_user_pages_fast(address, 1, 0, &page);
1940     + err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
1941     if (err < 0)
1942     return err;
1943    
1944     @@ -707,7 +709,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
1945     if (!bitset)
1946     return -EINVAL;
1947    
1948     - ret = get_futex_key(uaddr, fshared, &key);
1949     + ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
1950     if (unlikely(ret != 0))
1951     goto out;
1952    
1953     @@ -753,10 +755,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1954     int ret, op_ret, attempt = 0;
1955    
1956     retryfull:
1957     - ret = get_futex_key(uaddr1, fshared, &key1);
1958     + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1959     if (unlikely(ret != 0))
1960     goto out;
1961     - ret = get_futex_key(uaddr2, fshared, &key2);
1962     + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
1963     if (unlikely(ret != 0))
1964     goto out_put_key1;
1965    
1966     @@ -859,10 +861,10 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1967     int ret, drop_count = 0;
1968    
1969     retry:
1970     - ret = get_futex_key(uaddr1, fshared, &key1);
1971     + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1972     if (unlikely(ret != 0))
1973     goto out;
1974     - ret = get_futex_key(uaddr2, fshared, &key2);
1975     + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
1976     if (unlikely(ret != 0))
1977     goto out_put_key1;
1978    
1979     @@ -1181,7 +1183,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1980     q.bitset = bitset;
1981     retry:
1982     q.key = FUTEX_KEY_INIT;
1983     - ret = get_futex_key(uaddr, fshared, &q.key);
1984     + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
1985     if (unlikely(ret != 0))
1986     goto out;
1987    
1988     @@ -1370,7 +1372,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1989     q.pi_state = NULL;
1990     retry:
1991     q.key = FUTEX_KEY_INIT;
1992     - ret = get_futex_key(uaddr, fshared, &q.key);
1993     + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
1994     if (unlikely(ret != 0))
1995     goto out;
1996    
1997     @@ -1630,7 +1632,7 @@ retry:
1998     if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1999     return -EPERM;
2000    
2001     - ret = get_futex_key(uaddr, fshared, &key);
2002     + ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
2003     if (unlikely(ret != 0))
2004     goto out;
2005    
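
The new rw argument captures the fact that FUTEX_WAIT and FUTEX_WAKE only read the futex word, while operations such as FUTEX_LOCK_PI, FUTEX_UNLOCK_PI, and the uaddr2 side of wake_op/requeue may write it. Concretely, waiting on a futex in a read-only shared file mapping used to fail with EFAULT because get_user_pages_fast() was always asked for write access. A sketch of such a wait; it assumes a pre-existing file futex_word holding at least one zeroed int, and success shows up as ETIMEDOUT rather than EFAULT:

    #include <errno.h>
    #include <fcntl.h>
    #include <linux/futex.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("futex_word", O_RDONLY);  /* >= 4 bytes of zeros */
        int *uaddr;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

        if (fd < 0) {
            perror("open");
            return 1;
        }
        uaddr = mmap(NULL, sizeof(*uaddr), PROT_READ, MAP_SHARED, fd, 0);
        if (uaddr == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* ETIMEDOUT means the key lookup worked; EFAULT was the old bug. */
        if (syscall(SYS_futex, uaddr, FUTEX_WAIT, 0, &ts, NULL, 0) < 0)
            perror("FUTEX_WAIT");
        return 0;
    }
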
2006     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2007     index dc3b98e..893c2c7 100644
2008     --- a/kernel/ptrace.c
2009     +++ b/kernel/ptrace.c
2010     @@ -235,18 +235,10 @@ out:
2011     return retval;
2012     }
2013    
2014     -static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
2015     -{
2016     - child->exit_code = data;
2017     - /* .. re-parent .. */
2018     - __ptrace_unlink(child);
2019     - /* .. and wake it up. */
2020     - if (child->exit_state != EXIT_ZOMBIE)
2021     - wake_up_process(child);
2022     -}
2023     -
2024     int ptrace_detach(struct task_struct *child, unsigned int data)
2025     {
2026     + int dead = 0;
2027     +
2028     if (!valid_signal(data))
2029     return -EIO;
2030    
2031     @@ -256,10 +248,19 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
2032    
2033     write_lock_irq(&tasklist_lock);
2034     /* protect against de_thread()->release_task() */
2035     - if (child->ptrace)
2036     - __ptrace_detach(child, data);
2037     + if (child->ptrace) {
2038     + child->exit_code = data;
2039     +
2040     + dead = __ptrace_detach(current, child);
2041     +
2042     + if (!child->exit_state)
2043     + wake_up_process(child);
2044     + }
2045     write_unlock_irq(&tasklist_lock);
2046    
2047     + if (unlikely(dead))
2048     + release_task(child);
2049     +
2050     return 0;
2051     }
2052    
2053     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2054     index 107da3d..2a9129b 100644
2055     --- a/mm/hugetlb.c
2056     +++ b/mm/hugetlb.c
2057     @@ -316,7 +316,7 @@ static void resv_map_release(struct kref *ref)
2058     static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
2059     {
2060     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2061     - if (!(vma->vm_flags & VM_SHARED))
2062     + if (!(vma->vm_flags & VM_MAYSHARE))
2063     return (struct resv_map *)(get_vma_private_data(vma) &
2064     ~HPAGE_RESV_MASK);
2065     return NULL;
2066     @@ -325,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
2067     static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
2068     {
2069     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2070     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
2071     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
2072    
2073     set_vma_private_data(vma, (get_vma_private_data(vma) &
2074     HPAGE_RESV_MASK) | (unsigned long)map);
2075     @@ -334,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
2076     static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
2077     {
2078     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2079     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
2080     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
2081    
2082     set_vma_private_data(vma, get_vma_private_data(vma) | flags);
2083     }
2084     @@ -353,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
2085     if (vma->vm_flags & VM_NORESERVE)
2086     return;
2087    
2088     - if (vma->vm_flags & VM_SHARED) {
2089     + if (vma->vm_flags & VM_MAYSHARE) {
2090     /* Shared mappings always use reserves */
2091     h->resv_huge_pages--;
2092     } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
2093     @@ -369,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
2094     void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
2095     {
2096     VM_BUG_ON(!is_vm_hugetlb_page(vma));
2097     - if (!(vma->vm_flags & VM_SHARED))
2098     + if (!(vma->vm_flags & VM_MAYSHARE))
2099     vma->vm_private_data = (void *)0;
2100     }
2101    
2102     /* Returns true if the VMA has associated reserve pages */
2103     static int vma_has_reserves(struct vm_area_struct *vma)
2104     {
2105     - if (vma->vm_flags & VM_SHARED)
2106     + if (vma->vm_flags & VM_MAYSHARE)
2107     return 1;
2108     if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2109     return 1;
2110     @@ -924,7 +924,7 @@ static int vma_needs_reservation(struct hstate *h,
2111     struct address_space *mapping = vma->vm_file->f_mapping;
2112     struct inode *inode = mapping->host;
2113    
2114     - if (vma->vm_flags & VM_SHARED) {
2115     + if (vma->vm_flags & VM_MAYSHARE) {
2116     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
2117     return region_chg(&inode->i_mapping->private_list,
2118     idx, idx + 1);
2119     @@ -949,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
2120     struct address_space *mapping = vma->vm_file->f_mapping;
2121     struct inode *inode = mapping->host;
2122    
2123     - if (vma->vm_flags & VM_SHARED) {
2124     + if (vma->vm_flags & VM_MAYSHARE) {
2125     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
2126     region_add(&inode->i_mapping->private_list, idx, idx + 1);
2127    
2128     @@ -1893,7 +1893,7 @@ retry_avoidcopy:
2129     * at the time of fork() could consume its reserves on COW instead
2130     * of the full address range.
2131     */
2132     - if (!(vma->vm_flags & VM_SHARED) &&
2133     + if (!(vma->vm_flags & VM_MAYSHARE) &&
2134     is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2135     old_page != pagecache_page)
2136     outside_reserve = 1;
2137     @@ -2000,7 +2000,7 @@ retry:
2138     clear_huge_page(page, address, huge_page_size(h));
2139     __SetPageUptodate(page);
2140    
2141     - if (vma->vm_flags & VM_SHARED) {
2142     + if (vma->vm_flags & VM_MAYSHARE) {
2143     int err;
2144     struct inode *inode = mapping->host;
2145    
2146     @@ -2104,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2147     goto out_mutex;
2148     }
2149    
2150     - if (!(vma->vm_flags & VM_SHARED))
2151     + if (!(vma->vm_flags & VM_MAYSHARE))
2152     pagecache_page = hugetlbfs_pagecache_page(h,
2153     vma, address);
2154     }
2155     @@ -2289,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2156     * to reserve the full area even if read-only as mprotect() may be
2157     * called to make the mapping read-write. Assume !vma is a shm mapping
2158     */
2159     - if (!vma || vma->vm_flags & VM_SHARED)
2160     + if (!vma || vma->vm_flags & VM_MAYSHARE)
2161     chg = region_chg(&inode->i_mapping->private_list, from, to);
2162     else {
2163     struct resv_map *resv_map = resv_map_alloc();
2164     @@ -2330,7 +2330,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2165     * consumed reservations are stored in the map. Hence, nothing
2166     * else has to be done for private mappings here
2167     */
2168     - if (!vma || vma->vm_flags & VM_SHARED)
2169     + if (!vma || vma->vm_flags & VM_MAYSHARE)
2170     region_add(&inode->i_mapping->private_list, from, to);
2171     return 0;
2172     }
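
Why VM_MAYSHARE is the right test here: for MAP_SHARED mappings of a file descriptor opened read-only, the core mmap code keeps VM_MAYSHARE but drops VM_SHARED, so testing VM_SHARED misclassified read-only shared hugetlb mappings as private for reservation purposes. A toy restatement of that flag dance; the flag values below are illustrative, not the kernel's:

    #include <stdio.h>

    #define VM_SHARED   0x1     /* illustrative values */
    #define VM_MAYSHARE 0x2
    #define FMODE_WRITE 0x4

    /* Mirrors what mm/mmap.c does for MAP_SHARED mappings. */
    static unsigned int shared_vm_flags(unsigned int f_mode)
    {
        unsigned int vm_flags = VM_SHARED | VM_MAYSHARE;

        if (!(f_mode & FMODE_WRITE))
            vm_flags &= ~VM_SHARED; /* read-only fd: VM_SHARED dropped */
        return vm_flags;
    }

    int main(void)
    {
        unsigned int ro = shared_vm_flags(0);

        printf("ro MAP_SHARED: VM_SHARED=%d VM_MAYSHARE=%d\n",
               !!(ro & VM_SHARED), !!(ro & VM_MAYSHARE));
        return 0;
    }
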
2173     diff --git a/mm/slub.c b/mm/slub.c
2174     index 0280eee..5663da0 100644
2175     --- a/mm/slub.c
2176     +++ b/mm/slub.c
2177     @@ -9,6 +9,7 @@
2178     */
2179    
2180     #include <linux/mm.h>
2181     +#include <linux/swap.h> /* struct reclaim_state */
2182     #include <linux/module.h>
2183     #include <linux/bit_spinlock.h>
2184     #include <linux/interrupt.h>
2185     @@ -1175,6 +1176,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
2186    
2187     __ClearPageSlab(page);
2188     reset_page_mapcount(page);
2189     + if (current->reclaim_state)
2190     + current->reclaim_state->reclaimed_slab += pages;
2191     __free_pages(page, order);
2192     }
2193    
2194     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
2195     index 1b34135..6b09213 100644
2196     --- a/net/8021q/vlan_dev.c
2197     +++ b/net/8021q/vlan_dev.c
2198     @@ -668,7 +668,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
2199     const struct vlan_dev_info *vlan = vlan_dev_info(dev);
2200     struct net_device *real_dev = vlan->real_dev;
2201    
2202     - if (!real_dev->ethtool_ops->get_settings)
2203     + if (!real_dev->ethtool_ops ||
2204     + !real_dev->ethtool_ops->get_settings)
2205     return -EOPNOTSUPP;
2206    
2207     return real_dev->ethtool_ops->get_settings(real_dev, cmd);
2208     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
2209     index 6549848..c4070ec 100644
2210     --- a/net/core/pktgen.c
2211     +++ b/net/core/pktgen.c
2212     @@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
2213     if (pkt_dev->cflows) {
2214     /* let go of the SAs if we have them */
2215     int i = 0;
2216     - for (; i < pkt_dev->nflows; i++){
2217     + for (; i < pkt_dev->cflows; i++) {
2218     struct xfrm_state *x = pkt_dev->flows[i].x;
2219     if (x) {
2220     xfrm_state_put(x);
2221     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2222     index eae6954..f4bbc98 100644
2223     --- a/net/core/skbuff.c
2224     +++ b/net/core/skbuff.c
2225     @@ -2206,7 +2206,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2226     next_skb:
2227     block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2228    
2229     - if (abs_offset < block_limit) {
2230     + if (abs_offset < block_limit && !st->frag_data) {
2231     *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2232     return block_limit - abs_offset;
2233     }
2234     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2235     index 97f7115..4e302d1 100644
2236     --- a/net/ipv4/route.c
2237     +++ b/net/ipv4/route.c
2238     @@ -784,8 +784,8 @@ static void rt_check_expire(void)
2239     {
2240     static unsigned int rover;
2241     unsigned int i = rover, goal;
2242     - struct rtable *rth, **rthp;
2243     - unsigned long length = 0, samples = 0;
2244     + struct rtable *rth, *aux, **rthp;
2245     + unsigned long samples = 0;
2246     unsigned long sum = 0, sum2 = 0;
2247     u64 mult;
2248    
2249     @@ -795,9 +795,9 @@ static void rt_check_expire(void)
2250     goal = (unsigned int)mult;
2251     if (goal > rt_hash_mask)
2252     goal = rt_hash_mask + 1;
2253     - length = 0;
2254     for (; goal > 0; goal--) {
2255     unsigned long tmo = ip_rt_gc_timeout;
2256     + unsigned long length;
2257    
2258     i = (i + 1) & rt_hash_mask;
2259     rthp = &rt_hash_table[i].chain;
2260     @@ -809,8 +809,10 @@ static void rt_check_expire(void)
2261    
2262     if (*rthp == NULL)
2263     continue;
2264     + length = 0;
2265     spin_lock_bh(rt_hash_lock_addr(i));
2266     while ((rth = *rthp) != NULL) {
2267     + prefetch(rth->u.dst.rt_next);
2268     if (rt_is_expired(rth)) {
2269     *rthp = rth->u.dst.rt_next;
2270     rt_free(rth);
2271     @@ -819,33 +821,30 @@ static void rt_check_expire(void)
2272     if (rth->u.dst.expires) {
2273     /* Entry is expired even if it is in use */
2274     if (time_before_eq(jiffies, rth->u.dst.expires)) {
2275     +nofree:
2276     tmo >>= 1;
2277     rthp = &rth->u.dst.rt_next;
2278     /*
2279     - * Only bump our length if the hash
2280     - * inputs on entries n and n+1 are not
2281     - * the same, we only count entries on
2282     + * We only count entries on
2283     * a chain with equal hash inputs once
2284     * so that entries for different QOS
2285     * levels, and other non-hash input
2286     * attributes don't unfairly skew
2287     * the length computation
2288     */
2289     - if ((*rthp == NULL) ||
2290     - !compare_hash_inputs(&(*rthp)->fl,
2291     - &rth->fl))
2292     - length += ONE;
2293     + for (aux = rt_hash_table[i].chain;;) {
2294     + if (aux == rth) {
2295     + length += ONE;
2296     + break;
2297     + }
2298     + if (compare_hash_inputs(&aux->fl, &rth->fl))
2299     + break;
2300     + aux = aux->u.dst.rt_next;
2301     + }
2302     continue;
2303     }
2304     - } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
2305     - tmo >>= 1;
2306     - rthp = &rth->u.dst.rt_next;
2307     - if ((*rthp == NULL) ||
2308     - !compare_hash_inputs(&(*rthp)->fl,
2309     - &rth->fl))
2310     - length += ONE;
2311     - continue;
2312     - }
2313     + } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
2314     + goto nofree;
2315    
2316     /* Cleanup aged off entries. */
2317     *rthp = rth->u.dst.rt_next;
2318     @@ -1068,7 +1067,6 @@ out: return 0;
2319     static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
2320     {
2321     struct rtable *rth, **rthp;
2322     - struct rtable *rthi;
2323     unsigned long now;
2324     struct rtable *cand, **candp;
2325     u32 min_score;
2326     @@ -1088,7 +1086,6 @@ restart:
2327     }
2328    
2329     rthp = &rt_hash_table[hash].chain;
2330     - rthi = NULL;
2331    
2332     spin_lock_bh(rt_hash_lock_addr(hash));
2333     while ((rth = *rthp) != NULL) {
2334     @@ -1134,17 +1131,6 @@ restart:
2335     chain_length++;
2336    
2337     rthp = &rth->u.dst.rt_next;
2338     -
2339     - /*
2340     - * check to see if the next entry in the chain
2341     - * contains the same hash input values as rt. If it does
2342     - * This is where we will insert into the list, instead of
2343     - * at the head. This groups entries that differ by aspects not
2344     - * relvant to the hash function together, which we use to adjust
2345     - * our chain length
2346     - */
2347     - if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
2348     - rthi = rth;
2349     }
2350    
2351     if (cand) {
2352     @@ -1205,10 +1191,7 @@ restart:
2353     }
2354     }
2355    
2356     - if (rthi)
2357     - rt->u.dst.rt_next = rthi->u.dst.rt_next;
2358     - else
2359     - rt->u.dst.rt_next = rt_hash_table[hash].chain;
2360     + rt->u.dst.rt_next = rt_hash_table[hash].chain;
2361    
2362     #if RT_CACHE_DEBUG >= 2
2363     if (rt->u.dst.rt_next) {
2364     @@ -1224,10 +1207,7 @@ restart:
2365     * previous writes to rt are comitted to memory
2366     * before making rt visible to other CPUS.
2367     */
2368     - if (rthi)
2369     - rcu_assign_pointer(rthi->u.dst.rt_next, rt);
2370     - else
2371     - rcu_assign_pointer(rt_hash_table[hash].chain, rt);
2372     + rcu_assign_pointer(rt_hash_table[hash].chain, rt);
2373    
2374     spin_unlock_bh(rt_hash_lock_addr(hash));
2375     *rp = rt;
2376     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2377     index 76b148b..b16dc84 100644
2378     --- a/net/ipv4/tcp.c
2379     +++ b/net/ipv4/tcp.c
2380     @@ -1286,6 +1286,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2381     struct task_struct *user_recv = NULL;
2382     int copied_early = 0;
2383     struct sk_buff *skb;
2384     + u32 urg_hole = 0;
2385    
2386     lock_sock(sk);
2387    
2388     @@ -1497,7 +1498,8 @@ do_prequeue:
2389     }
2390     }
2391     }
2392     - if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
2393     + if ((flags & MSG_PEEK) &&
2394     + (peek_seq - copied - urg_hole != tp->copied_seq)) {
2395     if (net_ratelimit())
2396     printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
2397     current->comm, task_pid_nr(current));
2398     @@ -1518,6 +1520,7 @@ do_prequeue:
2399     if (!urg_offset) {
2400     if (!sock_flag(sk, SOCK_URGINLINE)) {
2401     ++*seq;
2402     + urg_hole++;
2403     offset++;
2404     used--;
2405     if (!used)
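
The corrected MSG_PEEK check accounts for urgent bytes that are stepped over without being copied: while peeking, peek_seq advances by every byte copied plus every non-inline urgent byte skipped (counted in urg_hole), so consistency means peek_seq - copied - urg_hole == tp->copied_seq. A worked version of that bookkeeping:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int copied_seq = 1000; /* start of the receive queue */
        unsigned int peek_seq = copied_seq;
        unsigned int copied = 0, urg_hole = 0;

        peek_seq += 5;  copied += 5;    /* peeked five data bytes */
        peek_seq += 1;  urg_hole += 1;  /* skipped one urgent byte
                                           (!SOCK_URGINLINE) */
        peek_seq += 3;  copied += 3;    /* peeked three more bytes */

        /* The fixed test: no warning as long as this holds. */
        assert(peek_seq - copied - urg_hole == copied_seq);
        printf("MSG_PEEK bookkeeping consistent\n");
        return 0;
    }
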
2406     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2407     index c28976a..fd4317e 100644
2408     --- a/net/ipv4/tcp_input.c
2409     +++ b/net/ipv4/tcp_input.c
2410     @@ -927,6 +927,8 @@ static void tcp_init_metrics(struct sock *sk)
2411     tcp_set_rto(sk);
2412     if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
2413     goto reset;
2414     +
2415     +cwnd:
2416     tp->snd_cwnd = tcp_init_cwnd(tp, dst);
2417     tp->snd_cwnd_stamp = tcp_time_stamp;
2418     return;
2419     @@ -941,6 +943,7 @@ reset:
2420     tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
2421     inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2422     }
2423     + goto cwnd;
2424     }
2425    
2426     static void tcp_update_reordering(struct sock *sk, const int metric,
2427     diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
2428     index 3824990..d9233ec 100644
2429     --- a/net/mac80211/rc80211_minstrel.c
2430     +++ b/net/mac80211/rc80211_minstrel.c
2431     @@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
2432     return NULL;
2433    
2434     for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
2435     - sband = hw->wiphy->bands[hw->conf.channel->band];
2436     - if (sband->n_bitrates > max_rates)
2437     + sband = hw->wiphy->bands[i];
2438     + if (sband && sband->n_bitrates > max_rates)
2439     max_rates = sband->n_bitrates;
2440     }
2441    
2442     diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
2443     index b16801c..8bef9a1 100644
2444     --- a/net/mac80211/rc80211_pid_algo.c
2445     +++ b/net/mac80211/rc80211_pid_algo.c
2446     @@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
2447     struct ieee80211_sta *sta, void *priv_sta)
2448     {
2449     struct rc_pid_sta_info *spinfo = priv_sta;
2450     + struct rc_pid_info *pinfo = priv;
2451     + struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
2452     struct sta_info *si;
2453     + int i, j, tmp;
2454     + bool s;
2455    
2456     /* TODO: This routine should consider using RSSI from previous packets
2457     * as we need to have IEEE 802.1X auth succeed immediately after assoc..
2458     * Until that method is implemented, we will use the lowest supported
2459     * rate as a workaround. */
2460    
2461     + /* Sort the rates. This is optimized for the most common case (i.e.
2462     + * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
2463     + * mapping too. */
2464     + for (i = 0; i < sband->n_bitrates; i++) {
2465     + rinfo[i].index = i;
2466     + rinfo[i].rev_index = i;
2467     + if (RC_PID_FAST_START)
2468     + rinfo[i].diff = 0;
2469     + else
2470     + rinfo[i].diff = i * pinfo->norm_offset;
2471     + }
2472     + for (i = 1; i < sband->n_bitrates; i++) {
2473     + s = 0;
2474     + for (j = 0; j < sband->n_bitrates - i; j++)
2475     + if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
2476     + sband->bitrates[rinfo[j + 1].index].bitrate)) {
2477     + tmp = rinfo[j].index;
2478     + rinfo[j].index = rinfo[j + 1].index;
2479     + rinfo[j + 1].index = tmp;
2480     + rinfo[rinfo[j].index].rev_index = j;
2481     + rinfo[rinfo[j + 1].index].rev_index = j + 1;
2482     + s = 1;
2483     + }
2484     + if (!s)
2485     + break;
2486     + }
2487     +
2488     spinfo->txrate_idx = rate_lowest_index(sband, sta);
2489     /* HACK */
2490     si = container_of(sta, struct sta_info, sta);
2491     @@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
2492     struct rc_pid_info *pinfo;
2493     struct rc_pid_rateinfo *rinfo;
2494     struct ieee80211_supported_band *sband;
2495     - int i, j, tmp;
2496     - bool s;
2497     + int i, max_rates = 0;
2498     #ifdef CONFIG_MAC80211_DEBUGFS
2499     struct rc_pid_debugfs_entries *de;
2500     #endif
2501    
2502     - sband = hw->wiphy->bands[hw->conf.channel->band];
2503     -
2504     pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
2505     if (!pinfo)
2506     return NULL;
2507    
2508     - /* We can safely assume that sband won't change unless we get
2509     - * reinitialized. */
2510     - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
2511     + for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
2512     + sband = hw->wiphy->bands[i];
2513     + if (sband && sband->n_bitrates > max_rates)
2514     + max_rates = sband->n_bitrates;
2515     + }
2516     +
2517     + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
2518     if (!rinfo) {
2519     kfree(pinfo);
2520     return NULL;
2521     @@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
2522     pinfo->rinfo = rinfo;
2523     pinfo->oldrate = 0;
2524    
2525     - /* Sort the rates. This is optimized for the most common case (i.e.
2526     - * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
2527     - * mapping too. */
2528     - for (i = 0; i < sband->n_bitrates; i++) {
2529     - rinfo[i].index = i;
2530     - rinfo[i].rev_index = i;
2531     - if (RC_PID_FAST_START)
2532     - rinfo[i].diff = 0;
2533     - else
2534     - rinfo[i].diff = i * pinfo->norm_offset;
2535     - }
2536     - for (i = 1; i < sband->n_bitrates; i++) {
2537     - s = 0;
2538     - for (j = 0; j < sband->n_bitrates - i; j++)
2539     - if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
2540     - sband->bitrates[rinfo[j + 1].index].bitrate)) {
2541     - tmp = rinfo[j].index;
2542     - rinfo[j].index = rinfo[j + 1].index;
2543     - rinfo[j + 1].index = tmp;
2544     - rinfo[rinfo[j].index].rev_index = j;
2545     - rinfo[rinfo[j + 1].index].rev_index = j + 1;
2546     - s = 1;
2547     - }
2548     - if (!s)
2549     - break;
2550     - }
2551     -
2552     #ifdef CONFIG_MAC80211_DEBUGFS
2553     de = &pinfo->dentries;
2554     de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
2555     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
2556     index 5e75bbf..6c4846a 100644
2557     --- a/net/rose/af_rose.c
2558     +++ b/net/rose/af_rose.c
2559     @@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
2560     unsigned char *asmptr;
2561     int n, size, qbit = 0;
2562    
2563     - /* ROSE empty frame has no meaning : don't send */
2564     - if (len == 0)
2565     - return 0;
2566     -
2567     if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
2568     return -EINVAL;
2569    
2570     @@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
2571     skb_reset_transport_header(skb);
2572     copied = skb->len;
2573    
2574     - /* ROSE empty frame has no meaning : ignore it */
2575     - if (copied == 0) {
2576     - skb_free_datagram(sk, skb);
2577     - return copied;
2578     - }
2579     -
2580     if (copied > size) {
2581     copied = size;
2582     msg->msg_flags |= MSG_TRUNC;
2583     diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
2584     index ec697ce..3b64182 100644
2585     --- a/net/sched/sch_teql.c
2586     +++ b/net/sched/sch_teql.c
2587     @@ -303,6 +303,8 @@ restart:
2588     switch (teql_resolve(skb, skb_res, slave)) {
2589     case 0:
2590     if (__netif_tx_trylock(slave_txq)) {
2591     + unsigned int length = qdisc_pkt_len(skb);
2592     +
2593     if (!netif_tx_queue_stopped(slave_txq) &&
2594     !netif_tx_queue_frozen(slave_txq) &&
2595     slave_ops->ndo_start_xmit(skb, slave) == 0) {
2596     @@ -310,8 +312,7 @@ restart:
2597     master->slaves = NEXT_SLAVE(q);
2598     netif_wake_queue(dev);
2599     master->stats.tx_packets++;
2600     - master->stats.tx_bytes +=
2601     - qdisc_pkt_len(skb);
2602     + master->stats.tx_bytes += length;
2603     return 0;
2604     }
2605     __netif_tx_unlock(slave_txq);
2606     diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
2607     index cb3b4ad..c229de3 100644
2608     --- a/net/wimax/op-msg.c
2609     +++ b/net/wimax/op-msg.c
2610     @@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
2611     }
2612     result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
2613     if (result < 0) {
2614     - dev_err(dev, "no memory to add payload in attribute\n");
2615     + dev_err(dev, "no memory to add payload (msg %p size %zu) in "
2616     + "attribute: %d\n", msg, size, result);
2617     goto error_nla_put;
2618     }
2619     genlmsg_end(skb, genl_msg);
2620     @@ -302,10 +303,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
2621     struct sk_buff *skb;
2622    
2623     skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
2624     - if (skb == NULL)
2625     - goto error_msg_new;
2626     - result = wimax_msg_send(wimax_dev, skb);
2627     -error_msg_new:
2628     + if (IS_ERR(skb))
2629     + result = PTR_ERR(skb);
2630     + else
2631     + result = wimax_msg_send(wimax_dev, skb);
2632     return result;
2633     }
2634     EXPORT_SYMBOL_GPL(wimax_msg);
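
The wimax_msg() fix relies on the kernel's ERR_PTR convention: wimax_msg_alloc() reports failure by encoding a negative errno in the pointer itself, so the old NULL test could never see the error. A self-contained restatement of the idiom, with helpers that imitate include/linux/err.h:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Imitates wimax_msg_alloc(): returns ERR_PTR(-errno) on failure. */
    static void *msg_alloc(int fail)
    {
        static char skb[] = "payload";

        return fail ? ERR_PTR(-ENOMEM) : (void *)skb;
    }

    int main(void)
    {
        void *skb = msg_alloc(1);

        if (IS_ERR(skb))        /* "if (skb == NULL)" would miss this */
            printf("alloc failed: %ld\n", PTR_ERR(skb));
        return 0;
    }
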
2635     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2636     index 4f9ff2a..66cebb5 100644
2637     --- a/net/wireless/reg.c
2638     +++ b/net/wireless/reg.c
2639     @@ -1497,6 +1497,13 @@ int regulatory_init(void)
2640     "using static definition\n");
2641     #endif
2642    
2643     + /*
2644     + * This ensures last_request is populated once modules
2645     + * come swinging in and start issuing regulatory hints and calling
2646     + * wiphy_apply_custom_regulatory().
2647     + */
2648     + flush_scheduled_work();
2649     +
2650     return 0;
2651     }
2652    
2653     diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2654     index 8227172..5f1f865 100644
2655     --- a/net/xfrm/xfrm_state.c
2656     +++ b/net/xfrm/xfrm_state.c
2657     @@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
2658     {
2659     static xfrm_address_t saddr_wildcard = { };
2660     struct net *net = xp_net(pol);
2661     - unsigned int h;
2662     + unsigned int h, h_wildcard;
2663     struct hlist_node *entry;
2664     struct xfrm_state *x, *x0, *to_put;
2665     int acquire_in_progress = 0;
2666     @@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
2667     if (best)
2668     goto found;
2669    
2670     - h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
2671     - hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
2672     + h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
2673     + hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
2674     if (x->props.family == family &&
2675     x->props.reqid == tmpl->reqid &&
2676     !(x->props.flags & XFRM_STATE_WILDRECV) &&
2677     diff --git a/security/keys/request_key.c b/security/keys/request_key.c
2678     index 0e04f72..ab70cab 100644
2679     --- a/security/keys/request_key.c
2680     +++ b/security/keys/request_key.c
2681     @@ -311,7 +311,8 @@ static int construct_alloc_key(struct key_type *type,
2682    
2683     set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
2684    
2685     - down_write(&dest_keyring->sem);
2686     + if (dest_keyring)
2687     + down_write(&dest_keyring->sem);
2688    
2689     /* attach the key to the destination keyring under lock, but we do need
2690     * to do another check just in case someone beat us to it whilst we
2691     @@ -322,10 +323,12 @@ static int construct_alloc_key(struct key_type *type,
2692     if (!IS_ERR(key_ref))
2693     goto key_already_present;
2694    
2695     - __key_link(dest_keyring, key);
2696     + if (dest_keyring)
2697     + __key_link(dest_keyring, key);
2698    
2699     mutex_unlock(&key_construction_mutex);
2700     - up_write(&dest_keyring->sem);
2701     + if (dest_keyring)
2702     + up_write(&dest_keyring->sem);
2703     mutex_unlock(&user->cons_lock);
2704     *_key = key;
2705     kleave(" = 0 [%d]", key_serial(key));
2706     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2707     index 8d24c91..9d62f29 100644
2708     --- a/security/selinux/hooks.c
2709     +++ b/security/selinux/hooks.c
2710     @@ -4648,7 +4648,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
2711     if (err)
2712     return err;
2713    
2714     - if (send_perm != 0)
2715     + if (!send_perm)
2716     return 0;
2717    
2718     err = sel_netport_sid(sk->sk_protocol,
2719     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2720     index 6c26afc..d76fe8f 100644
2721     --- a/sound/pci/hda/patch_realtek.c
2722     +++ b/sound/pci/hda/patch_realtek.c
2723     @@ -11710,6 +11710,7 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
2724     SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
2725     SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
2726     SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA),
2727     + SND_PCI_QUIRK(0x103c, 0x30f1, "HP TX25xx series", ALC268_TOSHIBA),
2728     SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
2729     SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA),
2730     SND_PCI_QUIRK(0x1179, 0xff50, "TOSHIBA A305", ALC268_TOSHIBA),
2731     diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
2732     index 19e3745..ee379cf 100644
2733     --- a/sound/usb/usbaudio.c
2734     +++ b/sound/usb/usbaudio.c
2735     @@ -3375,7 +3375,7 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip,
2736     [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface,
2737     [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface,
2738     [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface,
2739     - [QUIRK_MIDI_RAW] = snd_usb_create_midi_interface,
2740     + [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface,
2741     [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface,
2742     [QUIRK_MIDI_CME] = snd_usb_create_midi_interface,
2743     [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
2744     diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
2745     index 36e4f7a..8e7f789 100644
2746     --- a/sound/usb/usbaudio.h
2747     +++ b/sound/usb/usbaudio.h
2748     @@ -153,7 +153,7 @@ enum quirk_type {
2749     QUIRK_MIDI_YAMAHA,
2750     QUIRK_MIDI_MIDIMAN,
2751     QUIRK_MIDI_NOVATION,
2752     - QUIRK_MIDI_RAW,
2753     + QUIRK_MIDI_FASTLANE,
2754     QUIRK_MIDI_EMAGIC,
2755     QUIRK_MIDI_CME,
2756     QUIRK_MIDI_US122L,
2757     diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
2758     index 26bad37..2fb35cc 100644
2759     --- a/sound/usb/usbmidi.c
2760     +++ b/sound/usb/usbmidi.c
2761     @@ -1778,8 +1778,18 @@ int snd_usb_create_midi_interface(struct snd_usb_audio* chip,
2762     umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
2763     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2764     break;
2765     - case QUIRK_MIDI_RAW:
2766     + case QUIRK_MIDI_FASTLANE:
2767     umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
2768     + /*
2769     + * Interface 1 contains isochronous endpoints, but with the same
2770     + * numbers as in interface 0. Since it is interface 1 that the
2771     + * USB core has most recently seen, these descriptors are now
2772     + * associated with the endpoint numbers. This will foul up our
2773     + * attempts to submit bulk/interrupt URBs to the endpoints in
2774     + * interface 0, so we have to make sure that the USB core looks
2775     + * again at interface 0 by calling usb_set_interface() on it.
2776     + */
2777     + usb_set_interface(umidi->chip->dev, 0, 0);
2778     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2779     break;
2780     case QUIRK_MIDI_EMAGIC:
2781     diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
2782     index 5d8ef09..25162f6 100644
2783     --- a/sound/usb/usbquirks.h
2784     +++ b/sound/usb/usbquirks.h
2785     @@ -1858,7 +1858,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2786     .data = & (const struct snd_usb_audio_quirk[]) {
2787     {
2788     .ifnum = 0,
2789     - .type = QUIRK_MIDI_RAW
2790     + .type = QUIRK_MIDI_FASTLANE
2791     },
2792     {
2793     .ifnum = 1,
2794     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2795     index d85642e..9138d0d 100644
2796     --- a/virt/kvm/kvm_main.c
2797     +++ b/virt/kvm/kvm_main.c
2798     @@ -2315,6 +2315,7 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
2799     r = -ENOMEM;
2800     goto out_free_0;
2801     }
2802     + cpumask_clear(cpus_hardware_enabled);
2803    
2804     r = kvm_arch_hardware_setup();
2805     if (r < 0)