Magellan Linux

Annotation of /trunk/kernel26-alx/patches-2.6.27-r3/0124-2.6.27.25-all-fixes.patch



Revision 1176
Thu Oct 14 15:11:06 2010 UTC by niro
File size: 60933 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 niro 1176 diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
2     index 0d53949..befe8d4 100644
3     --- a/Documentation/filesystems/ext4.txt
4     +++ b/Documentation/filesystems/ext4.txt
5     @@ -73,7 +73,7 @@ Mailing list: linux-ext4@vger.kernel.org
6     * extent format more robust in face of on-disk corruption due to magics,
7     * internal redundancy in tree
8     * improved file allocation (multi-block alloc)
9     -* fix 32000 subdirectory limit
10     +* lift 32000 subdirectory limit imposed by i_links_count[1]
11     * nsec timestamps for mtime, atime, ctime, create time
12     * inode version field on disk (NFSv4, Lustre)
13     * reduced e2fsck time via uninit_bg feature
14     @@ -88,6 +88,9 @@ Mailing list: linux-ext4@vger.kernel.org
15     * efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
16     the ordering)
17    
18     +[1] Filesystems with a block size of 1k may see a limit imposed by the
19     +directory hash tree having a maximum depth of two.
20     +
21     2.2 Candidate features for future inclusion
22    
23     * Online defrag (patches available but not well tested)
24     diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
25     index 71819bb..6ea90d7 100644
26     --- a/arch/sparc/include/asm/pil.h
27     +++ b/arch/sparc/include/asm/pil.h
28     @@ -18,5 +18,6 @@
29     #define PIL_SMP_CTX_NEW_VERSION 4
30     #define PIL_DEVICE_IRQ 5
31     #define PIL_SMP_CALL_FUNC_SNGL 6
32     +#define PIL_KGDB_CAPTURE 8
33    
34     #endif /* !(_SPARC64_PIL_H) */
35     diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
36     index ec81cde..0aaa086 100644
37     --- a/arch/sparc/include/asm/tlb_64.h
38     +++ b/arch/sparc/include/asm/tlb_64.h
39     @@ -58,6 +58,8 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i
40     static inline void tlb_flush_mmu(struct mmu_gather *mp)
41     {
42     if (mp->need_flush) {
43     + if (!mp->fullmm)
44     + flush_tlb_pending();
45     free_pages_and_swap_cache(mp->pages, mp->pages_nr);
46     mp->pages_nr = 0;
47     mp->need_flush = 0;
48     @@ -78,8 +80,6 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un
49    
50     if (mp->fullmm)
51     mp->fullmm = 0;
52     - else
53     - flush_tlb_pending();
54    
55     /* keep the page table cache within bounds */
56     check_pgt_cache();
57     diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
58     index f58c537..e0bfc51 100644
59     --- a/arch/sparc/kernel/of_device.c
60     +++ b/arch/sparc/kernel/of_device.c
61     @@ -223,8 +223,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
62    
63     static int of_bus_sbus_match(struct device_node *np)
64     {
65     - return !strcmp(np->name, "sbus") ||
66     - !strcmp(np->name, "sbi");
67     + struct device_node *dp = np;
68     +
69     + while (dp) {
70     + if (!strcmp(dp->name, "sbus") ||
71     + !strcmp(dp->name, "sbi"))
72     + return 1;
73     +
74     + /* Have a look at use_1to1_mapping(). We're trying
75     + * to match SBUS if that's the top-level bus and we
76     + * don't have some intervening real bus that provides
77     + * ranges based translations.
78     + */
79     + if (of_find_property(dp, "ranges", NULL) != NULL)
80     + break;
81     +
82     + dp = dp->parent;
83     + }
84     +
85     + return 0;
86     }
87    
88     static void of_bus_sbus_count_cells(struct device_node *child,
89     diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
90     index 7495bc7..0708a5b 100644
91     --- a/arch/sparc64/kernel/irq.c
92     +++ b/arch/sparc64/kernel/irq.c
93     @@ -318,17 +318,25 @@ static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
94     sun4u_irq_enable(virt_irq);
95     }
96    
97     +/* Don't do anything. The desc->status check for IRQ_DISABLED in
98     + * handler_irq() will skip the handler call and that will leave the
99     + * interrupt in the sent state. The next ->enable() call will hit the
100     + * ICLR register to reset the state machine.
101     + *
102     + * This scheme is necessary, instead of clearing the Valid bit in the
103     + * IMAP register, to handle the case of IMAP registers being shared by
104     + * multiple INOs (and thus ICLR registers). Since we use a different
105     + * virtual IRQ for each shared IMAP instance, the generic code thinks
106     + * there is only one user so it prematurely calls ->disable() on
107     + * free_irq().
108     + *
109     + * We have to provide an explicit ->disable() method instead of using
110     + * NULL to get the default. The reason is that if the generic code
111     + * sees that, it also hooks up a default ->shutdown method which
112     + * invokes ->mask() which we do not want. See irq_chip_set_defaults().
113     + */
114     static void sun4u_irq_disable(unsigned int virt_irq)
115     {
116     - struct irq_handler_data *data = get_irq_chip_data(virt_irq);
117     -
118     - if (likely(data)) {
119     - unsigned long imap = data->imap;
120     - unsigned long tmp = upa_readq(imap);
121     -
122     - tmp &= ~IMAP_VALID;
123     - upa_writeq(tmp, imap);
124     - }
125     }
126    
127     static void sun4u_irq_eoi(unsigned int virt_irq)
128     @@ -739,7 +747,8 @@ void handler_irq(int irq, struct pt_regs *regs)
129    
130     desc = irq_desc + virt_irq;
131    
132     - desc->handle_irq(virt_irq, desc);
133     + if (!(desc->status & IRQ_DISABLED))
134     + desc->handle_irq(virt_irq, desc);
135    
136     bucket_pa = next_pa;
137     }
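
The comment block above describes a lazy-disable scheme: sun4u_irq_disable() becomes a no-op, handler_irq() skips handlers for IRQ_DISABLED descriptors, and the next ->enable() hits the ICLR register to reset the state machine. A minimal userspace model of that flow (illustrative only; irq_desc_model, handler_model and enable_model are made-up names, not kernel API):

    /* Userspace model of the lazy-disable scheme: a disabled IRQ's
     * handler is skipped, the "sent" state is kept, and the next
     * enable replays it (modelling the ICLR reset). */
    #include <stdio.h>

    #define IRQ_DISABLED 0x1

    struct irq_desc_model {
            unsigned int status;
            int pending;            /* models the "interrupt sent" state */
    };

    static void handler_model(struct irq_desc_model *d)
    {
            if (d->status & IRQ_DISABLED)
                    return;         /* leave it pending for later */
            d->pending = 0;
            printf("handled\n");
    }

    static void enable_model(struct irq_desc_model *d)
    {
            d->status &= ~IRQ_DISABLED;
            if (d->pending)         /* models hitting ICLR to replay */
                    handler_model(d);
    }

    int main(void)
    {
            struct irq_desc_model d = { .status = IRQ_DISABLED, .pending = 1 };
            handler_model(&d);      /* skipped: disabled */
            enable_model(&d);       /* replayed: prints "handled" */
            return 0;
    }
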
138     diff --git a/arch/sparc64/kernel/kgdb.c b/arch/sparc64/kernel/kgdb.c
139     index fefbe6d..f5a0fd4 100644
140     --- a/arch/sparc64/kernel/kgdb.c
141     +++ b/arch/sparc64/kernel/kgdb.c
142     @@ -108,7 +108,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
143     }
144    
145     #ifdef CONFIG_SMP
146     -void smp_kgdb_capture_client(struct pt_regs *regs)
147     +void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
148     {
149     unsigned long flags;
150    
151     diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
152     index 100ebd5..d342723 100644
153     --- a/arch/sparc64/kernel/of_device.c
154     +++ b/arch/sparc64/kernel/of_device.c
155     @@ -278,8 +278,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
156    
157     static int of_bus_sbus_match(struct device_node *np)
158     {
159     - return !strcmp(np->name, "sbus") ||
160     - !strcmp(np->name, "sbi");
161     + struct device_node *dp = np;
162     +
163     + while (dp) {
164     + if (!strcmp(dp->name, "sbus") ||
165     + !strcmp(dp->name, "sbi"))
166     + return 1;
167     +
168     + /* Have a look at use_1to1_mapping(). We're trying
169     + * to match SBUS if that's the top-level bus and we
170     + * don't have some intervening real bus that provides
171     + * ranges based translations.
172     + */
173     + if (of_find_property(dp, "ranges", NULL) != NULL)
174     + break;
175     +
176     + dp = dp->parent;
177     + }
178     +
179     + return 0;
180     }
181    
182     static void of_bus_sbus_count_cells(struct device_node *child,
183     diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
184     index 09a5ec2..d498c60 100644
185     --- a/arch/sparc64/kernel/pci_common.c
186     +++ b/arch/sparc64/kernel/pci_common.c
187     @@ -368,7 +368,7 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
188     const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL);
189    
190     if (vdma) {
191     - struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
192     + struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
193    
194     if (!rp) {
195     prom_printf("Cannot allocate IOMMU resource.\n");
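
The kmalloc-to-kzalloc switch matters because struct resource carries tree pointers (parent, sibling, child) that the resource code expects to start out NULL, while kmalloc returns uninitialized memory. A userspace sketch of the same hazard, with calloc standing in for kzalloc (resource_model is a made-up type for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for struct resource: linked-tree pointers must start NULL. */
    struct resource_model {
            const char *name;
            struct resource_model *parent, *sibling, *child;
    };

    int main(void)
    {
            /* malloc() leaves parent/sibling/child as garbage, like kmalloc();
             * calloc() zeroes them, like kzalloc(), so tree-insertion code
             * that tests "rp->child == NULL" sees a sane initial state. */
            struct resource_model *rp = calloc(1, sizeof(*rp));
            if (!rp)
                    return 1;
            rp->name = "IOMMU";
            printf("child starts %s\n", rp->child ? "garbage" : "NULL");
            free(rp);
            return 0;
    }
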
196     diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
197     index 2be166c..a0ad401 100644
198     --- a/arch/sparc64/kernel/smp.c
199     +++ b/arch/sparc64/kernel/smp.c
200     @@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
201     while (!cpu_isset(cpuid, smp_commenced_mask))
202     rmb();
203    
204     - ipi_call_lock();
205     + ipi_call_lock_irq();
206     cpu_set(cpuid, cpu_online_map);
207     - ipi_call_unlock();
208     + ipi_call_unlock_irq();
209    
210     /* idle thread is expected to have preempt disabled */
211     preempt_disable();
212     @@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
213     * If the address space is non-shared (ie. mm->count == 1) we avoid
214     * cross calls when we want to flush the currently running process's
215     * tlb state. This is done by clearing all cpu bits except the current
216     - * processor's in current->active_mm->cpu_vm_mask and performing the
217     + * processor's in current->mm->cpu_vm_mask and performing the
218     * flush locally only. This will force any subsequent cpus which run
219     * this task to flush the context from the local tlb if the process
220     * migrates to another cpu (again).
221     @@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
222     u32 ctx = CTX_HWBITS(mm->context);
223     int cpu = get_cpu();
224    
225     - if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
226     + if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
227     mm->cpu_vm_mask = cpumask_of_cpu(cpu);
228     else
229     smp_cross_call_masked(&xcall_flush_tlb_pending,
230     diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
231     index 1ade3d6..89bf646 100644
232     --- a/arch/sparc64/kernel/ttable.S
233     +++ b/arch/sparc64/kernel/ttable.S
234     @@ -63,7 +63,13 @@ tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
235     #else
236     tl0_irq6: BTRAP(0x46)
237     #endif
238     -tl0_irq7: BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
239     +tl0_irq7: BTRAP(0x47)
240     +#ifdef CONFIG_KGDB
241     +tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8)
242     +#else
243     +tl0_irq8: BTRAP(0x48)
244     +#endif
245     +tl0_irq9: BTRAP(0x49)
246     tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
247     tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
248     tl0_irq15: TRAP_IRQ(handler_irq, 15)
249     diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
250     index 86773e8..f1d76cb 100644
251     --- a/arch/sparc64/mm/ultra.S
252     +++ b/arch/sparc64/mm/ultra.S
253     @@ -681,28 +681,8 @@ xcall_new_mmu_context_version:
254     #ifdef CONFIG_KGDB
255     .globl xcall_kgdb_capture
256     xcall_kgdb_capture:
257     -661: rdpr %pstate, %g2
258     - wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
259     - .section .sun4v_2insn_patch, "ax"
260     - .word 661b
261     - nop
262     - nop
263     - .previous
264     -
265     - rdpr %pil, %g2
266     - wrpr %g0, 15, %pil
267     - sethi %hi(109f), %g7
268     - ba,pt %xcc, etrap_irq
269     -109: or %g7, %lo(109b), %g7
270     -#ifdef CONFIG_TRACE_IRQFLAGS
271     - call trace_hardirqs_off
272     - nop
273     -#endif
274     - call smp_kgdb_capture_client
275     - add %sp, PTREGS_OFF, %o0
276     - /* Has to be a non-v9 branch due to the large distance. */
277     - ba rtrap_xcall
278     - ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
279     + wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
280     + retry
281     #endif
282    
283     #endif /* CONFIG_SMP */
284     diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
285     index 6d5a3c4..2781331 100644
286     --- a/arch/x86/kernel/setup.c
287     +++ b/arch/x86/kernel/setup.c
288     @@ -730,6 +730,9 @@ void __init setup_arch(char **cmdline_p)
289    
290     finish_e820_parsing();
291    
292     + if (efi_enabled)
293     + efi_init();
294     +
295     dmi_scan_machine();
296    
297     dmi_check_system(bad_bios_dmi_table);
298     @@ -743,8 +746,6 @@ void __init setup_arch(char **cmdline_p)
299     insert_resource(&iomem_resource, &data_resource);
300     insert_resource(&iomem_resource, &bss_resource);
301    
302     - if (efi_enabled)
303     - efi_init();
304    
305     #ifdef CONFIG_X86_32
306     if (ppro_with_ram_bug()) {
307     diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
308     index 8f307d9..f46c340 100644
309     --- a/arch/x86/mm/hugetlbpage.c
310     +++ b/arch/x86/mm/hugetlbpage.c
311     @@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
312     unsigned long sbase = saddr & PUD_MASK;
313     unsigned long s_end = sbase + PUD_SIZE;
314    
315     + /* Allow segments to share if only one is marked locked */
316     + unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
317     + unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
318     +
319     /*
320     * match the virtual addresses, permission and the alignment of the
321     * page table page.
322     */
323     if (pmd_index(addr) != pmd_index(saddr) ||
324     - vma->vm_flags != svma->vm_flags ||
325     + vm_flags != svm_flags ||
326     sbase < svma->vm_start || svma->vm_end < s_end)
327     return 0;
328    
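
The idea of this hunk: two VMAs may safely share a hugetlb page table even when exactly one of them is mlock()ed, so VM_LOCKED is masked out of both flag words before the equality test. A standalone sketch of that mask-then-compare pattern (flag values here are made up for illustration):

    #include <stdio.h>

    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_LOCKED 0x4UL  /* ignored when deciding shareability */

    static int shareable(unsigned long a, unsigned long b)
    {
            return (a & ~VM_LOCKED) == (b & ~VM_LOCKED);
    }

    int main(void)
    {
            unsigned long vma_flags  = VM_READ | VM_WRITE | VM_LOCKED;
            unsigned long svma_flags = VM_READ | VM_WRITE;
            printf("shareable: %d\n", shareable(vma_flags, svma_flags)); /* 1 */
            return 0;
    }
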
329     diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
330     index 7c3b8dc..5468c19 100644
331     --- a/arch/x86/mm/pageattr.c
332     +++ b/arch/x86/mm/pageattr.c
333     @@ -565,6 +565,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
334     ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
335     pgprot_val(ref_prot) |= _PAGE_PRESENT;
336     __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
337     +
338     + /*
339     + * Intel Atom errata AAH41 workaround.
340     + *
341     + * The real fix should be in hw or in a microcode update, but
342     + * we also probabilistically try to reduce the window of having
343     + * a large TLB mixed with 4K TLBs while instruction fetches are
344     + * going on.
345     + */
346     + __flush_tlb_all();
347     +
348     base = NULL;
349    
350     out_unlock:
351     diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
352     index 76d49eb..8a06160 100644
353     --- a/arch/x86/pci/mmconfig-shared.c
354     +++ b/arch/x86/pci/mmconfig-shared.c
355     @@ -255,7 +255,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
356     if (!fixmem32)
357     return AE_OK;
358     if ((mcfg_res->start >= fixmem32->address) &&
359     - (mcfg_res->end <= (fixmem32->address +
360     + (mcfg_res->end < (fixmem32->address +
361     fixmem32->address_length))) {
362     mcfg_res->flags = 1;
363     return AE_CTRL_TERMINATE;
364     @@ -272,7 +272,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
365     return AE_OK;
366    
367     if ((mcfg_res->start >= address.minimum) &&
368     - (mcfg_res->end <= (address.minimum + address.address_length))) {
369     + (mcfg_res->end < (address.minimum + address.address_length))) {
370     mcfg_res->flags = 1;
371     return AE_CTRL_TERMINATE;
372     }
373     @@ -298,7 +298,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
374     struct resource mcfg_res;
375    
376     mcfg_res.start = start;
377     - mcfg_res.end = end;
378     + mcfg_res.end = end - 1;
379     mcfg_res.flags = 0;
380    
381     acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
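
All three hunks fix the same off-by-one: mcfg_res->end is an inclusive last address, while address + address_length is one past the end of the region, so containment needs "<" rather than "<=", and a half-open [start, end) request must be stored with end - 1. A small sketch of the corrected check:

    #include <stdio.h>

    /* Region is [base, base + len); the request end is an inclusive
     * last address, so it must be strictly below base + len. */
    static int contains(unsigned long base, unsigned long len,
                        unsigned long start, unsigned long end_incl)
    {
            return start >= base && end_incl < base + len;
    }

    int main(void)
    {
            /* A 0x1000-byte region at 0xe0000000: last valid byte is
             * 0xe0000fff, so an inclusive end of 0xe0001000 is outside. */
            printf("%d\n", contains(0xe0000000UL, 0x1000, 0xe0000000UL, 0xe0000fffUL)); /* 1 */
            printf("%d\n", contains(0xe0000000UL, 0x1000, 0xe0000000UL, 0xe0001000UL)); /* 0 */
            return 0;
    }
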
382     diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
383     index 81b40ed..5639e27 100644
384     --- a/drivers/acpi/processor_idle.c
385     +++ b/drivers/acpi/processor_idle.c
386     @@ -303,6 +303,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
387     struct acpi_processor_power *pwr = &pr->power;
388     u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
389    
390     + if (boot_cpu_has(X86_FEATURE_AMDC1E))
391     + type = ACPI_STATE_C1;
392     +
393     /*
394     * Check, if one of the previous states already marked the lapic
395     * unstable
396     @@ -1154,6 +1157,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
397     switch (cx->type) {
398     case ACPI_STATE_C1:
399     cx->valid = 1;
400     + acpi_timer_check_state(i, pr, cx);
401     break;
402    
403     case ACPI_STATE_C2:
404     @@ -1468,20 +1472,22 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
405    
406     /* Do not access any ACPI IO ports in suspend path */
407     if (acpi_idle_suspend) {
408     - acpi_safe_halt();
409     local_irq_enable();
410     + cpu_relax();
411     return 0;
412     }
413    
414     if (pr->flags.bm_check)
415     acpi_idle_update_bm_rld(pr, cx);
416    
417     + acpi_state_timer_broadcast(pr, cx, 1);
418     t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
419     acpi_idle_do_entry(cx);
420     t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
421    
422     local_irq_enable();
423     cx->usage++;
424     + acpi_state_timer_broadcast(pr, cx, 0);
425    
426     return ticks_elapsed_in_us(t1, t2);
427     }
428     diff --git a/drivers/char/random.c b/drivers/char/random.c
429     index 7ce1ac4..201b2c1 100644
430     --- a/drivers/char/random.c
431     +++ b/drivers/char/random.c
432     @@ -1626,15 +1626,20 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
433     * value is not cryptographically secure but for several uses the cost of
434     * depleting entropy is too high
435     */
436     +DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
437     unsigned int get_random_int(void)
438     {
439     - /*
440     - * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
441     - * every second, from the entropy pool (and thus creates a limited
442     - * drain on it), and uses halfMD4Transform within the second. We
443     - * also mix it with jiffies and the PID:
444     - */
445     - return secure_ip_id((__force __be32)(current->pid + jiffies));
446     + struct keydata *keyptr;
447     + __u32 *hash = get_cpu_var(get_random_int_hash);
448     + int ret;
449     +
450     + keyptr = get_keyptr();
451     + hash[0] += current->pid + jiffies + get_cycles();
452     +
453     + ret = half_md4_transform(hash, keyptr->secret);
454     + put_cpu_var(get_random_int_hash);
455     +
456     + return ret;
457     }
458    
459     /*
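
The replacement keeps a small per-CPU hash state, perturbs one word with the pid, jiffies and the cycle counter, then runs half_md4_transform() keyed by the periodically re-keyed secret. A userspace sketch of the shape of that scheme; mix32() below is a toy stand-in, not the kernel's half_md4_transform():

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    static uint32_t hash_state[4];             /* per-CPU in the kernel */
    static const uint32_t secret = 0x9e3779b9; /* re-keyed periodically there */

    static uint32_t mix32(uint32_t *h, uint32_t key)
    {
            /* Toy mixer standing in for half_md4_transform(). */
            uint32_t x = h[0] ^ h[1] ^ h[2] ^ h[3] ^ key;
            x ^= x >> 16; x *= 0x45d9f3b; x ^= x >> 16;
            h[1] = x;
            return x;
    }

    static unsigned int get_random_int_model(void)
    {
            /* Perturb the state, then hash it: cheap, non-cryptographic,
             * and it never drains the entropy pool. */
            hash_state[0] += (uint32_t)getpid() + (uint32_t)time(NULL);
            return mix32(hash_state, secret);
    }

    int main(void)
    {
            printf("%u\n", get_random_int_model());
            return 0;
    }
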
460     diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
461     index 68f052b..2db432d 100644
462     --- a/drivers/char/tpm/tpm_bios.c
463     +++ b/drivers/char/tpm/tpm_bios.c
464     @@ -214,7 +214,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
465     unsigned char * event_entry)
466     {
467     const char *name = "";
468     - char data[40] = "";
469     + /* 41 so there is room for 40 data and 1 nul */
470     + char data[41] = "";
471     int i, n_len = 0, d_len = 0;
472     struct tcpa_pc_event *pc_event;
473    
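
The fix reserves one extra byte so 40 bytes of event data plus the terminating NUL both fit. A standalone illustration of why a formatted 40-character result needs a 41-byte buffer:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char data[41] = "";     /* room for 40 characters + 1 NUL */
            /* snprintf() always NUL-terminates within the given size, so
             * with only 40 bytes the 40th data character would be lost. */
            snprintf(data, sizeof(data), "%040d", 7);
            printf("len=%zu\n", strlen(data));      /* prints len=40 */
            return 0;
    }
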
474     diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
475     index ed7859f..affee01 100644
476     --- a/drivers/hwmon/lm78.c
477     +++ b/drivers/hwmon/lm78.c
478     @@ -178,7 +178,7 @@ static struct platform_driver lm78_isa_driver = {
479     .name = "lm78",
480     },
481     .probe = lm78_isa_probe,
482     - .remove = lm78_isa_remove,
483     + .remove = __devexit_p(lm78_isa_remove),
484     };
485    
486    
487     diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
488     index 53526d9..dedf96b 100644
489     --- a/drivers/media/video/cx88/cx88-input.c
490     +++ b/drivers/media/video/cx88/cx88-input.c
491     @@ -48,8 +48,7 @@ struct cx88_IR {
492    
493     /* poll external decoder */
494     int polling;
495     - struct work_struct work;
496     - struct timer_list timer;
497     + struct delayed_work work;
498     u32 gpio_addr;
499     u32 last_gpio;
500     u32 mask_keycode;
501     @@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
502     }
503     }
504    
505     -static void ir_timer(unsigned long data)
506     -{
507     - struct cx88_IR *ir = (struct cx88_IR *)data;
508     -
509     - schedule_work(&ir->work);
510     -}
511     -
512     static void cx88_ir_work(struct work_struct *work)
513     {
514     - struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
515     + struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work);
516    
517     cx88_ir_handle_key(ir);
518     - mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
519     + schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
520     }
521    
522     void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
523     {
524     if (ir->polling) {
525     - setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
526     - INIT_WORK(&ir->work, cx88_ir_work);
527     - schedule_work(&ir->work);
528     + INIT_DELAYED_WORK(&ir->work, cx88_ir_work);
529     + schedule_delayed_work(&ir->work, 0);
530     }
531     if (ir->sampling) {
532     core->pci_irqmask |= PCI_INT_IR_SMPINT;
533     @@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
534     core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
535     }
536    
537     - if (ir->polling) {
538     - del_timer_sync(&ir->timer);
539     - flush_scheduled_work();
540     - }
541     + if (ir->polling)
542     + cancel_delayed_work_sync(&ir->work);
543     }
544    
545     /* ---------------------------------------------------------------------- */
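
The conversion replaces the hand-rolled timer_list + work_struct pair with a single delayed_work whose work function reschedules itself. In outline the pattern looks like the sketch below (kernel-style fragment; my_poll_dev and polling_msecs are made-up names, the workqueue calls are the real API):

    #include <linux/workqueue.h>

    struct my_poll_dev {
            struct delayed_work work;
            int polling_msecs;
    };

    static void my_poll(struct work_struct *work)
    {
            /* delayed_work embeds a work_struct named "work", hence work.work */
            struct my_poll_dev *dev =
                    container_of(work, struct my_poll_dev, work.work);

            /* ... sample the hardware ... */
            schedule_delayed_work(&dev->work,
                                  msecs_to_jiffies(dev->polling_msecs));
    }

    static void my_poll_start(struct my_poll_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->work, my_poll);
            schedule_delayed_work(&dev->work, 0);
    }

    static void my_poll_stop(struct my_poll_dev *dev)
    {
            /* One call replaces del_timer_sync() + flush_scheduled_work(). */
            cancel_delayed_work_sync(&dev->work);
    }
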
546     diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
547     index 2486a65..ba91aee 100644
548     --- a/drivers/net/bnx2.c
549     +++ b/drivers/net/bnx2.c
550     @@ -2574,6 +2574,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
551     /* Tell compiler that status block fields can change. */
552     barrier();
553     cons = *bnapi->hw_tx_cons_ptr;
554     + barrier();
555     if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
556     cons++;
557     return cons;
558     @@ -2849,6 +2850,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
559     /* Tell compiler that status block fields can change. */
560     barrier();
561     cons = *bnapi->hw_rx_cons_ptr;
562     + barrier();
563     if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
564     cons++;
565     return cons;
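
hw_tx_cons_ptr points into a status block the NIC updates by DMA; the second barrier() stops the compiler from re-reading the location after the MAX_TX_DESC_CNT test, which could otherwise yield an inconsistent cons value. A minimal userspace rendering of the pattern, with barrier() spelled as a GCC memory clobber:

    #include <stdio.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static unsigned int status_block_cons;  /* imagine the device DMAs into this */

    static unsigned int get_hw_cons(void)
    {
            unsigned int cons;

            barrier();              /* fields may have changed: force a reload */
            cons = status_block_cons;
            barrier();              /* don't re-read the location after the test */
            if ((cons & 0xff) == 0xff)
                    cons++;
            return cons;
    }

    int main(void)
    {
            status_block_cons = 0xff;
            printf("cons=%u\n", get_hw_cons());
            return 0;
    }
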
566     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
567     index 4489e58..e929e61 100644
568     --- a/drivers/net/bonding/bond_alb.c
569     +++ b/drivers/net/bonding/bond_alb.c
570     @@ -1716,9 +1716,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
571     }
572     }
573    
574     - write_unlock_bh(&bond->curr_slave_lock);
575     - read_unlock(&bond->lock);
576     -
577     if (swap_slave) {
578     alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
579     alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
580     @@ -1726,16 +1723,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
581     alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
582     bond->alb_info.rlb_enabled);
583    
584     + read_lock(&bond->lock);
585     alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
586     if (bond->alb_info.rlb_enabled) {
587     /* inform clients mac address has changed */
588     rlb_req_update_slave_clients(bond, bond->curr_active_slave);
589     }
590     + read_unlock(&bond->lock);
591     }
592    
593     - read_lock(&bond->lock);
594     - write_lock_bh(&bond->curr_slave_lock);
595     -
596     return 0;
597     }
598    
599     diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
600     index 6b96357..1f60117 100644
601     --- a/drivers/net/e1000/e1000_main.c
602     +++ b/drivers/net/e1000/e1000_main.c
603     @@ -4133,8 +4133,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
604     PCI_DMA_FROMDEVICE);
605    
606     length = le16_to_cpu(rx_desc->length);
607     -
608     - if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
609     + /* !EOP means multiple descriptors were used to store a single
610     + * packet, also make sure the frame isn't just CRC only */
611     + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
612     /* All receives must fit into a single buffer */
613     E1000_DBG("%s: Receive packet consumed multiple"
614     " buffers\n", netdev->name);
615     diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
616     index 89964fa..23110d8 100644
617     --- a/drivers/net/igb/igb_ethtool.c
618     +++ b/drivers/net/igb/igb_ethtool.c
619     @@ -2029,6 +2029,10 @@ static struct ethtool_ops igb_ethtool_ops = {
620     .get_ethtool_stats = igb_get_ethtool_stats,
621     .get_coalesce = igb_get_coalesce,
622     .set_coalesce = igb_set_coalesce,
623     + .get_flags = ethtool_op_get_flags,
624     +#ifdef CONFIG_IGB_LRO
625     + .set_flags = ethtool_op_set_flags,
626     +#endif
627     };
628    
629     void igb_set_ethtool_ops(struct net_device *netdev)
630     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
631     index 4239450..1aa0388 100644
632     --- a/drivers/net/macvlan.c
633     +++ b/drivers/net/macvlan.c
634     @@ -328,7 +328,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
635     const struct macvlan_dev *vlan = netdev_priv(dev);
636     struct net_device *lowerdev = vlan->lowerdev;
637    
638     - if (lowerdev->ethtool_ops->get_rx_csum == NULL)
639     + if (lowerdev->ethtool_ops == NULL ||
640     + lowerdev->ethtool_ops->get_rx_csum == NULL)
641     return 0;
642     return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
643     }
644     diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
645     index d6524db..7d41ec8 100644
646     --- a/drivers/net/myri10ge/myri10ge.c
647     +++ b/drivers/net/myri10ge/myri10ge.c
648     @@ -2379,6 +2379,7 @@ static int myri10ge_open(struct net_device *dev)
649     lro_mgr->lro_arr = ss->rx_done.lro_desc;
650     lro_mgr->get_frag_header = myri10ge_get_frag_header;
651     lro_mgr->max_aggr = myri10ge_lro_max_pkts;
652     + lro_mgr->frag_align_pad = 2;
653     if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
654     lro_mgr->max_aggr = MAX_SKB_FRAGS;
655    
656     diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
657     index a0537f0..6221cdc 100644
658     --- a/drivers/scsi/3w-xxxx.c
659     +++ b/drivers/scsi/3w-xxxx.c
660     @@ -6,7 +6,7 @@
661     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
662     Brad Strand <linux@3ware.com>
663    
664     - Copyright (C) 1999-2007 3ware Inc.
665     + Copyright (C) 1999-2009 3ware Inc.
666    
667     Kernel compatibility By: Andre Hedrick <andre@suse.com>
668     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
669     @@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
670     {
671     dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
672    
673     - scsi_dma_unmap(cmd);
674     + if (cmd->SCp.phase == TW_PHASE_SGLIST)
675     + scsi_dma_unmap(cmd);
676     } /* End tw_unmap_scsi_data() */
677    
678     /* This function will reset a device extension */
679     diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
680     index 0742e68..e938615 100644
681     --- a/drivers/scsi/3w-xxxx.h
682     +++ b/drivers/scsi/3w-xxxx.h
683     @@ -6,7 +6,7 @@
684     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
685     Brad Strand <linux@3ware.com>
686    
687     - Copyright (C) 1999-2007 3ware Inc.
688     + Copyright (C) 1999-2009 3ware Inc.
689    
690     Kernel compatibility By: Andre Hedrick <andre@suse.com>
691     Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
692     diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
693     index 2b7531d..08eefec 100644
694     --- a/drivers/serial/icom.c
695     +++ b/drivers/serial/icom.c
696     @@ -1482,8 +1482,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
697    
698     free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
699     iounmap(icom_adapter->base_addr);
700     - icom_free_adapter(icom_adapter);
701     pci_release_regions(icom_adapter->pci_dev);
702     + icom_free_adapter(icom_adapter);
703     }
704    
705     static void icom_kref_release(struct kref *kref)
706     diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
707     index 32e7acb..3485510 100644
708     --- a/drivers/serial/mpc52xx_uart.c
709     +++ b/drivers/serial/mpc52xx_uart.c
710     @@ -1000,7 +1000,7 @@ mpc52xx_console_setup(struct console *co, char *options)
711     pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
712     co, co->index, options);
713    
714     - if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
715     + if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
716     pr_debug("PSC%x out of range\n", co->index);
717     return -EINVAL;
718     }
719     diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
720     index 8017f1c..eb3b103 100644
721     --- a/drivers/usb/host/isp1760-hcd.c
722     +++ b/drivers/usb/host/isp1760-hcd.c
723     @@ -1645,6 +1645,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
724     u32 reg_base, or_reg, skip_reg;
725     unsigned long flags;
726     struct ptd ptd;
727     + packet_enqueue *pe;
728    
729     switch (usb_pipetype(urb->pipe)) {
730     case PIPE_ISOCHRONOUS:
731     @@ -1656,6 +1657,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
732     reg_base = INT_REGS_OFFSET;
733     or_reg = HC_INT_IRQ_MASK_OR_REG;
734     skip_reg = HC_INT_PTD_SKIPMAP_REG;
735     + pe = enqueue_an_INT_packet;
736     break;
737    
738     default:
739     @@ -1663,6 +1665,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
740     reg_base = ATL_REGS_OFFSET;
741     or_reg = HC_ATL_IRQ_MASK_OR_REG;
742     skip_reg = HC_ATL_PTD_SKIPMAP_REG;
743     + pe = enqueue_an_ATL_packet;
744     break;
745     }
746    
747     @@ -1674,6 +1677,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
748     u32 skip_map;
749     u32 or_map;
750     struct isp1760_qtd *qtd;
751     + struct isp1760_qh *qh = ints->qh;
752    
753     skip_map = isp1760_readl(hcd->regs + skip_reg);
754     skip_map |= 1 << i;
755     @@ -1686,8 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
756     priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
757     + i * sizeof(ptd), sizeof(ptd));
758     qtd = ints->qtd;
759     -
760     - clean_up_qtdlist(qtd);
761     + qtd = clean_up_qtdlist(qtd);
762    
763     free_mem(priv, ints->payload);
764    
765     @@ -1698,7 +1701,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
766     ints->payload = 0;
767    
768     isp1760_urb_done(priv, urb, status);
769     + if (qtd)
770     + pe(hcd, qh, qtd);
771     break;
772     +
773     + } else if (ints->qtd) {
774     + struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
775     +
776     + for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
777     + if (qtd->urb == urb) {
778     + prev_qtd->hw_next = clean_up_qtdlist(qtd);
779     + isp1760_urb_done(priv, urb, status);
780     + break;
781     + }
782     + prev_qtd = qtd;
783     + }
784     + /* we found the urb before the end of the list */
785     + if (qtd)
786     + break;
787     }
788     ints++;
789     }
790     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
791     index eadbee3..1985721 100644
792     --- a/fs/ext4/ext4.h
793     +++ b/fs/ext4/ext4.h
794     @@ -248,6 +248,30 @@ struct flex_groups {
795     #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
796     #define EXT4_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
797    
798     +/* Flags that should be inherited by new inodes from their parent. */
799     +#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
800     + EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
801     + EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
802     + EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
803     + EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
804     +
805     +/* Flags that are appropriate for regular files (all but dir-specific ones). */
806     +#define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL))
807     +
808     +/* Flags that are appropriate for non-directories/regular files. */
809     +#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
810     +
811     +/* Mask out flags that are inappropriate for the given type of inode. */
812     +static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
813     +{
814     + if (S_ISDIR(mode))
815     + return flags;
816     + else if (S_ISREG(mode))
817     + return flags & EXT4_REG_FLMASK;
818     + else
819     + return flags & EXT4_OTHER_FLMASK;
820     +}
821     +
822     /*
823     * Inode dynamic state flags
824     */
825     @@ -255,6 +279,7 @@ struct flex_groups {
826     #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
827     #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
828     #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
829     +#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
830    
831     /* Used to pass group descriptor data when online resize is done */
832     struct ext4_new_group_input {
833     @@ -302,7 +327,9 @@ struct ext4_new_group_data {
834     #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
835     #define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
836     #define EXT4_IOC_MIGRATE _IO('f', 9)
837     + /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
838     /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
839     +#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
840    
841     /*
842     * ioctl commands in 32 bit emulation
843     @@ -1078,6 +1105,7 @@ extern int ext4_can_truncate(struct inode *inode);
844     extern void ext4_truncate (struct inode *);
845     extern void ext4_set_inode_flags(struct inode *);
846     extern void ext4_get_inode_flags(struct ext4_inode_info *);
847     +extern int ext4_alloc_da_blocks(struct inode *inode);
848     extern void ext4_set_aops(struct inode *inode);
849     extern int ext4_writepage_trans_blocks(struct inode *);
850     extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
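
ext4_mask_flags() centralizes the per-mode policy the later ialloc.c and ioctl.c hunks rely on: directories keep everything, regular files drop the dir-only flags, and everything else keeps only NODUMP/NOATIME. A standalone sketch of the same dispatch using the <sys/stat.h> mode macros (the flag values here are illustrative, not the on-disk ext4 ones):

    #include <stdio.h>
    #include <sys/stat.h>

    #define FL_NODUMP  0x01u
    #define FL_NOATIME 0x02u
    #define FL_DIRSYNC 0x04u        /* directory-only */
    #define FL_TOPDIR  0x08u        /* directory-only */

    #define REG_FLMASK   (~(FL_DIRSYNC | FL_TOPDIR))
    #define OTHER_FLMASK (FL_NODUMP | FL_NOATIME)

    static unsigned int mask_flags(mode_t mode, unsigned int flags)
    {
            if (S_ISDIR(mode))
                    return flags;
            else if (S_ISREG(mode))
                    return flags & REG_FLMASK;
            else
                    return flags & OTHER_FLMASK;
    }

    int main(void)
    {
            unsigned int inherited = FL_NODUMP | FL_DIRSYNC;
            printf("regular file keeps: %#x\n", mask_flags(S_IFREG, inherited));
            printf("fifo keeps:         %#x\n", mask_flags(S_IFIFO, inherited));
            return 0;
    }
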
851     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
852     index b24d3c5..f99635a 100644
853     --- a/fs/ext4/extents.c
854     +++ b/fs/ext4/extents.c
855     @@ -1118,7 +1118,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
856     struct ext4_extent_idx *ix;
857     struct ext4_extent *ex;
858     ext4_fsblk_t block;
859     - int depth, ee_len;
860     + int depth; /* Note, NOT eh_depth; depth from top of tree */
861     + int ee_len;
862    
863     BUG_ON(path == NULL);
864     depth = path->p_depth;
865     @@ -1177,7 +1178,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
866     if (bh == NULL)
867     return -EIO;
868     eh = ext_block_hdr(bh);
869     - if (ext4_ext_check_header(inode, eh, depth)) {
870     + /* subtract from p_depth to get proper eh_depth */
871     + if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
872     put_bh(bh);
873     return -EIO;
874     }
875     @@ -1631,11 +1633,13 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
876     {
877     struct ext4_ext_cache *cex;
878     BUG_ON(len == 0);
879     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
880     cex = &EXT4_I(inode)->i_cached_extent;
881     cex->ec_type = type;
882     cex->ec_block = block;
883     cex->ec_len = len;
884     cex->ec_start = start;
885     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
886     }
887    
888     /*
889     @@ -1692,12 +1696,17 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
890     struct ext4_extent *ex)
891     {
892     struct ext4_ext_cache *cex;
893     + int ret = EXT4_EXT_CACHE_NO;
894    
895     + /*
896     + * We borrow i_block_reservation_lock to protect i_cached_extent
897     + */
898     + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
899     cex = &EXT4_I(inode)->i_cached_extent;
900    
901     /* has cache valid data? */
902     if (cex->ec_type == EXT4_EXT_CACHE_NO)
903     - return EXT4_EXT_CACHE_NO;
904     + goto errout;
905    
906     BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
907     cex->ec_type != EXT4_EXT_CACHE_EXTENT);
908     @@ -1708,11 +1717,11 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
909     ext_debug("%u cached by %u:%u:%llu\n",
910     block,
911     cex->ec_block, cex->ec_len, cex->ec_start);
912     - return cex->ec_type;
913     + ret = cex->ec_type;
914     }
915     -
916     - /* not in cache */
917     - return EXT4_EXT_CACHE_NO;
918     +errout:
919     + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
920     + return ret;
921     }
922    
923     /*
924     @@ -2668,6 +2677,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
925     if (allocated > max_blocks)
926     allocated = max_blocks;
927     set_buffer_unwritten(bh_result);
928     + bh_result->b_bdev = inode->i_sb->s_bdev;
929     + bh_result->b_blocknr = newblock;
930     goto out2;
931     }
932    
933     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
934     index 430eb79..c0d02f8 100644
935     --- a/fs/ext4/file.c
936     +++ b/fs/ext4/file.c
937     @@ -33,9 +33,14 @@
938     */
939     static int ext4_release_file (struct inode * inode, struct file * filp)
940     {
941     + if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
942     + ext4_alloc_da_blocks(inode);
943     + EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
944     + }
945     /* if we are the last writer on the inode, drop the block reservation */
946     if ((filp->f_mode & FMODE_WRITE) &&
947     - (atomic_read(&inode->i_writecount) == 1))
948     + (atomic_read(&inode->i_writecount) == 1) &&
949     + !EXT4_I(inode)->i_reserved_data_blocks)
950     {
951     down_write(&EXT4_I(inode)->i_data_sem);
952     ext4_discard_reservation(inode);
953     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
954     index cce841f..e8754fd 100644
955     --- a/fs/ext4/ialloc.c
956     +++ b/fs/ext4/ialloc.c
957     @@ -188,7 +188,7 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
958     struct ext4_group_desc * gdp;
959     struct ext4_super_block * es;
960     struct ext4_sb_info *sbi;
961     - int fatal = 0, err;
962     + int fatal = 0, err, cleared;
963     ext4_group_t flex_group;
964    
965     if (atomic_read(&inode->i_count) > 1) {
966     @@ -242,10 +242,12 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
967     goto error_return;
968    
969     /* Ok, now we can actually update the inode bitmaps.. */
970     - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
971     - bit, bitmap_bh->b_data))
972     - ext4_error (sb, "ext4_free_inode",
973     - "bit already cleared for inode %lu", ino);
974     + spin_lock(sb_bgl_lock(sbi, block_group));
975     + cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
976     + spin_unlock(sb_bgl_lock(sbi, block_group));
977     + if (!cleared)
978     + ext4_error(sb, "ext4_free_inode",
979     + "bit already cleared for inode %lu", ino);
980     else {
981     gdp = ext4_get_group_desc (sb, block_group, &bh2);
982    
983     @@ -685,6 +687,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
984     struct inode *ret;
985     ext4_group_t i;
986     int free = 0;
987     + static int once = 1;
988     ext4_group_t flex_group;
989    
990     /* Cannot create files in a deleted directory */
991     @@ -704,10 +707,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
992     ret2 = find_group_flex(sb, dir, &group);
993     if (ret2 == -1) {
994     ret2 = find_group_other(sb, dir, &group);
995     - if (ret2 == 0 && printk_ratelimit())
996     + if (ret2 == 0 && once) {
997     + once = 0;
998     printk(KERN_NOTICE "ext4: find_group_flex "
999     "failed, fallback succeeded dir %lu\n",
1000     dir->i_ino);
1001     + }
1002     }
1003     goto got_group;
1004     }
1005     @@ -861,16 +866,12 @@ got:
1006     ei->i_disksize = 0;
1007    
1008     /*
1009     - * Don't inherit extent flag from directory. We set extent flag on
1010     - * newly created directory and file only if -o extent mount option is
1011     - * specified
1012     + * Don't inherit extent flag from directory, amongst others. We set
1013     + * extent flag on newly created directory and file only if -o extent
1014     + * mount option is specified
1015     */
1016     - ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
1017     - if (S_ISLNK(mode))
1018     - ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
1019     - /* dirsync only applies to directories */
1020     - if (!S_ISDIR(mode))
1021     - ei->i_flags &= ~EXT4_DIRSYNC_FL;
1022     + ei->i_flags =
1023     + ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1024     ei->i_file_acl = 0;
1025     ei->i_dtime = 0;
1026     ei->i_block_alloc_info = NULL;
1027     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1028     index 63b911b..aeebfc2 100644
1029     --- a/fs/ext4/inode.c
1030     +++ b/fs/ext4/inode.c
1031     @@ -1046,6 +1046,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1032     EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1033     EXT4_I(inode)->i_allocated_meta_blocks = 0;
1034     spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1035     +
1036     + /*
1037     + * If we have done all the pending block allocations and if
1038     + * there aren't any writers on the inode, we can discard the
1039     + * inode's preallocations.
1040     + */
1041     + if (!total && (atomic_read(&inode->i_writecount) == 0))
1042     + ext4_discard_reservation(inode);
1043     }
1044    
1045     /*
1046     @@ -1077,6 +1085,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1047     int retval;
1048    
1049     clear_buffer_mapped(bh);
1050     + clear_buffer_unwritten(bh);
1051    
1052     /*
1053     * Try to see if we can get the block without requesting
1054     @@ -1107,6 +1116,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1055     return retval;
1056    
1057     /*
1058     + * When we call get_blocks without the create flag, the
1059     + * BH_Unwritten flag could have gotten set if the blocks
1060     + * requested were part of an uninitialized extent. We need to
1061     + * clear this flag now that we are committed to convert all or
1062     + * part of the uninitialized extent to be an initialized
1063     + * extent. This is because we need to avoid the combination
1064     + * of BH_Unwritten and BH_Mapped flags being simultaneously
1065     + * set on the buffer_head.
1066     + */
1067     + clear_buffer_unwritten(bh);
1068     +
1069     + /*
1070     * New blocks allocate and/or writing to uninitialized extent
1071     * will possibly result in updating i_data, so we take
1072     * the write lock of i_data_sem, and call get_blocks()
1073     @@ -2097,6 +2118,10 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1074     struct buffer_head *bh_result, int create)
1075     {
1076     int ret = 0;
1077     + sector_t invalid_block = ~((sector_t) 0xffff);
1078     +
1079     + if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1080     + invalid_block = ~0;
1081    
1082     BUG_ON(create == 0);
1083     BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1084     @@ -2118,11 +2143,18 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1085     /* not enough space to reserve */
1086     return ret;
1087    
1088     - map_bh(bh_result, inode->i_sb, 0);
1089     + map_bh(bh_result, inode->i_sb, invalid_block);
1090     set_buffer_new(bh_result);
1091     set_buffer_delay(bh_result);
1092     } else if (ret > 0) {
1093     bh_result->b_size = (ret << inode->i_blkbits);
1094     + /*
1095     + * With sub-block writes into unwritten extents
1096     + * we also need to mark the buffer as new so that
1097     + * the unwritten parts of the buffer gets correctly zeroed.
1098     + */
1099     + if (buffer_unwritten(bh_result))
1100     + set_buffer_new(bh_result);
1101     ret = 0;
1102     }
1103    
1104     @@ -2585,6 +2617,48 @@ out:
1105     return;
1106     }
1107    
1108     +/*
1109     + * Force all delayed allocation blocks to be allocated for a given inode.
1110     + */
1111     +int ext4_alloc_da_blocks(struct inode *inode)
1112     +{
1113     + if (!EXT4_I(inode)->i_reserved_data_blocks &&
1114     + !EXT4_I(inode)->i_reserved_meta_blocks)
1115     + return 0;
1116     +
1117     + /*
1118     + * We do something simple for now. The filemap_flush() will
1119     + * also start triggering a write of the data blocks, which is
1120     + * not strictly speaking necessary (and for users of
1121     + * laptop_mode, not even desirable). However, to do otherwise
1122     + * would require replicating code paths in:
1123     + *
1124     + * ext4_da_writepages() ->
1125     + * write_cache_pages() ---> (via passed in callback function)
1126     + * __mpage_da_writepage() -->
1127     + * mpage_add_bh_to_extent()
1128     + * mpage_da_map_blocks()
1129     + *
1130     + * The problem is that write_cache_pages(), located in
1131     + * mm/page-writeback.c, marks pages clean in preparation for
1132     + * doing I/O, which is not desirable if we're not planning on
1133     + * doing I/O at all.
1134     + *
1135     + * We could call write_cache_pages(), and then redirty all of
1136     + * the pages by calling redirty_page_for_writeback() but that
1137     + * would be ugly in the extreme. So instead we would need to
1138     + * replicate parts of the code in the above functions,
1139     + * simplifying them because we wouldn't actually intend to
1140     + * write out the pages, but rather only collect contiguous
1141     + * logical block extents, call the multi-block allocator, and
1142     + * then update the buffer heads with the block allocations.
1143     + *
1144     + * For now, though, we'll cheat by calling filemap_flush(),
1145     + * which will map the blocks, and start the I/O, but not
1146     + * actually wait for the I/O to complete.
1147     + */
1148     + return filemap_flush(inode->i_mapping);
1149     +}
1150    
1151     /*
1152     * bmap() is special. It gets used by applications such as lilo and by
1153     @@ -3594,6 +3668,9 @@ void ext4_truncate(struct inode *inode)
1154     if (!ext4_can_truncate(inode))
1155     return;
1156    
1157     + if (inode->i_size == 0)
1158     + ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
1159     +
1160     if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1161     ext4_ext_truncate(inode);
1162     return;
1163     @@ -4011,11 +4088,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1164     ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1165     inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
1166     ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
1167     - if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
1168     - cpu_to_le32(EXT4_OS_HURD)) {
1169     + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
1170     ei->i_file_acl |=
1171     ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
1172     - }
1173     inode->i_size = ext4_isize(raw_inode);
1174     ei->i_disksize = inode->i_size;
1175     inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1176     @@ -4062,6 +4137,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1177     (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
1178     }
1179    
1180     + if (ei->i_file_acl &&
1181     + ((ei->i_file_acl <
1182     + (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
1183     + EXT4_SB(sb)->s_gdb_count)) ||
1184     + (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
1185     + ext4_error(sb, __func__,
1186     + "bad extended attribute block %llu in inode #%lu",
1187     + ei->i_file_acl, inode->i_ino);
1188     + ret = -EIO;
1189     + goto bad_inode;
1190     + }
1191     +
1192     if (S_ISREG(inode->i_mode)) {
1193     inode->i_op = &ext4_file_inode_operations;
1194     inode->i_fop = &ext4_file_operations;
1195     @@ -4076,7 +4163,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1196     inode->i_op = &ext4_symlink_inode_operations;
1197     ext4_set_aops(inode);
1198     }
1199     - } else {
1200     + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
1201     + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1202     inode->i_op = &ext4_special_inode_operations;
1203     if (raw_inode->i_block[0])
1204     init_special_inode(inode, inode->i_mode,
1205     @@ -4084,6 +4172,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1206     else
1207     init_special_inode(inode, inode->i_mode,
1208     new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1209     + } else {
1210     + brelse(bh);
1211     + ret = -EIO;
1212     + ext4_error(inode->i_sb, __func__,
1213     + "bogus i_mode (%o) for inode=%lu",
1214     + inode->i_mode, inode->i_ino);
1215     + goto bad_inode;
1216     }
1217     brelse (iloc.bh);
1218     ext4_set_inode_flags(inode);
1219     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1220     index 306bfd4..58dedf0 100644
1221     --- a/fs/ext4/ioctl.c
1222     +++ b/fs/ext4/ioctl.c
1223     @@ -49,8 +49,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1224     if (err)
1225     return err;
1226    
1227     - if (!S_ISDIR(inode->i_mode))
1228     - flags &= ~EXT4_DIRSYNC_FL;
1229     + flags = ext4_mask_flags(inode->i_mode, flags);
1230    
1231     err = -EPERM;
1232     mutex_lock(&inode->i_mutex);
1233     @@ -288,6 +287,20 @@ setversion_out:
1234     return err;
1235     }
1236    
1237     + case EXT4_IOC_ALLOC_DA_BLKS:
1238     + {
1239     + int err;
1240     + if (!is_owner_or_cap(inode))
1241     + return -EACCES;
1242     +
1243     + err = mnt_want_write(filp->f_path.mnt);
1244     + if (err)
1245     + return err;
1246     + err = ext4_alloc_da_blocks(inode);
1247     + mnt_drop_write(filp->f_path.mnt);
1248     + return err;
1249     + }
1250     +
1251     default:
1252     return -ENOTTY;
1253     }
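
Besides running automatically on truncate-then-close and on rename-over-existing, the forced allocation is exposed to userspace as EXT4_IOC_ALLOC_DA_BLKS, defined as _IO('f', 12) in the ext4.h hunk above. A minimal caller might look like this sketch:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* From the ext4.h hunk in this patch: _IO('f', 12). */
    #define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)

    int main(int argc, char **argv)
    {
            int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

            /* Forces any delayed-allocation blocks to be mapped now. */
            if (fd < 0 || ioctl(fd, EXT4_IOC_ALLOC_DA_BLKS) < 0)
                    perror("EXT4_IOC_ALLOC_DA_BLKS");
            if (fd >= 0)
                    close(fd);
            return 0;
    }
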
1254     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1255     index 39d7cc1..c7dc115 100644
1256     --- a/fs/ext4/mballoc.c
1257     +++ b/fs/ext4/mballoc.c
1258     @@ -1450,7 +1450,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1259     struct ext4_free_extent *gex = &ac->ac_g_ex;
1260    
1261     BUG_ON(ex->fe_len <= 0);
1262     - BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1263     + BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1264     BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1265     BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1266    
1267     @@ -2698,7 +2698,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
1268     sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
1269     if (sbi->s_mb_maxs == NULL) {
1270     clear_opt(sbi->s_mount_opt, MBALLOC);
1271     - kfree(sbi->s_mb_maxs);
1272     + kfree(sbi->s_mb_offsets);
1273     return -ENOMEM;
1274     }
1275    
1276     @@ -3400,7 +3400,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
1277     }
1278     BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
1279     start > ac->ac_o_ex.fe_logical);
1280     - BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1281     + BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1282    
1283     /* now prepare goal request */
1284    
1285     @@ -3698,6 +3698,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
1286     struct super_block *sb, struct ext4_prealloc_space *pa)
1287     {
1288     unsigned long grp;
1289     + ext4_fsblk_t grp_blk;
1290    
1291     if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
1292     return;
1293     @@ -3712,8 +3713,12 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
1294     pa->pa_deleted = 1;
1295     spin_unlock(&pa->pa_lock);
1296    
1297     - /* -1 is to protect from crossing allocation group */
1298     - ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
1299     + grp_blk = pa->pa_pstart;
1300     + /* If linear, pa_pstart may be in the next group when pa is used up */
1301     + if (pa->pa_linear)
1302     + grp_blk--;
1303     +
1304     + ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
1305    
1306     /*
1307     * possible race:
1308     @@ -4527,7 +4532,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
1309     pa_inode_list) {
1310     spin_lock(&tmp_pa->pa_lock);
1311     if (tmp_pa->pa_deleted) {
1312     - spin_unlock(&pa->pa_lock);
1313     + spin_unlock(&tmp_pa->pa_lock);
1314     continue;
1315     }
1316     if (!added && pa->pa_free < tmp_pa->pa_free) {
1317     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1318     index 4f3628f..6a71680 100644
1319     --- a/fs/ext4/namei.c
1320     +++ b/fs/ext4/namei.c
1321     @@ -1055,8 +1055,16 @@ static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, str
1322     return ERR_PTR(-EIO);
1323     }
1324     inode = ext4_iget(dir->i_sb, ino);
1325     - if (IS_ERR(inode))
1326     - return ERR_CAST(inode);
1327     + if (unlikely(IS_ERR(inode))) {
1328     + if (PTR_ERR(inode) == -ESTALE) {
1329     + ext4_error(dir->i_sb, __func__,
1330     + "deleted inode referenced: %u",
1331     + ino);
1332     + return ERR_PTR(-EIO);
1333     + } else {
1334     + return ERR_CAST(inode);
1335     + }
1336     + }
1337     }
1338     return d_splice_alias(inode, dentry);
1339     }
1340     @@ -2306,7 +2314,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
1341     struct inode * old_inode, * new_inode;
1342     struct buffer_head * old_bh, * new_bh, * dir_bh;
1343     struct ext4_dir_entry_2 * old_de, * new_de;
1344     - int retval;
1345     + int retval, force_da_alloc = 0;
1346    
1347     old_bh = new_bh = dir_bh = NULL;
1348    
1349     @@ -2444,6 +2452,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
1350     ext4_mark_inode_dirty(handle, new_inode);
1351     if (!new_inode->i_nlink)
1352     ext4_orphan_add(handle, new_inode);
1353     + force_da_alloc = 1;
1354     }
1355     retval = 0;
1356    
1357     @@ -2452,6 +2461,8 @@ end_rename:
1358     brelse (old_bh);
1359     brelse (new_bh);
1360     ext4_journal_stop(handle);
1361     + if (retval == 0 && force_da_alloc)
1362     + ext4_alloc_da_blocks(old_inode);
1363     return retval;
1364     }
1365    
1366     diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
1367     index 257ff26..bbe6d59 100644
1368     --- a/fs/jbd2/revoke.c
1369     +++ b/fs/jbd2/revoke.c
1370     @@ -55,6 +55,25 @@
1371     * need do nothing.
1372     * RevokeValid set, Revoked set:
1373     * buffer has been revoked.
1374     + *
1375     + * Locking rules:
1376     + * We keep two hash tables of revoke records. One hashtable belongs to the
1377     + * running transaction (is pointed to by journal->j_revoke), the other one
1378     + * belongs to the committing transaction. Accesses to the second hash table
1379     + * happen only from the kjournald and no other thread touches this table. Also
1380     + * journal_switch_revoke_table() which switches which hashtable belongs to the
1381     + * running and which to the committing transaction is called only from
1382     + * kjournald. Therefore we need no locks when accessing the hashtable belonging
1383     + * to the committing transaction.
1384     + *
1385     + * All users operating on the hash table belonging to the running transaction
1386     + * have a handle to the transaction. Therefore they are safe from kjournald
1387     + * switching hash tables under them. For operations on the lists of entries in
1388     + * the hash table j_revoke_lock is used.
1389     + *
1390     + * Finally, the replay code also uses the hash tables, but at that moment no
1391     + * one else can touch them (the filesystem isn't mounted yet) and hence no
1392     + * locking is needed.
1393     */
1394    
1395     #ifndef __KERNEL__
1396     @@ -401,8 +420,6 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
1397     * the second time we would still have a pending revoke to cancel. So,
1398     * do not trust the Revoked bit on buffers unless RevokeValid is also
1399     * set.
1400     - *
1401     - * The caller must have the journal locked.
1402     */
1403     int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
1404     {
1405     @@ -480,10 +497,7 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
1406     /*
1407     * Write revoke records to the journal for all entries in the current
1408     * revoke hash, deleting the entries as we go.
1409     - *
1410     - * Called with the journal lock held.
1411     */
1412     -
1413     void jbd2_journal_write_revoke_records(journal_t *journal,
1414     transaction_t *transaction)
1415     {
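
The new locking-rules comment is the substance of this hunk; the two stale "journal locked" notes deleted further down contradicted it. A loose userspace model of the two-table scheme the comment describes, with hypothetical names that mimic jbd2 but are not the jbd2 API:

#include <pthread.h>
#include <stdio.h>

struct revoke_table { int nrecords; /* ... hash buckets ... */ };

static struct revoke_table tables[2];
static struct revoke_table *j_revoke = &tables[0];      /* running txn    */
static struct revoke_table *j_committing = &tables[1];  /* committing txn */
static pthread_mutex_t j_revoke_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only the single commit thread ever calls this, and only it reads
 * j_committing afterwards -- hence "no locks for the committing table". */
static void switch_revoke_table(void)
{
	struct revoke_table *tmp = j_revoke;
	j_revoke = j_committing;
	j_committing = tmp;
}

/* Writers hold a transaction handle, which (in the real code) keeps the
 * switch from running under them; j_revoke_lock only serialises the
 * entry-list manipulation itself. */
static void add_revoke_record(void)
{
	pthread_mutex_lock(&j_revoke_lock);
	j_revoke->nrecords++;
	pthread_mutex_unlock(&j_revoke_lock);
}

int main(void)
{
	add_revoke_record();
	switch_revoke_table();
	printf("committing table holds %d record(s)\n",
	       j_committing->nrecords);
	return 0;
}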
1416     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1417     index bff8733..c3fe156 100644
1418     --- a/fs/nfs/dir.c
1419     +++ b/fs/nfs/dir.c
1420     @@ -1925,7 +1925,8 @@ int nfs_permission(struct inode *inode, int mask)
1421     case S_IFREG:
1422     /* NFSv4 has atomic_open... */
1423     if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
1424     - && (mask & MAY_OPEN))
1425     + && (mask & MAY_OPEN)
1426     + && !(mask & MAY_EXEC))
1427     goto out;
1428     break;
1429     case S_IFDIR:
1430     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1431     index 81e9a82..1ecbcf6 100644
1432     --- a/mm/hugetlb.c
1433     +++ b/mm/hugetlb.c
1434     @@ -286,7 +286,7 @@ void resv_map_release(struct kref *ref)
1435     static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1436     {
1437     VM_BUG_ON(!is_vm_hugetlb_page(vma));
1438     - if (!(vma->vm_flags & VM_SHARED))
1439     + if (!(vma->vm_flags & VM_MAYSHARE))
1440     return (struct resv_map *)(get_vma_private_data(vma) &
1441     ~HPAGE_RESV_MASK);
1442     return 0;
1443     @@ -295,7 +295,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1444     static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1445     {
1446     VM_BUG_ON(!is_vm_hugetlb_page(vma));
1447     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
1448     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
1449    
1450     set_vma_private_data(vma, (get_vma_private_data(vma) &
1451     HPAGE_RESV_MASK) | (unsigned long)map);
1452     @@ -304,7 +304,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1453     static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1454     {
1455     VM_BUG_ON(!is_vm_hugetlb_page(vma));
1456     - VM_BUG_ON(vma->vm_flags & VM_SHARED);
1457     + VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
1458    
1459     set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1460     }
1461     @@ -323,7 +323,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
1462     if (vma->vm_flags & VM_NORESERVE)
1463     return;
1464    
1465     - if (vma->vm_flags & VM_SHARED) {
1466     + if (vma->vm_flags & VM_MAYSHARE) {
1467     /* Shared mappings always use reserves */
1468     h->resv_huge_pages--;
1469     } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1470     @@ -339,14 +339,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
1471     void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1472     {
1473     VM_BUG_ON(!is_vm_hugetlb_page(vma));
1474     - if (!(vma->vm_flags & VM_SHARED))
1475     + if (!(vma->vm_flags & VM_MAYSHARE))
1476     vma->vm_private_data = (void *)0;
1477     }
1478    
1479     /* Returns true if the VMA has associated reserve pages */
1480     static int vma_has_reserves(struct vm_area_struct *vma)
1481     {
1482     - if (vma->vm_flags & VM_SHARED)
1483     + if (vma->vm_flags & VM_MAYSHARE)
1484     return 1;
1485     if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
1486     return 1;
1487     @@ -890,7 +890,7 @@ static int vma_needs_reservation(struct hstate *h,
1488     struct address_space *mapping = vma->vm_file->f_mapping;
1489     struct inode *inode = mapping->host;
1490    
1491     - if (vma->vm_flags & VM_SHARED) {
1492     + if (vma->vm_flags & VM_MAYSHARE) {
1493     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1494     return region_chg(&inode->i_mapping->private_list,
1495     idx, idx + 1);
1496     @@ -915,7 +915,7 @@ static void vma_commit_reservation(struct hstate *h,
1497     struct address_space *mapping = vma->vm_file->f_mapping;
1498     struct inode *inode = mapping->host;
1499    
1500     - if (vma->vm_flags & VM_SHARED) {
1501     + if (vma->vm_flags & VM_MAYSHARE) {
1502     pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1503     region_add(&inode->i_mapping->private_list, idx, idx + 1);
1504    
1505     @@ -1862,7 +1862,7 @@ retry_avoidcopy:
1506     * at the time of fork() could consume its reserves on COW instead
1507     * of the full address range.
1508     */
1509     - if (!(vma->vm_flags & VM_SHARED) &&
1510     + if (!(vma->vm_flags & VM_MAYSHARE) &&
1511     is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1512     old_page != pagecache_page)
1513     outside_reserve = 1;
1514     @@ -1969,7 +1969,7 @@ retry:
1515     clear_huge_page(page, address, huge_page_size(h));
1516     __SetPageUptodate(page);
1517    
1518     - if (vma->vm_flags & VM_SHARED) {
1519     + if (vma->vm_flags & VM_MAYSHARE) {
1520     int err;
1521     struct inode *inode = mapping->host;
1522    
1523     @@ -2073,7 +2073,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1524     goto out_unlock;
1525     }
1526    
1527     - if (!(vma->vm_flags & VM_SHARED))
1528     + if (!(vma->vm_flags & VM_MAYSHARE))
1529     pagecache_page = hugetlbfs_pagecache_page(h,
1530     vma, address);
1531     }
1532     @@ -2223,7 +2223,7 @@ int hugetlb_reserve_pages(struct inode *inode,
1533     * to reserve the full area even if read-only as mprotect() may be
1534     * called to make the mapping read-write. Assume !vma is a shm mapping
1535     */
1536     - if (!vma || vma->vm_flags & VM_SHARED)
1537     + if (!vma || vma->vm_flags & VM_MAYSHARE)
1538     chg = region_chg(&inode->i_mapping->private_list, from, to);
1539     else {
1540     struct resv_map *resv_map = resv_map_alloc();
1541     @@ -2246,7 +2246,7 @@ int hugetlb_reserve_pages(struct inode *inode,
1542     hugetlb_put_quota(inode->i_mapping, chg);
1543     return ret;
1544     }
1545     - if (!vma || vma->vm_flags & VM_SHARED)
1546     + if (!vma || vma->vm_flags & VM_MAYSHARE)
1547     region_add(&inode->i_mapping->private_list, from, to);
1548     return 0;
1549     }
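
Throughout hugetlb.c the patch re-keys reservation decisions from VM_SHARED to VM_MAYSHARE: a MAP_SHARED mapping of a read-only hugetlbfs descriptor carries VM_MAYSHARE but not VM_SHARED, so testing VM_SHARED mis-classified it as private even though, as the comment in hugetlb_reserve_pages() notes, mprotect() may later make it read-write. A small self-contained illustration of the distinction (the flag values here are illustrative, not the kernel's):

#include <stdio.h>

/* Illustrative flag bits only; the real ones live in <linux/mm.h>. */
#define VM_SHARED   0x1u  /* currently-writable shared mapping */
#define VM_MAYSHARE 0x2u  /* created with MAP_SHARED, writable or not */

/* The point of the patch: reservation style must be stable across
 * mprotect(), so it keys off VM_MAYSHARE. */
static const char *resv_style(unsigned vm_flags)
{
	return (vm_flags & VM_MAYSHARE) ? "shared (inode-wide) reserves"
					: "private (per-VMA) reserves";
}

int main(void)
{
	unsigned ro_shared = VM_MAYSHARE;             /* MAP_SHARED, PROT_READ */
	unsigned rw_shared = VM_MAYSHARE | VM_SHARED; /* MAP_SHARED, read-write */

	printf("ro shared mapping -> %s\n", resv_style(ro_shared));
	printf("rw shared mapping -> %s\n", resv_style(rw_shared));
	return 0;
}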
1550     diff --git a/net/core/pktgen.c b/net/core/pktgen.c
1551     index a756847..86714d1 100644
1552     --- a/net/core/pktgen.c
1553     +++ b/net/core/pktgen.c
1554     @@ -2449,7 +2449,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
1555     if (pkt_dev->cflows) {
1556     /* let go of the SAs if we have them */
1557     int i = 0;
1558     - for (; i < pkt_dev->nflows; i++){
1559     + for (; i < pkt_dev->cflows; i++) {
1560     struct xfrm_state *x = pkt_dev->flows[i].x;
1561     if (x) {
1562     xfrm_state_put(x);
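
The pktgen fix is a simple bounds bug: SAs are attached to all cflows configured flow slots, but the teardown loop only walked nflows of them and leaked the rest. A minimal sketch of the corrected teardown, with free() standing in for xfrm_state_put():

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical flow slot; in pktgen each slot may pin an xfrm_state. */
struct flow { void *x; };

struct pkt_dev {
	struct flow *flows;
	unsigned cflows;  /* slots allocated/configured */
	unsigned nflows;  /* high-water count of slots used so far */
};

/* Mirror of the fix: walk every configured slot (cflows), not just the
 * nflows counter, or the remaining slots leak their SAs. */
static void free_SAs(struct pkt_dev *pkt_dev)
{
	unsigned i;

	if (!pkt_dev->cflows)
		return;
	for (i = 0; i < pkt_dev->cflows; i++) {
		free(pkt_dev->flows[i].x);	/* xfrm_state_put() stand-in */
		pkt_dev->flows[i].x = NULL;
	}
}

int main(void)
{
	struct flow f[4] = {
		{ malloc(8) }, { malloc(8) }, { malloc(8) }, { malloc(8) }
	};
	struct pkt_dev dev = { f, 4, 2 };	/* 2 "used", all 4 hold memory */

	free_SAs(&dev);		/* looping only to nflows would leak 2 slots */
	printf("released %u slot(s)\n", dev.cflows);
	return 0;
}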
1563     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1564     index 0675991..7832287 100644
1565     --- a/net/core/skbuff.c
1566     +++ b/net/core/skbuff.c
1567     @@ -1992,7 +1992,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1568     next_skb:
1569     block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
1570    
1571     - if (abs_offset < block_limit) {
1572     + if (abs_offset < block_limit && !st->frag_data) {
1573     *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
1574     return block_limit - abs_offset;
1575     }
1576     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1577     index 7abc6b8..4eca4d3 100644
1578     --- a/net/ipv4/tcp_input.c
1579     +++ b/net/ipv4/tcp_input.c
1580     @@ -931,6 +931,8 @@ static void tcp_init_metrics(struct sock *sk)
1581     tcp_bound_rto(sk);
1582     if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
1583     goto reset;
1584     +
1585     +cwnd:
1586     tp->snd_cwnd = tcp_init_cwnd(tp, dst);
1587     tp->snd_cwnd_stamp = tcp_time_stamp;
1588     return;
1589     @@ -945,6 +947,7 @@ reset:
1590     tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
1591     inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
1592     }
1593     + goto cwnd;
1594     }
1595    
1596     static void tcp_update_reordering(struct sock *sk, const int metric,
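
The tcp_input.c change guarantees that snd_cwnd is initialised on the reset path too: instead of falling off the end after restoring the conservative RTO defaults, the code now jumps back to the new cwnd label. A compilable control-flow sketch of the same shape (the constant 10 is a stand-in for tcp_init_cwnd(), not the kernel's value):

#include <stdio.h>

static unsigned init_metrics(int have_rtt_sample)
{
	unsigned snd_cwnd;

	if (!have_rtt_sample)
		goto reset;
cwnd:
	snd_cwnd = 10;		/* stand-in for tcp_init_cwnd() */
	return snd_cwnd;

reset:
	/* ... fall back to conservative RTO defaults ... */
	goto cwnd;		/* the line the patch adds: both paths
				 * now reach the cwnd initialisation */
}

int main(void)
{
	printf("%u %u\n", init_metrics(1), init_metrics(0));
	return 0;
}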
1597     diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
1598     index a914ba7..4077676 100644
1599     --- a/net/mac80211/rc80211_pid_algo.c
1600     +++ b/net/mac80211/rc80211_pid_algo.c
1601     @@ -367,8 +367,40 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta,
1602     * Until that method is implemented, we will use the lowest supported
1603     * rate as a workaround. */
1604     struct ieee80211_supported_band *sband;
1605     + struct rc_pid_info *pinfo = priv;
1606     + struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
1607     + int i, j, tmp;
1608     + bool s;
1609    
1610     sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1611     +
1612     + /* Sort the rates. This is optimized for the most common case (i.e.
1613     + * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
1614     + * mapping too. */
1615     + for (i = 0; i < sband->n_bitrates; i++) {
1616     + rinfo[i].index = i;
1617     + rinfo[i].rev_index = i;
1618     + if (RC_PID_FAST_START)
1619     + rinfo[i].diff = 0;
1620     + else
1621     + rinfo[i].diff = i * pinfo->norm_offset;
1622     + }
1623     + for (i = 1; i < sband->n_bitrates; i++) {
1624     + s = 0;
1625     + for (j = 0; j < sband->n_bitrates - i; j++)
1626     + if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
1627     + sband->bitrates[rinfo[j + 1].index].bitrate)) {
1628     + tmp = rinfo[j].index;
1629     + rinfo[j].index = rinfo[j + 1].index;
1630     + rinfo[j + 1].index = tmp;
1631     + rinfo[rinfo[j].index].rev_index = j;
1632     + rinfo[rinfo[j + 1].index].rev_index = j + 1;
1633     + s = 1;
1634     + }
1635     + if (!s)
1636     + break;
1637     + }
1638     +
1639     sta->txrate_idx = rate_lowest_index(local, sband, sta);
1640     sta->fail_avg = 0;
1641     }
1642     @@ -378,21 +410,23 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
1643     struct rc_pid_info *pinfo;
1644     struct rc_pid_rateinfo *rinfo;
1645     struct ieee80211_supported_band *sband;
1646     - int i, j, tmp;
1647     - bool s;
1648     + int i, max_rates = 0;
1649     #ifdef CONFIG_MAC80211_DEBUGFS
1650     struct rc_pid_debugfs_entries *de;
1651     #endif
1652    
1653     - sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1654     -
1655     pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
1656     if (!pinfo)
1657     return NULL;
1658    
1659     + for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1660     + sband = local->hw.wiphy->bands[i];
1661     + if (sband && sband->n_bitrates > max_rates)
1662     + max_rates = sband->n_bitrates;
1663     + }
1664     /* We can safely assume that sband won't change unless we get
1665     * reinitialized. */
1666     - rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
1667     + rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
1668     if (!rinfo) {
1669     kfree(pinfo);
1670     return NULL;
1671     @@ -410,33 +444,6 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
1672     pinfo->rinfo = rinfo;
1673     pinfo->oldrate = 0;
1674    
1675     - /* Sort the rates. This is optimized for the most common case (i.e.
1676     - * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
1677     - * mapping too. */
1678     - for (i = 0; i < sband->n_bitrates; i++) {
1679     - rinfo[i].index = i;
1680     - rinfo[i].rev_index = i;
1681     - if (RC_PID_FAST_START)
1682     - rinfo[i].diff = 0;
1683     - else
1684     - rinfo[i].diff = i * pinfo->norm_offset;
1685     - }
1686     - for (i = 1; i < sband->n_bitrates; i++) {
1687     - s = 0;
1688     - for (j = 0; j < sband->n_bitrates - i; j++)
1689     - if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
1690     - sband->bitrates[rinfo[j + 1].index].bitrate)) {
1691     - tmp = rinfo[j].index;
1692     - rinfo[j].index = rinfo[j + 1].index;
1693     - rinfo[j + 1].index = tmp;
1694     - rinfo[rinfo[j].index].rev_index = j;
1695     - rinfo[rinfo[j + 1].index].rev_index = j + 1;
1696     - s = 1;
1697     - }
1698     - if (!s)
1699     - break;
1700     - }
1701     -
1702     #ifdef CONFIG_MAC80211_DEBUGFS
1703     de = &pinfo->dentries;
1704     de->dir = debugfs_create_dir("rc80211_pid",
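
The rc80211_pid change splits allocation from initialisation: rate_control_pid_alloc() can no longer assume a current band, so it sizes rinfo for the largest n_bitrates across all bands, and the rate sort moves into rate_control_pid_rate_init() where the active band is known. The sort itself is an early-exit bubble sort over an indirection table that also maintains the inverse permutation (rev_index) so rate-to-slot lookups stay O(1). A standalone sketch with illustrative values:

#include <stdio.h>
#include <stdbool.h>

struct rateinfo { int index; int rev_index; };

/* Early-exit bubble sort, as in the patch: optimised for almost-sorted
 * CCK+OFDM rate lists, keeping rev_index as the inverse permutation. */
static void sort_rates(const int *bitrate, struct rateinfo *rinfo, int n)
{
	int i, j, tmp;
	bool swapped;

	for (i = 0; i < n; i++) {
		rinfo[i].index = i;
		rinfo[i].rev_index = i;
	}
	for (i = 1; i < n; i++) {
		swapped = false;
		for (j = 0; j < n - i; j++)
			if (bitrate[rinfo[j].index] >
			    bitrate[rinfo[j + 1].index]) {
				tmp = rinfo[j].index;
				rinfo[j].index = rinfo[j + 1].index;
				rinfo[j + 1].index = tmp;
				rinfo[rinfo[j].index].rev_index = j;
				rinfo[rinfo[j + 1].index].rev_index = j + 1;
				swapped = true;
			}
		if (!swapped)	/* no swaps: the tail is already sorted */
			break;
	}
}

int main(void)
{
	int rates[] = { 10, 20, 55, 110, 60, 90 };	/* almost sorted */
	struct rateinfo ri[6];
	int i;

	sort_rates(rates, ri, 6);
	for (i = 0; i < 6; i++)
		printf("slot %d -> rate %d\n", i, rates[ri[i].index]);
	return 0;
}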
1705     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
1706     index f028f70..e2d25da 100644
1707     --- a/security/selinux/hooks.c
1708     +++ b/security/selinux/hooks.c
1709     @@ -4477,7 +4477,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
1710     if (err)
1711     return err;
1712    
1713     - if (send_perm != 0)
1714     + if (!send_perm)
1715     return 0;
1716    
1717     err = sel_netport_sid(sk->sk_protocol,
1718     diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
1719     index 1710623..c10e476 100644
1720     --- a/sound/usb/usbaudio.c
1721     +++ b/sound/usb/usbaudio.c
1722     @@ -3367,7 +3367,7 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip,
1723     [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface,
1724     [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface,
1725     [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface,
1726     - [QUIRK_MIDI_RAW] = snd_usb_create_midi_interface,
1727     + [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface,
1728     [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface,
1729     [QUIRK_MIDI_CME] = snd_usb_create_midi_interface,
1730     [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
1731     diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
1732     index 7cf18c3..7191d82 100644
1733     --- a/sound/usb/usbaudio.h
1734     +++ b/sound/usb/usbaudio.h
1735     @@ -153,7 +153,7 @@ enum quirk_type {
1736     QUIRK_MIDI_YAMAHA,
1737     QUIRK_MIDI_MIDIMAN,
1738     QUIRK_MIDI_NOVATION,
1739     - QUIRK_MIDI_RAW,
1740     + QUIRK_MIDI_FASTLANE,
1741     QUIRK_MIDI_EMAGIC,
1742     QUIRK_MIDI_CME,
1743     QUIRK_AUDIO_STANDARD_INTERFACE,
1744     diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
1745     index 940ae5a..cd2b622 100644
1746     --- a/sound/usb/usbmidi.c
1747     +++ b/sound/usb/usbmidi.c
1748     @@ -1733,8 +1733,18 @@ int snd_usb_create_midi_interface(struct snd_usb_audio* chip,
1749     umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
1750     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
1751     break;
1752     - case QUIRK_MIDI_RAW:
1753     + case QUIRK_MIDI_FASTLANE:
1754     umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
1755     + /*
1756     + * Interface 1 contains isochronous endpoints, but with the same
1757     + * numbers as in interface 0. Since it is interface 1 that the
1758     + * USB core has most recently seen, these descriptors are now
1759     + * associated with the endpoint numbers. This will foul up our
1760     + * attempts to submit bulk/interrupt URBs to the endpoints in
1761     + * interface 0, so we have to make sure that the USB core looks
1762     + * again at interface 0 by calling usb_set_interface() on it.
1763     + */
1764     + usb_set_interface(umidi->chip->dev, 0, 0);
1765     err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
1766     break;
1767     case QUIRK_MIDI_EMAGIC:
1768     diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
1769     index 9ea726c..076ca4c 100644
1770     --- a/sound/usb/usbquirks.h
1771     +++ b/sound/usb/usbquirks.h
1772     @@ -1756,7 +1756,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1773     .data = & (const struct snd_usb_audio_quirk[]) {
1774     {
1775     .ifnum = 0,
1776     - .type = QUIRK_MIDI_RAW
1777     + .type = QUIRK_MIDI_FASTLANE
1778     },
1779     {
1780     .ifnum = 1,