Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.1/0107-3.1.8-all-fixes.patch



Revision 1606
Tue Jan 10 13:37:55 2012 UTC by niro
File size: 120178 bytes
-consolidated patches dir and added patches up to linux-3.1.8
1 niro 1606 diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
2     index 5a886cd..73a6a5b 100644
3     --- a/arch/arm/mach-omap2/board-rx51-peripherals.c
4     +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
5     @@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
6     static void __init rx51_charger_init(void)
7     {
8     WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
9     - GPIOF_OUT_INIT_LOW, "isp1704_reset"));
10     + GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
11    
12     platform_device_register(&rx51_charger_device);
13     }
14     diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
15     index c074e66..4e0a371 100644
16     --- a/arch/arm/oprofile/common.c
17     +++ b/arch/arm/oprofile/common.c
18     @@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
19     return oprofile_perf_init(ops);
20     }
21    
22     -void __exit oprofile_arch_exit(void)
23     +void oprofile_arch_exit(void)
24     {
25     oprofile_perf_exit();
26     }
27     diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
28     index 761c3c9..8d4fdb0 100644
29     --- a/arch/arm/plat-mxc/pwm.c
30     +++ b/arch/arm/plat-mxc/pwm.c
31     @@ -32,6 +32,9 @@
32     #define MX3_PWMSAR 0x0C /* PWM Sample Register */
33     #define MX3_PWMPR 0x10 /* PWM Period Register */
34     #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
35     +#define MX3_PWMCR_DOZEEN (1 << 24)
36     +#define MX3_PWMCR_WAITEN (1 << 23)
37     +#define MX3_PWMCR_DBGEN (1 << 22)
38     #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
39     #define MX3_PWMCR_CLKSRC_IPG (1 << 16)
40     #define MX3_PWMCR_EN (1 << 0)
41     @@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
42     do_div(c, period_ns);
43     duty_cycles = c;
44    
45     + /*
46     + * according to imx pwm RM, the real period value should be
47     + * PERIOD value in PWMPR plus 2.
48     + */
49     + if (period_cycles > 2)
50     + period_cycles -= 2;
51     + else
52     + period_cycles = 0;
53     +
54     writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
55     writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
56    
57     - cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
58     + cr = MX3_PWMCR_PRESCALER(prescale) |
59     + MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
60     + MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
61    
62     if (cpu_is_mx25())
63     cr |= MX3_PWMCR_CLKSRC_IPG;
64     diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
65     index 6efc18b..bd58b72 100644
66     --- a/arch/s390/oprofile/init.c
67     +++ b/arch/s390/oprofile/init.c
68     @@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
69     return -EINVAL;
70    
71     retval = oprofilefs_ulong_from_user(&val, buf, count);
72     - if (retval)
73     + if (retval <= 0)
74     return retval;
75    
76     if (oprofile_started)
77     diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
78     index b4c2d2b..e4dd5d5 100644
79     --- a/arch/sh/oprofile/common.c
80     +++ b/arch/sh/oprofile/common.c
81     @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
82     return oprofile_perf_init(ops);
83     }
84    
85     -void __exit oprofile_arch_exit(void)
86     +void oprofile_arch_exit(void)
87     {
88     oprofile_perf_exit();
89     kfree(sh_pmu_op_name);
90     @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
91     ops->backtrace = sh_backtrace;
92     return -ENODEV;
93     }
94     -void __exit oprofile_arch_exit(void) {}
95     +void oprofile_arch_exit(void) {}
96     #endif /* CONFIG_HW_PERF_EVENTS */
97     diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
98     index 5b31a8e..a790cc6 100644
99     --- a/arch/sparc/include/asm/pgtable_32.h
100     +++ b/arch/sparc/include/asm/pgtable_32.h
101     @@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
102     #define kern_addr_valid(addr) \
103     (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
104    
105     -extern int io_remap_pfn_range(struct vm_area_struct *vma,
106     - unsigned long from, unsigned long pfn,
107     - unsigned long size, pgprot_t prot);
108     -
109     /*
110     * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
111     * its high 4 bits. These macros/functions put it there or get it from there.
112     @@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
113     #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
114     #define GET_PFN(pfn) (pfn & 0x0fffffffUL)
115    
116     +extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
117     + unsigned long, pgprot_t);
118     +
119     +static inline int io_remap_pfn_range(struct vm_area_struct *vma,
120     + unsigned long from, unsigned long pfn,
121     + unsigned long size, pgprot_t prot)
122     +{
123     + unsigned long long offset, space, phys_base;
124     +
125     + offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
126     + space = GET_IOSPACE(pfn);
127     + phys_base = offset | (space << 32ULL);
128     +
129     + return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
130     +}
131     +
132     #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
133     #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
134     ({ \
135     diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
136     index adf8932..38ebb2c 100644
137     --- a/arch/sparc/include/asm/pgtable_64.h
138     +++ b/arch/sparc/include/asm/pgtable_64.h
139     @@ -757,10 +757,6 @@ static inline bool kern_addr_valid(unsigned long addr)
140    
141     extern int page_in_phys_avail(unsigned long paddr);
142    
143     -extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
144     - unsigned long pfn,
145     - unsigned long size, pgprot_t prot);
146     -
147     /*
148     * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
149     * its high 4 bits. These macros/functions put it there or get it from there.
150     @@ -769,6 +765,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
151     #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
152     #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
153    
154     +extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
155     + unsigned long, pgprot_t);
156     +
157     +static inline int io_remap_pfn_range(struct vm_area_struct *vma,
158     + unsigned long from, unsigned long pfn,
159     + unsigned long size, pgprot_t prot)
160     +{
161     + unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
162     + int space = GET_IOSPACE(pfn);
163     + unsigned long phys_base;
164     +
165     + phys_base = offset | (((unsigned long) space) << 32UL);
166     +
167     + return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
168     +}
169     +
170     #include <asm-generic/pgtable.h>
171    
172     /* We provide our own get_unmapped_area to cope with VA holes and
173     diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
174     index e27f8ea..0c218e4 100644
175     --- a/arch/sparc/kernel/entry.h
176     +++ b/arch/sparc/kernel/entry.h
177     @@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
178     extern void fpload(unsigned long *fpregs, unsigned long *fsr);
179    
180     #else /* CONFIG_SPARC32 */
181     +
182     +#include <asm/trap_block.h>
183     +
184     struct popc_3insn_patch_entry {
185     unsigned int addr;
186     unsigned int insns[3];
187     @@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
188     __popc_6insn_patch_end;
189    
190     extern void __init per_cpu_patch(void);
191     +extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
192     + struct sun4v_1insn_patch_entry *);
193     +extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
194     + struct sun4v_2insn_patch_entry *);
195     extern void __init sun4v_patch(void);
196     extern void __init boot_cpu_id_too_large(int cpu);
197     extern unsigned int dcache_parity_tl1_occurred;
198     diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
199     index da0c6c7..e551987 100644
200     --- a/arch/sparc/kernel/module.c
201     +++ b/arch/sparc/kernel/module.c
202     @@ -17,6 +17,8 @@
203     #include <asm/processor.h>
204     #include <asm/spitfire.h>
205    
206     +#include "entry.h"
207     +
208     #ifdef CONFIG_SPARC64
209    
210     #include <linux/jump_label.h>
211     @@ -203,6 +205,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
212     }
213    
214     #ifdef CONFIG_SPARC64
215     +static void do_patch_sections(const Elf_Ehdr *hdr,
216     + const Elf_Shdr *sechdrs)
217     +{
218     + const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
219     + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
220     +
221     + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
222     + if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
223     + sun4v_1insn = s;
224     + if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
225     + sun4v_2insn = s;
226     + }
227     +
228     + if (sun4v_1insn && tlb_type == hypervisor) {
229     + void *p = (void *) sun4v_1insn->sh_addr;
230     + sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
231     + }
232     + if (sun4v_2insn && tlb_type == hypervisor) {
233     + void *p = (void *) sun4v_2insn->sh_addr;
234     + sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
235     + }
236     +}
237     +
238     int module_finalize(const Elf_Ehdr *hdr,
239     const Elf_Shdr *sechdrs,
240     struct module *me)
241     @@ -210,6 +235,8 @@ int module_finalize(const Elf_Ehdr *hdr,
242     /* make jump label nops */
243     jump_label_apply_nops(me);
244    
245     + do_patch_sections(hdr, sechdrs);
246     +
247     /* Cheetah's I-cache is fully coherent. */
248     if (tlb_type == spitfire) {
249     unsigned long va;
250     diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
251     index b01a06e..9e73c4a 100644
252     --- a/arch/sparc/kernel/pci_sun4v.c
253     +++ b/arch/sparc/kernel/pci_sun4v.c
254     @@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
255     if (!irq)
256     return -ENOMEM;
257    
258     - if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
259     - return -EINVAL;
260     if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
261     return -EINVAL;
262     + if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
263     + return -EINVAL;
264    
265     return irq;
266     }
267     diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
268     index c965595a..a854a1c 100644
269     --- a/arch/sparc/kernel/setup_64.c
270     +++ b/arch/sparc/kernel/setup_64.c
271     @@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
272     }
273     }
274    
275     -void __init sun4v_patch(void)
276     +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
277     + struct sun4v_1insn_patch_entry *end)
278     {
279     - extern void sun4v_hvapi_init(void);
280     - struct sun4v_1insn_patch_entry *p1;
281     - struct sun4v_2insn_patch_entry *p2;
282     -
283     - if (tlb_type != hypervisor)
284     - return;
285     + while (start < end) {
286     + unsigned long addr = start->addr;
287    
288     - p1 = &__sun4v_1insn_patch;
289     - while (p1 < &__sun4v_1insn_patch_end) {
290     - unsigned long addr = p1->addr;
291     -
292     - *(unsigned int *) (addr + 0) = p1->insn;
293     + *(unsigned int *) (addr + 0) = start->insn;
294     wmb();
295     __asm__ __volatile__("flush %0" : : "r" (addr + 0));
296    
297     - p1++;
298     + start++;
299     }
300     +}
301    
302     - p2 = &__sun4v_2insn_patch;
303     - while (p2 < &__sun4v_2insn_patch_end) {
304     - unsigned long addr = p2->addr;
305     +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
306     + struct sun4v_2insn_patch_entry *end)
307     +{
308     + while (start < end) {
309     + unsigned long addr = start->addr;
310    
311     - *(unsigned int *) (addr + 0) = p2->insns[0];
312     + *(unsigned int *) (addr + 0) = start->insns[0];
313     wmb();
314     __asm__ __volatile__("flush %0" : : "r" (addr + 0));
315    
316     - *(unsigned int *) (addr + 4) = p2->insns[1];
317     + *(unsigned int *) (addr + 4) = start->insns[1];
318     wmb();
319     __asm__ __volatile__("flush %0" : : "r" (addr + 4));
320    
321     - p2++;
322     + start++;
323     }
324     +}
325     +
326     +void __init sun4v_patch(void)
327     +{
328     + extern void sun4v_hvapi_init(void);
329     +
330     + if (tlb_type != hypervisor)
331     + return;
332     +
333     + sun4v_patch_1insn_range(&__sun4v_1insn_patch,
334     + &__sun4v_1insn_patch_end);
335     +
336     + sun4v_patch_2insn_range(&__sun4v_2insn_patch,
337     + &__sun4v_2insn_patch_end);
338    
339     sun4v_hvapi_init();
340     }
341     diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
342     index 2caa556..023b886 100644
343     --- a/arch/sparc/kernel/signal32.c
344     +++ b/arch/sparc/kernel/signal32.c
345     @@ -822,21 +822,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
346     * want to handle. Thus you cannot kill init even with a SIGKILL even by
347     * mistake.
348     */
349     -void do_signal32(sigset_t *oldset, struct pt_regs * regs,
350     - int restart_syscall, unsigned long orig_i0)
351     +void do_signal32(sigset_t *oldset, struct pt_regs * regs)
352     {
353     struct k_sigaction ka;
354     + unsigned long orig_i0;
355     + int restart_syscall;
356     siginfo_t info;
357     int signr;
358    
359     signr = get_signal_to_deliver(&info, &ka, regs, NULL);
360    
361     - /* If the debugger messes with the program counter, it clears
362     - * the "in syscall" bit, directing us to not perform a syscall
363     - * restart.
364     - */
365     - if (restart_syscall && !pt_regs_is_syscall(regs))
366     - restart_syscall = 0;
367     + restart_syscall = 0;
368     + orig_i0 = 0;
369     + if (pt_regs_is_syscall(regs) &&
370     + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
371     + restart_syscall = 1;
372     + orig_i0 = regs->u_regs[UREG_G6];
373     + }
374    
375     if (signr > 0) {
376     if (restart_syscall)
377     diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
378     index 8ce247a..d54c6e5 100644
379     --- a/arch/sparc/kernel/signal_32.c
380     +++ b/arch/sparc/kernel/signal_32.c
381     @@ -519,10 +519,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
382     siginfo_t info;
383     int signr;
384    
385     + /* It's a lot of work and synchronization to add a new ptrace
386     + * register for GDB to save and restore in order to get
387     + * orig_i0 correct for syscall restarts when debugging.
388     + *
389     + * Although it should be the case that most of the global
390     + * registers are volatile across a system call, glibc already
391     + * depends upon that fact that we preserve them. So we can't
392     + * just use any global register to save away the orig_i0 value.
393     + *
394     + * In particular %g2, %g3, %g4, and %g5 are all assumed to be
395     + * preserved across a system call trap by various pieces of
396     + * code in glibc.
397     + *
398     + * %g7 is used as the "thread register". %g6 is not used in
399     + * any fixed manner. %g6 is used as a scratch register and
400     + * a compiler temporary, but it's value is never used across
401     + * a system call. Therefore %g6 is usable for orig_i0 storage.
402     + */
403     if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
404     - restart_syscall = 1;
405     - else
406     - restart_syscall = 0;
407     + regs->u_regs[UREG_G6] = orig_i0;
408    
409     if (test_thread_flag(TIF_RESTORE_SIGMASK))
410     oldset = &current->saved_sigmask;
411     @@ -535,8 +551,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
412     * the software "in syscall" bit, directing us to not perform
413     * a syscall restart.
414     */
415     - if (restart_syscall && !pt_regs_is_syscall(regs))
416     - restart_syscall = 0;
417     + restart_syscall = 0;
418     + if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
419     + restart_syscall = 1;
420     + orig_i0 = regs->u_regs[UREG_G6];
421     + }
422     +
423    
424     if (signr > 0) {
425     if (restart_syscall)
426     diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
427     index a2b8159..f0836cd 100644
428     --- a/arch/sparc/kernel/signal_64.c
429     +++ b/arch/sparc/kernel/signal_64.c
430     @@ -529,11 +529,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
431     siginfo_t info;
432     int signr;
433    
434     + /* It's a lot of work and synchronization to add a new ptrace
435     + * register for GDB to save and restore in order to get
436     + * orig_i0 correct for syscall restarts when debugging.
437     + *
438     + * Although it should be the case that most of the global
439     + * registers are volatile across a system call, glibc already
440     + * depends upon that fact that we preserve them. So we can't
441     + * just use any global register to save away the orig_i0 value.
442     + *
443     + * In particular %g2, %g3, %g4, and %g5 are all assumed to be
444     + * preserved across a system call trap by various pieces of
445     + * code in glibc.
446     + *
447     + * %g7 is used as the "thread register". %g6 is not used in
448     + * any fixed manner. %g6 is used as a scratch register and
449     + * a compiler temporary, but it's value is never used across
450     + * a system call. Therefore %g6 is usable for orig_i0 storage.
451     + */
452     if (pt_regs_is_syscall(regs) &&
453     - (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
454     - restart_syscall = 1;
455     - } else
456     - restart_syscall = 0;
457     + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
458     + regs->u_regs[UREG_G6] = orig_i0;
459    
460     if (current_thread_info()->status & TS_RESTORE_SIGMASK)
461     oldset = &current->saved_sigmask;
462     @@ -542,22 +558,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
463    
464     #ifdef CONFIG_COMPAT
465     if (test_thread_flag(TIF_32BIT)) {
466     - extern void do_signal32(sigset_t *, struct pt_regs *,
467     - int restart_syscall,
468     - unsigned long orig_i0);
469     - do_signal32(oldset, regs, restart_syscall, orig_i0);
470     + extern void do_signal32(sigset_t *, struct pt_regs *);
471     + do_signal32(oldset, regs);
472     return;
473     }
474     #endif
475    
476     signr = get_signal_to_deliver(&info, &ka, regs, NULL);
477    
478     - /* If the debugger messes with the program counter, it clears
479     - * the software "in syscall" bit, directing us to not perform
480     - * a syscall restart.
481     - */
482     - if (restart_syscall && !pt_regs_is_syscall(regs))
483     - restart_syscall = 0;
484     + restart_syscall = 0;
485     + if (pt_regs_is_syscall(regs) &&
486     + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
487     + restart_syscall = 1;
488     + orig_i0 = regs->u_regs[UREG_G6];
489     + }
490    
491     if (signr > 0) {
492     if (restart_syscall)
493     diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
494     index 32b626c..7337067 100644
495     --- a/arch/sparc/kernel/visemul.c
496     +++ b/arch/sparc/kernel/visemul.c
497     @@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
498     s16 b = (rs2 >> (i * 16)) & 0xffff;
499    
500     if (a > b)
501     - rd_val |= 1 << i;
502     + rd_val |= 8 >> i;
503     }
504     break;
505    
506     case FCMPGT32_OPF:
507     for (i = 0; i < 2; i++) {
508     - s32 a = (rs1 >> (i * 32)) & 0xffff;
509     - s32 b = (rs2 >> (i * 32)) & 0xffff;
510     + s32 a = (rs1 >> (i * 32)) & 0xffffffff;
511     + s32 b = (rs2 >> (i * 32)) & 0xffffffff;
512    
513     if (a > b)
514     - rd_val |= 1 << i;
515     + rd_val |= 2 >> i;
516     }
517     break;
518    
519     @@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
520     s16 b = (rs2 >> (i * 16)) & 0xffff;
521    
522     if (a <= b)
523     - rd_val |= 1 << i;
524     + rd_val |= 8 >> i;
525     }
526     break;
527    
528     case FCMPLE32_OPF:
529     for (i = 0; i < 2; i++) {
530     - s32 a = (rs1 >> (i * 32)) & 0xffff;
531     - s32 b = (rs2 >> (i * 32)) & 0xffff;
532     + s32 a = (rs1 >> (i * 32)) & 0xffffffff;
533     + s32 b = (rs2 >> (i * 32)) & 0xffffffff;
534    
535     if (a <= b)
536     - rd_val |= 1 << i;
537     + rd_val |= 2 >> i;
538     }
539     break;
540    
541     @@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
542     s16 b = (rs2 >> (i * 16)) & 0xffff;
543    
544     if (a != b)
545     - rd_val |= 1 << i;
546     + rd_val |= 8 >> i;
547     }
548     break;
549    
550     case FCMPNE32_OPF:
551     for (i = 0; i < 2; i++) {
552     - s32 a = (rs1 >> (i * 32)) & 0xffff;
553     - s32 b = (rs2 >> (i * 32)) & 0xffff;
554     + s32 a = (rs1 >> (i * 32)) & 0xffffffff;
555     + s32 b = (rs2 >> (i * 32)) & 0xffffffff;
556    
557     if (a != b)
558     - rd_val |= 1 << i;
559     + rd_val |= 2 >> i;
560     }
561     break;
562    
563     @@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
564     s16 b = (rs2 >> (i * 16)) & 0xffff;
565    
566     if (a == b)
567     - rd_val |= 1 << i;
568     + rd_val |= 8 >> i;
569     }
570     break;
571    
572     case FCMPEQ32_OPF:
573     for (i = 0; i < 2; i++) {
574     - s32 a = (rs1 >> (i * 32)) & 0xffff;
575     - s32 b = (rs2 >> (i * 32)) & 0xffff;
576     + s32 a = (rs1 >> (i * 32)) & 0xffffffff;
577     + s32 b = (rs2 >> (i * 32)) & 0xffffffff;
578    
579     if (a == b)
580     - rd_val |= 1 << i;
581     + rd_val |= 2 >> i;
582     }
583     break;
584     }
585     diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
586     index 34fe657..4d8c497 100644
587     --- a/arch/sparc/lib/memcpy.S
588     +++ b/arch/sparc/lib/memcpy.S
589     @@ -7,40 +7,12 @@
590     * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
591     */
592    
593     -#ifdef __KERNEL__
594     -
595     -#define FUNC(x) \
596     +#define FUNC(x) \
597     .globl x; \
598     .type x,@function; \
599     - .align 4; \
600     + .align 4; \
601     x:
602    
603     -#undef FASTER_REVERSE
604     -#undef FASTER_NONALIGNED
605     -#define FASTER_ALIGNED
606     -
607     -/* In kernel these functions don't return a value.
608     - * One should use macros in asm/string.h for that purpose.
609     - * We return 0, so that bugs are more apparent.
610     - */
611     -#define SETUP_RETL
612     -#define RETL_INSN clr %o0
613     -
614     -#else
615     -
616     -/* libc */
617     -
618     -#include "DEFS.h"
619     -
620     -#define FASTER_REVERSE
621     -#define FASTER_NONALIGNED
622     -#define FASTER_ALIGNED
623     -
624     -#define SETUP_RETL mov %o0, %g6
625     -#define RETL_INSN mov %g6, %o0
626     -
627     -#endif
628     -
629     /* Both these macros have to start with exactly the same insn */
630     #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
631     ldd [%src + (offset) + 0x00], %t0; \
632     @@ -164,30 +136,6 @@ x:
633     .text
634     .align 4
635    
636     -#ifdef FASTER_REVERSE
637     -
638     -70: /* rdword_align */
639     -
640     - andcc %o1, 1, %g0
641     - be 4f
642     - andcc %o1, 2, %g0
643     -
644     - ldub [%o1 - 1], %g2
645     - sub %o1, 1, %o1
646     - stb %g2, [%o0 - 1]
647     - sub %o2, 1, %o2
648     - be 3f
649     - sub %o0, 1, %o0
650     -4:
651     - lduh [%o1 - 2], %g2
652     - sub %o1, 2, %o1
653     - sth %g2, [%o0 - 2]
654     - sub %o2, 2, %o2
655     - b 3f
656     - sub %o0, 2, %o0
657     -
658     -#endif /* FASTER_REVERSE */
659     -
660     0:
661     retl
662     nop ! Only bcopy returns here and it retuns void...
663     @@ -198,7 +146,7 @@ FUNC(__memmove)
664     #endif
665     FUNC(memmove)
666     cmp %o0, %o1
667     - SETUP_RETL
668     + mov %o0, %g7
669     bleu 9f
670     sub %o0, %o1, %o4
671    
672     @@ -207,8 +155,6 @@ FUNC(memmove)
673     bleu 0f
674     andcc %o4, 3, %o5
675    
676     -#ifndef FASTER_REVERSE
677     -
678     add %o1, %o2, %o1
679     add %o0, %o2, %o0
680     sub %o1, 1, %o1
681     @@ -224,295 +170,7 @@ FUNC(memmove)
682     sub %o0, 1, %o0
683    
684     retl
685     - RETL_INSN
686     -
687     -#else /* FASTER_REVERSE */
688     -
689     - add %o1, %o2, %o1
690     - add %o0, %o2, %o0
691     - bne 77f
692     - cmp %o2, 15
693     - bleu 91f
694     - andcc %o1, 3, %g0
695     - bne 70b
696     -3:
697     - andcc %o1, 4, %g0
698     -
699     - be 2f
700     - mov %o2, %g1
701     -
702     - ld [%o1 - 4], %o4
703     - sub %g1, 4, %g1
704     - st %o4, [%o0 - 4]
705     - sub %o1, 4, %o1
706     - sub %o0, 4, %o0
707     -2:
708     - andcc %g1, 0xffffff80, %g7
709     - be 3f
710     - andcc %o0, 4, %g0
711     -
712     - be 74f + 4
713     -5:
714     - RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
715     - RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
716     - RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
717     - RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
718     - subcc %g7, 128, %g7
719     - sub %o1, 128, %o1
720     - bne 5b
721     - sub %o0, 128, %o0
722     -3:
723     - andcc %g1, 0x70, %g7
724     - be 72f
725     - andcc %g1, 8, %g0
726     -
727     - sethi %hi(72f), %o5
728     - srl %g7, 1, %o4
729     - add %g7, %o4, %o4
730     - sub %o1, %g7, %o1
731     - sub %o5, %o4, %o5
732     - jmpl %o5 + %lo(72f), %g0
733     - sub %o0, %g7, %o0
734     -
735     -71: /* rmemcpy_table */
736     - RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
737     - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
738     - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
739     - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
740     - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
741     - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
742     - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
743     -
744     -72: /* rmemcpy_table_end */
745     -
746     - be 73f
747     - andcc %g1, 4, %g0
748     -
749     - ldd [%o1 - 0x08], %g2
750     - sub %o0, 8, %o0
751     - sub %o1, 8, %o1
752     - st %g2, [%o0]
753     - st %g3, [%o0 + 0x04]
754     -
755     -73: /* rmemcpy_last7 */
756     -
757     - be 1f
758     - andcc %g1, 2, %g0
759     -
760     - ld [%o1 - 4], %g2
761     - sub %o1, 4, %o1
762     - st %g2, [%o0 - 4]
763     - sub %o0, 4, %o0
764     -1:
765     - be 1f
766     - andcc %g1, 1, %g0
767     -
768     - lduh [%o1 - 2], %g2
769     - sub %o1, 2, %o1
770     - sth %g2, [%o0 - 2]
771     - sub %o0, 2, %o0
772     -1:
773     - be 1f
774     - nop
775     -
776     - ldub [%o1 - 1], %g2
777     - stb %g2, [%o0 - 1]
778     -1:
779     - retl
780     - RETL_INSN
781     -
782     -74: /* rldd_std */
783     - RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
784     - RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
785     - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
786     - RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
787     - subcc %g7, 128, %g7
788     - sub %o1, 128, %o1
789     - bne 74b
790     - sub %o0, 128, %o0
791     -
792     - andcc %g1, 0x70, %g7
793     - be 72b
794     - andcc %g1, 8, %g0
795     -
796     - sethi %hi(72b), %o5
797     - srl %g7, 1, %o4
798     - add %g7, %o4, %o4
799     - sub %o1, %g7, %o1
800     - sub %o5, %o4, %o5
801     - jmpl %o5 + %lo(72b), %g0
802     - sub %o0, %g7, %o0
803     -
804     -75: /* rshort_end */
805     -
806     - and %o2, 0xe, %o3
807     -2:
808     - sethi %hi(76f), %o5
809     - sll %o3, 3, %o4
810     - sub %o0, %o3, %o0
811     - sub %o5, %o4, %o5
812     - sub %o1, %o3, %o1
813     - jmpl %o5 + %lo(76f), %g0
814     - andcc %o2, 1, %g0
815     -
816     - RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
817     - RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
818     - RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
819     - RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
820     - RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
821     - RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
822     - RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
823     -
824     -76: /* rshort_table_end */
825     -
826     - be 1f
827     - nop
828     - ldub [%o1 - 1], %g2
829     - stb %g2, [%o0 - 1]
830     -1:
831     - retl
832     - RETL_INSN
833     -
834     -91: /* rshort_aligned_end */
835     -
836     - bne 75b
837     - andcc %o2, 8, %g0
838     -
839     - be 1f
840     - andcc %o2, 4, %g0
841     -
842     - ld [%o1 - 0x08], %g2
843     - ld [%o1 - 0x04], %g3
844     - sub %o1, 8, %o1
845     - st %g2, [%o0 - 0x08]
846     - st %g3, [%o0 - 0x04]
847     - sub %o0, 8, %o0
848     -1:
849     - b 73b
850     - mov %o2, %g1
851     -
852     -77: /* rnon_aligned */
853     - cmp %o2, 15
854     - bleu 75b
855     - andcc %o0, 3, %g0
856     - be 64f
857     - andcc %o0, 1, %g0
858     - be 63f
859     - andcc %o0, 2, %g0
860     - ldub [%o1 - 1], %g5
861     - sub %o1, 1, %o1
862     - stb %g5, [%o0 - 1]
863     - sub %o0, 1, %o0
864     - be 64f
865     - sub %o2, 1, %o2
866     -63:
867     - ldub [%o1 - 1], %g5
868     - sub %o1, 2, %o1
869     - stb %g5, [%o0 - 1]
870     - sub %o0, 2, %o0
871     - ldub [%o1], %g5
872     - sub %o2, 2, %o2
873     - stb %g5, [%o0]
874     -64:
875     - and %o1, 3, %g2
876     - and %o1, -4, %o1
877     - and %o2, 0xc, %g3
878     - add %o1, 4, %o1
879     - cmp %g3, 4
880     - sll %g2, 3, %g4
881     - mov 32, %g2
882     - be 4f
883     - sub %g2, %g4, %g7
884     -
885     - blu 3f
886     - cmp %g3, 8
887     -
888     - be 2f
889     - srl %o2, 2, %g3
890     -
891     - ld [%o1 - 4], %o3
892     - add %o0, -8, %o0
893     - ld [%o1 - 8], %o4
894     - add %o1, -16, %o1
895     - b 7f
896     - add %g3, 1, %g3
897     -2:
898     - ld [%o1 - 4], %o4
899     - add %o0, -4, %o0
900     - ld [%o1 - 8], %g1
901     - add %o1, -12, %o1
902     - b 8f
903     - add %g3, 2, %g3
904     -3:
905     - ld [%o1 - 4], %o5
906     - add %o0, -12, %o0
907     - ld [%o1 - 8], %o3
908     - add %o1, -20, %o1
909     - b 6f
910     - srl %o2, 2, %g3
911     -4:
912     - ld [%o1 - 4], %g1
913     - srl %o2, 2, %g3
914     - ld [%o1 - 8], %o5
915     - add %o1, -24, %o1
916     - add %o0, -16, %o0
917     - add %g3, -1, %g3
918     -
919     - ld [%o1 + 12], %o3
920     -5:
921     - sll %o5, %g4, %g2
922     - srl %g1, %g7, %g5
923     - or %g2, %g5, %g2
924     - st %g2, [%o0 + 12]
925     -6:
926     - ld [%o1 + 8], %o4
927     - sll %o3, %g4, %g2
928     - srl %o5, %g7, %g5
929     - or %g2, %g5, %g2
930     - st %g2, [%o0 + 8]
931     -7:
932     - ld [%o1 + 4], %g1
933     - sll %o4, %g4, %g2
934     - srl %o3, %g7, %g5
935     - or %g2, %g5, %g2
936     - st %g2, [%o0 + 4]
937     -8:
938     - ld [%o1], %o5
939     - sll %g1, %g4, %g2
940     - srl %o4, %g7, %g5
941     - addcc %g3, -4, %g3
942     - or %g2, %g5, %g2
943     - add %o1, -16, %o1
944     - st %g2, [%o0]
945     - add %o0, -16, %o0
946     - bne,a 5b
947     - ld [%o1 + 12], %o3
948     - sll %o5, %g4, %g2
949     - srl %g1, %g7, %g5
950     - srl %g4, 3, %g3
951     - or %g2, %g5, %g2
952     - add %o1, %g3, %o1
953     - andcc %o2, 2, %g0
954     - st %g2, [%o0 + 12]
955     - be 1f
956     - andcc %o2, 1, %g0
957     -
958     - ldub [%o1 + 15], %g5
959     - add %o1, -2, %o1
960     - stb %g5, [%o0 + 11]
961     - add %o0, -2, %o0
962     - ldub [%o1 + 16], %g5
963     - stb %g5, [%o0 + 12]
964     -1:
965     - be 1f
966     - nop
967     - ldub [%o1 + 15], %g5
968     - stb %g5, [%o0 + 11]
969     -1:
970     - retl
971     - RETL_INSN
972     -
973     -#endif /* FASTER_REVERSE */
974     + mov %g7, %o0
975    
976     /* NOTE: This code is executed just for the cases,
977     where %src (=%o1) & 3 is != 0.
978     @@ -546,7 +204,7 @@ FUNC(memmove)
979     FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
980    
981     sub %o0, %o1, %o4
982     - SETUP_RETL
983     + mov %o0, %g7
984     9:
985     andcc %o4, 3, %o5
986     0:
987     @@ -569,7 +227,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
988     add %o1, 4, %o1
989     add %o0, 4, %o0
990     2:
991     - andcc %g1, 0xffffff80, %g7
992     + andcc %g1, 0xffffff80, %g0
993     be 3f
994     andcc %o0, 4, %g0
995    
996     @@ -579,22 +237,23 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
997     MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
998     MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
999     MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
1000     - subcc %g7, 128, %g7
1001     + sub %g1, 128, %g1
1002     add %o1, 128, %o1
1003     - bne 5b
1004     + cmp %g1, 128
1005     + bge 5b
1006     add %o0, 128, %o0
1007     3:
1008     - andcc %g1, 0x70, %g7
1009     + andcc %g1, 0x70, %g4
1010     be 80f
1011     andcc %g1, 8, %g0
1012    
1013     sethi %hi(80f), %o5
1014     - srl %g7, 1, %o4
1015     - add %g7, %o4, %o4
1016     - add %o1, %g7, %o1
1017     + srl %g4, 1, %o4
1018     + add %g4, %o4, %o4
1019     + add %o1, %g4, %o1
1020     sub %o5, %o4, %o5
1021     jmpl %o5 + %lo(80f), %g0
1022     - add %o0, %g7, %o0
1023     + add %o0, %g4, %o0
1024    
1025     79: /* memcpy_table */
1026    
1027     @@ -641,43 +300,28 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1028     stb %g2, [%o0]
1029     1:
1030     retl
1031     - RETL_INSN
1032     + mov %g7, %o0
1033    
1034     82: /* ldd_std */
1035     MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
1036     MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
1037     MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
1038     MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
1039     - subcc %g7, 128, %g7
1040     + subcc %g1, 128, %g1
1041     add %o1, 128, %o1
1042     - bne 82b
1043     + cmp %g1, 128
1044     + bge 82b
1045     add %o0, 128, %o0
1046    
1047     -#ifndef FASTER_ALIGNED
1048     -
1049     - andcc %g1, 0x70, %g7
1050     - be 80b
1051     - andcc %g1, 8, %g0
1052     -
1053     - sethi %hi(80b), %o5
1054     - srl %g7, 1, %o4
1055     - add %g7, %o4, %o4
1056     - add %o1, %g7, %o1
1057     - sub %o5, %o4, %o5
1058     - jmpl %o5 + %lo(80b), %g0
1059     - add %o0, %g7, %o0
1060     -
1061     -#else /* FASTER_ALIGNED */
1062     -
1063     - andcc %g1, 0x70, %g7
1064     + andcc %g1, 0x70, %g4
1065     be 84f
1066     andcc %g1, 8, %g0
1067    
1068     sethi %hi(84f), %o5
1069     - add %o1, %g7, %o1
1070     - sub %o5, %g7, %o5
1071     + add %o1, %g4, %o1
1072     + sub %o5, %g4, %o5
1073     jmpl %o5 + %lo(84f), %g0
1074     - add %o0, %g7, %o0
1075     + add %o0, %g4, %o0
1076    
1077     83: /* amemcpy_table */
1078    
1079     @@ -721,382 +365,132 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1080     stb %g2, [%o0]
1081     1:
1082     retl
1083     - RETL_INSN
1084     -
1085     -#endif /* FASTER_ALIGNED */
1086     + mov %g7, %o0
1087    
1088     86: /* non_aligned */
1089     cmp %o2, 6
1090     bleu 88f
1091     + nop
1092    
1093     -#ifdef FASTER_NONALIGNED
1094     -
1095     - cmp %o2, 256
1096     - bcc 87f
1097     -
1098     -#endif /* FASTER_NONALIGNED */
1099     -
1100     - andcc %o0, 3, %g0
1101     + save %sp, -96, %sp
1102     + andcc %i0, 3, %g0
1103     be 61f
1104     - andcc %o0, 1, %g0
1105     + andcc %i0, 1, %g0
1106     be 60f
1107     - andcc %o0, 2, %g0
1108     + andcc %i0, 2, %g0
1109    
1110     - ldub [%o1], %g5
1111     - add %o1, 1, %o1
1112     - stb %g5, [%o0]
1113     - sub %o2, 1, %o2
1114     + ldub [%i1], %g5
1115     + add %i1, 1, %i1
1116     + stb %g5, [%i0]
1117     + sub %i2, 1, %i2
1118     bne 61f
1119     - add %o0, 1, %o0
1120     + add %i0, 1, %i0
1121     60:
1122     - ldub [%o1], %g3
1123     - add %o1, 2, %o1
1124     - stb %g3, [%o0]
1125     - sub %o2, 2, %o2
1126     - ldub [%o1 - 1], %g3
1127     - add %o0, 2, %o0
1128     - stb %g3, [%o0 - 1]
1129     + ldub [%i1], %g3
1130     + add %i1, 2, %i1
1131     + stb %g3, [%i0]
1132     + sub %i2, 2, %i2
1133     + ldub [%i1 - 1], %g3
1134     + add %i0, 2, %i0
1135     + stb %g3, [%i0 - 1]
1136     61:
1137     - and %o1, 3, %g2
1138     - and %o2, 0xc, %g3
1139     - and %o1, -4, %o1
1140     + and %i1, 3, %g2
1141     + and %i2, 0xc, %g3
1142     + and %i1, -4, %i1
1143     cmp %g3, 4
1144     sll %g2, 3, %g4
1145     mov 32, %g2
1146     be 4f
1147     - sub %g2, %g4, %g7
1148     + sub %g2, %g4, %l0
1149    
1150     blu 3f
1151     cmp %g3, 0x8
1152    
1153     be 2f
1154     - srl %o2, 2, %g3
1155     + srl %i2, 2, %g3
1156    
1157     - ld [%o1], %o3
1158     - add %o0, -8, %o0
1159     - ld [%o1 + 4], %o4
1160     + ld [%i1], %i3
1161     + add %i0, -8, %i0
1162     + ld [%i1 + 4], %i4
1163     b 8f
1164     add %g3, 1, %g3
1165     2:
1166     - ld [%o1], %o4
1167     - add %o0, -12, %o0
1168     - ld [%o1 + 4], %o5
1169     + ld [%i1], %i4
1170     + add %i0, -12, %i0
1171     + ld [%i1 + 4], %i5
1172     add %g3, 2, %g3
1173     b 9f
1174     - add %o1, -4, %o1
1175     + add %i1, -4, %i1
1176     3:
1177     - ld [%o1], %g1
1178     - add %o0, -4, %o0
1179     - ld [%o1 + 4], %o3
1180     - srl %o2, 2, %g3
1181     + ld [%i1], %g1
1182     + add %i0, -4, %i0
1183     + ld [%i1 + 4], %i3
1184     + srl %i2, 2, %g3
1185     b 7f
1186     - add %o1, 4, %o1
1187     + add %i1, 4, %i1
1188     4:
1189     - ld [%o1], %o5
1190     - cmp %o2, 7
1191     - ld [%o1 + 4], %g1
1192     - srl %o2, 2, %g3
1193     + ld [%i1], %i5
1194     + cmp %i2, 7
1195     + ld [%i1 + 4], %g1
1196     + srl %i2, 2, %g3
1197     bleu 10f
1198     - add %o1, 8, %o1
1199     + add %i1, 8, %i1
1200    
1201     - ld [%o1], %o3
1202     + ld [%i1], %i3
1203     add %g3, -1, %g3
1204     5:
1205     - sll %o5, %g4, %g2
1206     - srl %g1, %g7, %g5
1207     + sll %i5, %g4, %g2
1208     + srl %g1, %l0, %g5
1209     or %g2, %g5, %g2
1210     - st %g2, [%o0]
1211     + st %g2, [%i0]
1212     7:
1213     - ld [%o1 + 4], %o4
1214     + ld [%i1 + 4], %i4
1215     sll %g1, %g4, %g2
1216     - srl %o3, %g7, %g5
1217     + srl %i3, %l0, %g5
1218     or %g2, %g5, %g2
1219     - st %g2, [%o0 + 4]
1220     + st %g2, [%i0 + 4]
1221     8:
1222     - ld [%o1 + 8], %o5
1223     - sll %o3, %g4, %g2
1224     - srl %o4, %g7, %g5
1225     + ld [%i1 + 8], %i5
1226     + sll %i3, %g4, %g2
1227     + srl %i4, %l0, %g5
1228     or %g2, %g5, %g2
1229     - st %g2, [%o0 + 8]
1230     + st %g2, [%i0 + 8]
1231     9:
1232     - ld [%o1 + 12], %g1
1233     - sll %o4, %g4, %g2
1234     - srl %o5, %g7, %g5
1235     + ld [%i1 + 12], %g1
1236     + sll %i4, %g4, %g2
1237     + srl %i5, %l0, %g5
1238     addcc %g3, -4, %g3
1239     or %g2, %g5, %g2
1240     - add %o1, 16, %o1
1241     - st %g2, [%o0 + 12]
1242     - add %o0, 16, %o0
1243     + add %i1, 16, %i1
1244     + st %g2, [%i0 + 12]
1245     + add %i0, 16, %i0
1246     bne,a 5b
1247     - ld [%o1], %o3
1248     + ld [%i1], %i3
1249     10:
1250     - sll %o5, %g4, %g2
1251     - srl %g1, %g7, %g5
1252     - srl %g7, 3, %g3
1253     + sll %i5, %g4, %g2
1254     + srl %g1, %l0, %g5
1255     + srl %l0, 3, %g3
1256     or %g2, %g5, %g2
1257     - sub %o1, %g3, %o1
1258     - andcc %o2, 2, %g0
1259     - st %g2, [%o0]
1260     + sub %i1, %g3, %i1
1261     + andcc %i2, 2, %g0
1262     + st %g2, [%i0]
1263     be 1f
1264     - andcc %o2, 1, %g0
1265     -
1266     - ldub [%o1], %g2
1267     - add %o1, 2, %o1
1268     - stb %g2, [%o0 + 4]
1269     - add %o0, 2, %o0
1270     - ldub [%o1 - 1], %g2
1271     - stb %g2, [%o0 + 3]
1272     + andcc %i2, 1, %g0
1273     +
1274     + ldub [%i1], %g2
1275     + add %i1, 2, %i1
1276     + stb %g2, [%i0 + 4]
1277     + add %i0, 2, %i0
1278     + ldub [%i1 - 1], %g2
1279     + stb %g2, [%i0 + 3]
1280     1:
1281     be 1f
1282     nop
1283     - ldub [%o1], %g2
1284     - stb %g2, [%o0 + 4]
1285     -1:
1286     - retl
1287     - RETL_INSN
1288     -
1289     -#ifdef FASTER_NONALIGNED
1290     -
1291     -87: /* faster_nonaligned */
1292     -
1293     - andcc %o1, 3, %g0
1294     - be 3f
1295     - andcc %o1, 1, %g0
1296     -
1297     - be 4f
1298     - andcc %o1, 2, %g0
1299     -
1300     - ldub [%o1], %g2
1301     - add %o1, 1, %o1
1302     - stb %g2, [%o0]
1303     - sub %o2, 1, %o2
1304     - bne 3f
1305     - add %o0, 1, %o0
1306     -4:
1307     - lduh [%o1], %g2
1308     - add %o1, 2, %o1
1309     - srl %g2, 8, %g3
1310     - sub %o2, 2, %o2
1311     - stb %g3, [%o0]
1312     - add %o0, 2, %o0
1313     - stb %g2, [%o0 - 1]
1314     -3:
1315     - andcc %o1, 4, %g0
1316     -
1317     - bne 2f
1318     - cmp %o5, 1
1319     -
1320     - ld [%o1], %o4
1321     - srl %o4, 24, %g2
1322     - stb %g2, [%o0]
1323     - srl %o4, 16, %g3
1324     - stb %g3, [%o0 + 1]
1325     - srl %o4, 8, %g2
1326     - stb %g2, [%o0 + 2]
1327     - sub %o2, 4, %o2
1328     - stb %o4, [%o0 + 3]
1329     - add %o1, 4, %o1
1330     - add %o0, 4, %o0
1331     -2:
1332     - be 33f
1333     - cmp %o5, 2
1334     - be 32f
1335     - sub %o2, 4, %o2
1336     -31:
1337     - ld [%o1], %g2
1338     - add %o1, 4, %o1
1339     - srl %g2, 24, %g3
1340     - and %o0, 7, %g5
1341     - stb %g3, [%o0]
1342     - cmp %g5, 7
1343     - sll %g2, 8, %g1
1344     - add %o0, 4, %o0
1345     - be 41f
1346     - and %o2, 0xffffffc0, %o3
1347     - ld [%o0 - 7], %o4
1348     -4:
1349     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1350     - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1351     - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1352     - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1353     - subcc %o3, 64, %o3
1354     - add %o1, 64, %o1
1355     - bne 4b
1356     - add %o0, 64, %o0
1357     -
1358     - andcc %o2, 0x30, %o3
1359     - be,a 1f
1360     - srl %g1, 16, %g2
1361     -4:
1362     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1363     - subcc %o3, 16, %o3
1364     - add %o1, 16, %o1
1365     - bne 4b
1366     - add %o0, 16, %o0
1367     -
1368     - srl %g1, 16, %g2
1369     -1:
1370     - st %o4, [%o0 - 7]
1371     - sth %g2, [%o0 - 3]
1372     - srl %g1, 8, %g4
1373     - b 88f
1374     - stb %g4, [%o0 - 1]
1375     -32:
1376     - ld [%o1], %g2
1377     - add %o1, 4, %o1
1378     - srl %g2, 16, %g3
1379     - and %o0, 7, %g5
1380     - sth %g3, [%o0]
1381     - cmp %g5, 6
1382     - sll %g2, 16, %g1
1383     - add %o0, 4, %o0
1384     - be 42f
1385     - and %o2, 0xffffffc0, %o3
1386     - ld [%o0 - 6], %o4
1387     -4:
1388     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1389     - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1390     - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1391     - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1392     - subcc %o3, 64, %o3
1393     - add %o1, 64, %o1
1394     - bne 4b
1395     - add %o0, 64, %o0
1396     -
1397     - andcc %o2, 0x30, %o3
1398     - be,a 1f
1399     - srl %g1, 16, %g2
1400     -4:
1401     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1402     - subcc %o3, 16, %o3
1403     - add %o1, 16, %o1
1404     - bne 4b
1405     - add %o0, 16, %o0
1406     -
1407     - srl %g1, 16, %g2
1408     -1:
1409     - st %o4, [%o0 - 6]
1410     - b 88f
1411     - sth %g2, [%o0 - 2]
1412     -33:
1413     - ld [%o1], %g2
1414     - sub %o2, 4, %o2
1415     - srl %g2, 24, %g3
1416     - and %o0, 7, %g5
1417     - stb %g3, [%o0]
1418     - cmp %g5, 5
1419     - srl %g2, 8, %g4
1420     - sll %g2, 24, %g1
1421     - sth %g4, [%o0 + 1]
1422     - add %o1, 4, %o1
1423     - be 43f
1424     - and %o2, 0xffffffc0, %o3
1425     -
1426     - ld [%o0 - 1], %o4
1427     - add %o0, 4, %o0
1428     -4:
1429     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1430     - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1431     - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1432     - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1433     - subcc %o3, 64, %o3
1434     - add %o1, 64, %o1
1435     - bne 4b
1436     - add %o0, 64, %o0
1437     -
1438     - andcc %o2, 0x30, %o3
1439     - be,a 1f
1440     - srl %g1, 24, %g2
1441     -4:
1442     - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1443     - subcc %o3, 16, %o3
1444     - add %o1, 16, %o1
1445     - bne 4b
1446     - add %o0, 16, %o0
1447     -
1448     - srl %g1, 24, %g2
1449     -1:
1450     - st %o4, [%o0 - 5]
1451     - b 88f
1452     - stb %g2, [%o0 - 1]
1453     -41:
1454     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1455     - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1456     - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1457     - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1458     - subcc %o3, 64, %o3
1459     - add %o1, 64, %o1
1460     - bne 41b
1461     - add %o0, 64, %o0
1462     -
1463     - andcc %o2, 0x30, %o3
1464     - be,a 1f
1465     - srl %g1, 16, %g2
1466     -4:
1467     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1468     - subcc %o3, 16, %o3
1469     - add %o1, 16, %o1
1470     - bne 4b
1471     - add %o0, 16, %o0
1472     -
1473     - srl %g1, 16, %g2
1474     + ldub [%i1], %g2
1475     + stb %g2, [%i0 + 4]
1476     1:
1477     - sth %g2, [%o0 - 3]
1478     - srl %g1, 8, %g4
1479     - b 88f
1480     - stb %g4, [%o0 - 1]
1481     -43:
1482     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1483     - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1484     - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1485     - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1486     - subcc %o3, 64, %o3
1487     - add %o1, 64, %o1
1488     - bne 43b
1489     - add %o0, 64, %o0
1490     -
1491     - andcc %o2, 0x30, %o3
1492     - be,a 1f
1493     - srl %g1, 24, %g2
1494     -4:
1495     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1496     - subcc %o3, 16, %o3
1497     - add %o1, 16, %o1
1498     - bne 4b
1499     - add %o0, 16, %o0
1500     -
1501     - srl %g1, 24, %g2
1502     -1:
1503     - stb %g2, [%o0 + 3]
1504     - b 88f
1505     - add %o0, 4, %o0
1506     -42:
1507     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1508     - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1509     - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1510     - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1511     - subcc %o3, 64, %o3
1512     - add %o1, 64, %o1
1513     - bne 42b
1514     - add %o0, 64, %o0
1515     -
1516     - andcc %o2, 0x30, %o3
1517     - be,a 1f
1518     - srl %g1, 16, %g2
1519     -4:
1520     - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1521     - subcc %o3, 16, %o3
1522     - add %o1, 16, %o1
1523     - bne 4b
1524     - add %o0, 16, %o0
1525     -
1526     - srl %g1, 16, %g2
1527     -1:
1528     - sth %g2, [%o0 - 2]
1529     -
1530     - /* Fall through */
1531     -
1532     -#endif /* FASTER_NONALIGNED */
1533     + ret
1534     + restore %g7, %g0, %o0
1535    
1536     88: /* short_end */
1537    
1538     @@ -1127,7 +521,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
1539     stb %g2, [%o0]
1540     1:
1541     retl
1542     - RETL_INSN
1543     + mov %g7, %o0
1544    
1545     90: /* short_aligned_end */
1546     bne 88b
1547     diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
1548     index e3cda21..301421c 100644
1549     --- a/arch/sparc/mm/Makefile
1550     +++ b/arch/sparc/mm/Makefile
1551     @@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
1552     obj-y += fault_$(BITS).o
1553     obj-y += init_$(BITS).o
1554     obj-$(CONFIG_SPARC32) += loadmmu.o
1555     -obj-y += generic_$(BITS).o
1556     obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
1557     obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
1558     obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
1559     diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
1560     index 5175ac2..8a7f817 100644
1561     --- a/arch/sparc/mm/btfixup.c
1562     +++ b/arch/sparc/mm/btfixup.c
1563     @@ -302,8 +302,7 @@ void __init btfixup(void)
1564     case 'i': /* INT */
1565     if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
1566     set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
1567     - else if ((insn & 0x80002000) == 0x80002000 &&
1568     - (insn & 0x01800000) != 0x01800000) /* %LO */
1569     + else if ((insn & 0x80002000) == 0x80002000) /* %LO */
1570     set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
1571     else {
1572     prom_printf(insn_i, p, addr, insn);
1573     diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
1574     deleted file mode 100644
1575     index e6067b7..0000000
1576     --- a/arch/sparc/mm/generic_32.c
1577     +++ /dev/null
1578     @@ -1,98 +0,0 @@
1579     -/*
1580     - * generic.c: Generic Sparc mm routines that are not dependent upon
1581     - * MMU type but are Sparc specific.
1582     - *
1583     - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
1584     - */
1585     -
1586     -#include <linux/kernel.h>
1587     -#include <linux/mm.h>
1588     -#include <linux/swap.h>
1589     -#include <linux/pagemap.h>
1590     -
1591     -#include <asm/pgalloc.h>
1592     -#include <asm/pgtable.h>
1593     -#include <asm/page.h>
1594     -#include <asm/cacheflush.h>
1595     -#include <asm/tlbflush.h>
1596     -
1597     -/* Remap IO memory, the same way as remap_pfn_range(), but use
1598     - * the obio memory space.
1599     - *
1600     - * They use a pgprot that sets PAGE_IO and does not check the
1601     - * mem_map table as this is independent of normal memory.
1602     - */
1603     -static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
1604     - unsigned long offset, pgprot_t prot, int space)
1605     -{
1606     - unsigned long end;
1607     -
1608     - address &= ~PMD_MASK;
1609     - end = address + size;
1610     - if (end > PMD_SIZE)
1611     - end = PMD_SIZE;
1612     - do {
1613     - set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
1614     - address += PAGE_SIZE;
1615     - offset += PAGE_SIZE;
1616     - pte++;
1617     - } while (address < end);
1618     -}
1619     -
1620     -static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
1621     - unsigned long offset, pgprot_t prot, int space)
1622     -{
1623     - unsigned long end;
1624     -
1625     - address &= ~PGDIR_MASK;
1626     - end = address + size;
1627     - if (end > PGDIR_SIZE)
1628     - end = PGDIR_SIZE;
1629     - offset -= address;
1630     - do {
1631     - pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
1632     - if (!pte)
1633     - return -ENOMEM;
1634     - io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
1635     - address = (address + PMD_SIZE) & PMD_MASK;
1636     - pmd++;
1637     - } while (address < end);
1638     - return 0;
1639     -}
1640     -
1641     -int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1642     - unsigned long pfn, unsigned long size, pgprot_t prot)
1643     -{
1644     - int error = 0;
1645     - pgd_t * dir;
1646     - unsigned long beg = from;
1647     - unsigned long end = from + size;
1648     - struct mm_struct *mm = vma->vm_mm;
1649     - int space = GET_IOSPACE(pfn);
1650     - unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1651     -
1652     - /* See comment in mm/memory.c remap_pfn_range */
1653     - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1654     - vma->vm_pgoff = (offset >> PAGE_SHIFT) |
1655     - ((unsigned long)space << 28UL);
1656     -
1657     - offset -= from;
1658     - dir = pgd_offset(mm, from);
1659     - flush_cache_range(vma, beg, end);
1660     -
1661     - while (from < end) {
1662     - pmd_t *pmd = pmd_alloc(mm, dir, from);
1663     - error = -ENOMEM;
1664     - if (!pmd)
1665     - break;
1666     - error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
1667     - if (error)
1668     - break;
1669     - from = (from + PGDIR_SIZE) & PGDIR_MASK;
1670     - dir++;
1671     - }
1672     -
1673     - flush_tlb_range(vma, beg, end);
1674     - return error;
1675     -}
1676     -EXPORT_SYMBOL(io_remap_pfn_range);
1677     diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
1678     deleted file mode 100644
1679     index 3cb00df..0000000
1680     --- a/arch/sparc/mm/generic_64.c
1681     +++ /dev/null
1682     @@ -1,164 +0,0 @@
1683     -/*
1684     - * generic.c: Generic Sparc mm routines that are not dependent upon
1685     - * MMU type but are Sparc specific.
1686     - *
1687     - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
1688     - */
1689     -
1690     -#include <linux/kernel.h>
1691     -#include <linux/mm.h>
1692     -#include <linux/swap.h>
1693     -#include <linux/pagemap.h>
1694     -
1695     -#include <asm/pgalloc.h>
1696     -#include <asm/pgtable.h>
1697     -#include <asm/page.h>
1698     -#include <asm/tlbflush.h>
1699     -
1700     -/* Remap IO memory, the same way as remap_pfn_range(), but use
1701     - * the obio memory space.
1702     - *
1703     - * They use a pgprot that sets PAGE_IO and does not check the
1704     - * mem_map table as this is independent of normal memory.
1705     - */
1706     -static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
1707     - unsigned long address,
1708     - unsigned long size,
1709     - unsigned long offset, pgprot_t prot,
1710     - int space)
1711     -{
1712     - unsigned long end;
1713     -
1714     - /* clear hack bit that was used as a write_combine side-effect flag */
1715     - offset &= ~0x1UL;
1716     - address &= ~PMD_MASK;
1717     - end = address + size;
1718     - if (end > PMD_SIZE)
1719     - end = PMD_SIZE;
1720     - do {
1721     - pte_t entry;
1722     - unsigned long curend = address + PAGE_SIZE;
1723     -
1724     - entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
1725     - if (!(address & 0xffff)) {
1726     - if (PAGE_SIZE < (4 * 1024 * 1024) &&
1727     - !(address & 0x3fffff) &&
1728     - !(offset & 0x3ffffe) &&
1729     - end >= address + 0x400000) {
1730     - entry = mk_pte_io(offset, prot, space,
1731     - 4 * 1024 * 1024);
1732     - curend = address + 0x400000;
1733     - offset += 0x400000;
1734     - } else if (PAGE_SIZE < (512 * 1024) &&
1735     - !(address & 0x7ffff) &&
1736     - !(offset & 0x7fffe) &&
1737     - end >= address + 0x80000) {
1738     - entry = mk_pte_io(offset, prot, space,
1739     - 512 * 1024 * 1024);
1740     - curend = address + 0x80000;
1741     - offset += 0x80000;
1742     - } else if (PAGE_SIZE < (64 * 1024) &&
1743     - !(offset & 0xfffe) &&
1744     - end >= address + 0x10000) {
1745     - entry = mk_pte_io(offset, prot, space,
1746     - 64 * 1024);
1747     - curend = address + 0x10000;
1748     - offset += 0x10000;
1749     - } else
1750     - offset += PAGE_SIZE;
1751     - } else
1752     - offset += PAGE_SIZE;
1753     -
1754     - if (pte_write(entry))
1755     - entry = pte_mkdirty(entry);
1756     - do {
1757     - BUG_ON(!pte_none(*pte));
1758     - set_pte_at(mm, address, pte, entry);
1759     - address += PAGE_SIZE;
1760     - pte_val(entry) += PAGE_SIZE;
1761     - pte++;
1762     - } while (address < curend);
1763     - } while (address < end);
1764     -}
1765     -
1766     -static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
1767     - unsigned long offset, pgprot_t prot, int space)
1768     -{
1769     - unsigned long end;
1770     -
1771     - address &= ~PGDIR_MASK;
1772     - end = address + size;
1773     - if (end > PGDIR_SIZE)
1774     - end = PGDIR_SIZE;
1775     - offset -= address;
1776     - do {
1777     - pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
1778     - if (!pte)
1779     - return -ENOMEM;
1780     - io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
1781     - pte_unmap(pte);
1782     - address = (address + PMD_SIZE) & PMD_MASK;
1783     - pmd++;
1784     - } while (address < end);
1785     - return 0;
1786     -}
1787     -
1788     -static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
1789     - unsigned long offset, pgprot_t prot, int space)
1790     -{
1791     - unsigned long end;
1792     -
1793     - address &= ~PUD_MASK;
1794     - end = address + size;
1795     - if (end > PUD_SIZE)
1796     - end = PUD_SIZE;
1797     - offset -= address;
1798     - do {
1799     - pmd_t *pmd = pmd_alloc(mm, pud, address);
1800     - if (!pud)
1801     - return -ENOMEM;
1802     - io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
1803     - address = (address + PUD_SIZE) & PUD_MASK;
1804     - pud++;
1805     - } while (address < end);
1806     - return 0;
1807     -}
1808     -
1809     -int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1810     - unsigned long pfn, unsigned long size, pgprot_t prot)
1811     -{
1812     - int error = 0;
1813     - pgd_t * dir;
1814     - unsigned long beg = from;
1815     - unsigned long end = from + size;
1816     - struct mm_struct *mm = vma->vm_mm;
1817     - int space = GET_IOSPACE(pfn);
1818     - unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
1819     - unsigned long phys_base;
1820     -
1821     - phys_base = offset | (((unsigned long) space) << 32UL);
1822     -
1823     - /* See comment in mm/memory.c remap_pfn_range */
1824     - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1825     - vma->vm_pgoff = phys_base >> PAGE_SHIFT;
1826     -
1827     - offset -= from;
1828     - dir = pgd_offset(mm, from);
1829     - flush_cache_range(vma, beg, end);
1830     -
1831     - while (from < end) {
1832     - pud_t *pud = pud_alloc(mm, dir, from);
1833     - error = -ENOMEM;
1834     - if (!pud)
1835     - break;
1836     - error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
1837     - if (error)
1838     - break;
1839     - from = (from + PGDIR_SIZE) & PGDIR_MASK;
1840     - dir++;
1841     - }
1842     -
1843     - flush_tlb_range(vma, beg, end);
1844     - return error;
1845     -}
1846     -EXPORT_SYMBOL(io_remap_pfn_range);
1847     diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
1848     index bfab3fa..7b65f75 100644
1849     --- a/arch/x86/net/bpf_jit_comp.c
1850     +++ b/arch/x86/net/bpf_jit_comp.c
1851     @@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
1852     break;
1853     }
1854     if (filter[i].jt != 0) {
1855     - if (filter[i].jf)
1856     - t_offset += is_near(f_offset) ? 2 : 6;
1857     + if (filter[i].jf && f_offset)
1858     + t_offset += is_near(f_offset) ? 2 : 5;
1859     EMIT_COND_JMP(t_op, t_offset);
1860     if (filter[i].jf)
1861     EMIT_JMP(f_offset);
1862     diff --git a/block/blk-core.c b/block/blk-core.c
1863     index 795154e..8fc4ae2 100644
1864     --- a/block/blk-core.c
1865     +++ b/block/blk-core.c
1866     @@ -418,6 +418,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1867     q->backing_dev_info.state = 0;
1868     q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
1869     q->backing_dev_info.name = "block";
1870     + q->node = node_id;
1871    
1872     err = bdi_init(&q->backing_dev_info);
1873     if (err) {
1874     @@ -502,7 +503,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1875     if (!uninit_q)
1876     return NULL;
1877    
1878     - q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
1879     + q = blk_init_allocated_queue(uninit_q, rfn, lock);
1880     if (!q)
1881     blk_cleanup_queue(uninit_q);
1882    
1883     @@ -514,18 +515,9 @@ struct request_queue *
1884     blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
1885     spinlock_t *lock)
1886     {
1887     - return blk_init_allocated_queue_node(q, rfn, lock, -1);
1888     -}
1889     -EXPORT_SYMBOL(blk_init_allocated_queue);
1890     -
1891     -struct request_queue *
1892     -blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
1893     - spinlock_t *lock, int node_id)
1894     -{
1895     if (!q)
1896     return NULL;
1897    
1898     - q->node = node_id;
1899     if (blk_init_free_list(q))
1900     return NULL;
1901    
1902     @@ -555,7 +547,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
1903    
1904     return NULL;
1905     }
1906     -EXPORT_SYMBOL(blk_init_allocated_queue_node);
1907     +EXPORT_SYMBOL(blk_init_allocated_queue);
1908    
1909     int blk_get_queue(struct request_queue *q)
1910     {
1911     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1912     index 16ace89..4c12869 100644
1913     --- a/block/cfq-iosched.c
1914     +++ b/block/cfq-iosched.c
1915     @@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1916     }
1917     }
1918    
1919     - if (ret)
1920     + if (ret && ret != -EEXIST)
1921     printk(KERN_ERR "cfq: cic link failed!\n");
1922    
1923     return ret;
1924     @@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1925     {
1926     struct io_context *ioc = NULL;
1927     struct cfq_io_context *cic;
1928     + int ret;
1929    
1930     might_sleep_if(gfp_mask & __GFP_WAIT);
1931    
1932     @@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1933     if (!ioc)
1934     return NULL;
1935    
1936     +retry:
1937     cic = cfq_cic_lookup(cfqd, ioc);
1938     if (cic)
1939     goto out;
1940     @@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1941     if (cic == NULL)
1942     goto err;
1943    
1944     - if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1945     + ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
1946     + if (ret == -EEXIST) {
1947     + /* someone has linked cic to ioc already */
1948     + cfq_cic_free(cic);
1949     + goto retry;
1950     + } else if (ret)
1951     goto err_free;
1952    
1953     out:
1954     @@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
1955    
1956     if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
1957     kfree(cfqg);
1958     +
1959     + spin_lock(&cic_index_lock);
1960     + ida_remove(&cic_index_ida, cfqd->cic_index);
1961     + spin_unlock(&cic_index_lock);
1962     +
1963     kfree(cfqd);
1964     return NULL;
1965     }
1966     diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
1967     index 8a3942c..c72b590 100644
1968     --- a/drivers/gpu/drm/i915/i915_dma.c
1969     +++ b/drivers/gpu/drm/i915/i915_dma.c
1970     @@ -1453,6 +1453,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1971    
1972     diff1 = now - dev_priv->last_time1;
1973    
1974     + /* Prevent division-by-zero if we are asking too fast.
1975     + * Also, we don't get interesting results if we are polling
1976     + * faster than once in 10ms, so just return the saved value
1977     + * in such cases.
1978     + */
1979     + if (diff1 <= 10)
1980     + return dev_priv->chipset_power;
1981     +
1982     count1 = I915_READ(DMIEC);
1983     count2 = I915_READ(DDREC);
1984     count3 = I915_READ(CSIEC);
1985     @@ -1483,6 +1491,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1986     dev_priv->last_count1 = total_count;
1987     dev_priv->last_time1 = now;
1988    
1989     + dev_priv->chipset_power = ret;
1990     +
1991     return ret;
1992     }
1993    
1994     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1995     index 7916bd9..1a2a2d1 100644
1996     --- a/drivers/gpu/drm/i915/i915_drv.h
1997     +++ b/drivers/gpu/drm/i915/i915_drv.h
1998     @@ -707,6 +707,7 @@ typedef struct drm_i915_private {
1999    
2000     u64 last_count1;
2001     unsigned long last_time1;
2002     + unsigned long chipset_power;
2003     u64 last_count2;
2004     struct timespec last_time2;
2005     unsigned long gfx_power;
2006     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2007     index ad381a2..2ae29de 100644
2008     --- a/drivers/gpu/drm/i915/i915_reg.h
2009     +++ b/drivers/gpu/drm/i915/i915_reg.h
2010     @@ -3271,10 +3271,10 @@
2011     /* or SDVOB */
2012     #define HDMIB 0xe1140
2013     #define PORT_ENABLE (1 << 31)
2014     -#define TRANSCODER_A (0)
2015     -#define TRANSCODER_B (1 << 30)
2016     -#define TRANSCODER(pipe) ((pipe) << 30)
2017     -#define TRANSCODER_MASK (1 << 30)
2018     +#define TRANSCODER(pipe) ((pipe) << 30)
2019     +#define TRANSCODER_CPT(pipe) ((pipe) << 29)
2020     +#define TRANSCODER_MASK (1 << 30)
2021     +#define TRANSCODER_MASK_CPT (3 << 29)
2022     #define COLOR_FORMAT_8bpc (0)
2023     #define COLOR_FORMAT_12bpc (3 << 26)
2024     #define SDVOB_HOTPLUG_ENABLE (1 << 23)
2025     diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
2026     index 6348c49..ac0c323 100644
2027     --- a/drivers/gpu/drm/i915/intel_sdvo.c
2028     +++ b/drivers/gpu/drm/i915/intel_sdvo.c
2029     @@ -1085,8 +1085,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
2030     }
2031     sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
2032     }
2033     - if (intel_crtc->pipe == 1)
2034     - sdvox |= SDVO_PIPE_B_SELECT;
2035     +
2036     + if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
2037     + sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
2038     + else
2039     + sdvox |= TRANSCODER(intel_crtc->pipe);
2040     +
2041     if (intel_sdvo->has_hdmi_audio)
2042     sdvox |= SDVO_AUDIO_ENABLE;
2043    
2044     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
2045     index d4ee6f0..c3f0d42 100644
2046     --- a/drivers/gpu/drm/radeon/evergreen.c
2047     +++ b/drivers/gpu/drm/radeon/evergreen.c
2048     @@ -3258,6 +3258,18 @@ int evergreen_init(struct radeon_device *rdev)
2049     rdev->accel_working = false;
2050     }
2051     }
2052     +
2053     + /* Don't start up if the MC ucode is missing on BTC parts.
2054     + * The default clocks and voltages before the MC ucode
2055     + * is loaded are not sufficient for advanced operations.
2056     + */
2057     + if (ASIC_IS_DCE5(rdev)) {
2058     + if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
2059     + DRM_ERROR("radeon: MC ucode required for NI+.\n");
2060     + return -EINVAL;
2061     + }
2062     + }
2063     +
2064     return 0;
2065     }
2066    
2067     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2068     index 285acc4..a098edc 100644
2069     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2070     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2071     @@ -2568,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2072    
2073     rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2074     rdev->pm.current_clock_mode_index = 0;
2075     - rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
2076     + if (rdev->pm.default_power_state_index >= 0)
2077     + rdev->pm.current_vddc =
2078     + rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
2079     + else
2080     + rdev->pm.current_vddc = 0;
2081     }
2082    
2083     void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2084     diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
2085     index fa643f4..059a865 100644
2086     --- a/drivers/infiniband/hw/mlx4/main.c
2087     +++ b/drivers/infiniband/hw/mlx4/main.c
2088     @@ -1144,7 +1144,8 @@ err_reg:
2089    
2090     err_counter:
2091     for (; i; --i)
2092     - mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2093     + if (ibdev->counters[i - 1] != -1)
2094     + mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2095    
2096     err_map:
2097     iounmap(ibdev->uar_map);
2098     @@ -1175,7 +1176,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2099     }
2100     iounmap(ibdev->uar_map);
2101     for (p = 0; p < ibdev->num_ports; ++p)
2102     - mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2103     + if (ibdev->counters[p] != -1)
2104     + mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2105     mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2106     mlx4_CLOSE_PORT(dev, p);
2107    
2108     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
2109     index 5538fc6..7675363 100644
2110     --- a/drivers/input/mouse/synaptics.c
2111     +++ b/drivers/input/mouse/synaptics.c
2112     @@ -24,6 +24,7 @@
2113     */
2114    
2115     #include <linux/module.h>
2116     +#include <linux/delay.h>
2117     #include <linux/dmi.h>
2118     #include <linux/input/mt.h>
2119     #include <linux/serio.h>
2120     @@ -786,6 +787,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
2121    
2122     do {
2123     psmouse_reset(psmouse);
2124     + if (retry) {
2125     + /*
2126     + * On some boxes, right after resuming, the touchpad
2127     + * needs some time to finish initializing (I assume
2128     + * it needs time to calibrate) and start responding
2129     + * to Synaptics-specific queries, so let's wait a
2130     + * bit.
2131     + */
2132     + ssleep(1);
2133     + }
2134     error = synaptics_detect(psmouse, 0);
2135     } while (error && ++retry < 3);
2136    
2137     diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
2138     index b3a5ecd..3422da0 100644
2139     --- a/drivers/media/video/omap/omap_vout.c
2140     +++ b/drivers/media/video/omap/omap_vout.c
2141     @@ -38,6 +38,7 @@
2142     #include <linux/irq.h>
2143     #include <linux/videodev2.h>
2144     #include <linux/dma-mapping.h>
2145     +#include <linux/slab.h>
2146    
2147     #include <media/videobuf-dma-contig.h>
2148     #include <media/v4l2-device.h>
2149     diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
2150     index aa55066..b062b1a 100644
2151     --- a/drivers/media/video/s5p-fimc/fimc-core.c
2152     +++ b/drivers/media/video/s5p-fimc/fimc-core.c
2153     @@ -35,7 +35,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
2154     static struct fimc_fmt fimc_formats[] = {
2155     {
2156     .name = "RGB565",
2157     - .fourcc = V4L2_PIX_FMT_RGB565X,
2158     + .fourcc = V4L2_PIX_FMT_RGB565,
2159     .depth = { 16 },
2160     .color = S5P_FIMC_RGB565,
2161     .memplanes = 1,
2162     diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
2163     index b8eef46..35cdc80 100644
2164     --- a/drivers/mfd/twl-core.c
2165     +++ b/drivers/mfd/twl-core.c
2166     @@ -362,13 +362,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
2167     pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
2168     return -EPERM;
2169     }
2170     - sid = twl_map[mod_no].sid;
2171     - twl = &twl_modules[sid];
2172     -
2173     if (unlikely(!inuse)) {
2174     - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
2175     + pr_err("%s: not initialized\n", DRIVER_NAME);
2176     return -EPERM;
2177     }
2178     + sid = twl_map[mod_no].sid;
2179     + twl = &twl_modules[sid];
2180     +
2181     mutex_lock(&twl->xfer_lock);
2182     /*
2183     * [MSG1]: fill the register address data
2184     @@ -419,13 +419,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
2185     pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
2186     return -EPERM;
2187     }
2188     - sid = twl_map[mod_no].sid;
2189     - twl = &twl_modules[sid];
2190     -
2191     if (unlikely(!inuse)) {
2192     - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
2193     + pr_err("%s: not initialized\n", DRIVER_NAME);
2194     return -EPERM;
2195     }
2196     + sid = twl_map[mod_no].sid;
2197     + twl = &twl_modules[sid];
2198     +
2199     mutex_lock(&twl->xfer_lock);
2200     /* [MSG1] fill the register address data */
2201     msg = &twl->xfer_msg[0];
2202     diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
2203     index 7cbf2aa..834f824 100644
2204     --- a/drivers/mfd/twl4030-madc.c
2205     +++ b/drivers/mfd/twl4030-madc.c
2206     @@ -740,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
2207     TWL4030_BCI_BCICTL1);
2208     goto err_i2c;
2209     }
2210     +
2211     + /* Check that MADC clock is on */
2212     + ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &regval, TWL4030_REG_GPBR1);
2213     + if (ret) {
2214     + dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n",
2215     + TWL4030_REG_GPBR1);
2216     + goto err_i2c;
2217     + }
2218     +
2219     + /* If MADC clk is not on, turn it on */
2220     + if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) {
2221     + dev_info(&pdev->dev, "clk disabled, enabling\n");
2222     + regval |= TWL4030_GPBR1_MADC_HFCLK_EN;
2223     + ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval,
2224     + TWL4030_REG_GPBR1);
2225     + if (ret) {
2226     + dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n",
2227     + TWL4030_REG_GPBR1);
2228     + goto err_i2c;
2229     + }
2230     + }
2231     +
2232     platform_set_drvdata(pdev, madc);
2233     mutex_init(&madc->lock);
2234     ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL,
2235     diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
2236     index 56e9a41..d8eac24 100644
2237     --- a/drivers/mmc/host/mmci.c
2238     +++ b/drivers/mmc/host/mmci.c
2239     @@ -673,7 +673,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
2240     unsigned int status)
2241     {
2242     /* First check for errors */
2243     - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
2244     + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
2245     + MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
2246     u32 remain, success;
2247    
2248     /* Terminate the DMA transfer */
2249     @@ -752,8 +753,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
2250     }
2251    
2252     if (!cmd->data || cmd->error) {
2253     - if (host->data)
2254     + if (host->data) {
2255     + /* Terminate the DMA transfer */
2256     + if (dma_inprogress(host))
2257     + mmci_dma_data_error(host);
2258     mmci_stop_data(host);
2259     + }
2260     mmci_request_end(host, cmd->mrq);
2261     } else if (!(cmd->data->flags & MMC_DATA_READ)) {
2262     mmci_start_data(host, cmd->data);
2263     @@ -953,8 +958,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
2264     dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
2265    
2266     data = host->data;
2267     - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
2268     - MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
2269     + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
2270     + MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
2271     + MCI_DATABLOCKEND) && data)
2272     mmci_data_irq(host, data, status);
2273    
2274     cmd = host->cmd;
2275     diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
2276     index e8f6e65..2ec978b 100644
2277     --- a/drivers/mmc/host/vub300.c
2278     +++ b/drivers/mmc/host/vub300.c
2279     @@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
2280     static int firmware_rom_wait_states = 0x1C;
2281     #endif
2282    
2283     -module_param(firmware_rom_wait_states, bool, 0644);
2284     +module_param(firmware_rom_wait_states, int, 0644);
2285     MODULE_PARM_DESC(firmware_rom_wait_states,
2286     "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
2287    
2288     diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
2289     index 89f829f..f8a6853 100644
2290     --- a/drivers/net/pptp.c
2291     +++ b/drivers/net/pptp.c
2292     @@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
2293     lock_sock(sk);
2294    
2295     opt->src_addr = sp->sa_addr.pptp;
2296     - if (add_chan(po)) {
2297     - release_sock(sk);
2298     + if (add_chan(po))
2299     error = -EBUSY;
2300     - }
2301    
2302     release_sock(sk);
2303     return error;
2304     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2305     index 722967b..69736d8 100644
2306     --- a/drivers/net/wireless/ath/ath9k/main.c
2307     +++ b/drivers/net/wireless/ath/ath9k/main.c
2308     @@ -1841,6 +1841,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
2309     struct ath_softc *sc = hw->priv;
2310     struct ath_node *an = (struct ath_node *) sta->drv_priv;
2311    
2312     + if (!(sc->sc_flags & SC_OP_TXAGGR))
2313     + return;
2314     +
2315     switch (cmd) {
2316     case STA_NOTIFY_SLEEP:
2317     an->sleeping = true;
2318     diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
2319     index c04a6c3..297d762 100644
2320     --- a/drivers/net/wireless/ath/ath9k/rc.c
2321     +++ b/drivers/net/wireless/ath/ath9k/rc.c
2322     @@ -1250,7 +1250,9 @@ static void ath_rc_init(struct ath_softc *sc,
2323    
2324     ath_rc_priv->max_valid_rate = k;
2325     ath_rc_sort_validrates(rate_table, ath_rc_priv);
2326     - ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
2327     + ath_rc_priv->rate_max_phy = (k > 4) ?
2328     + ath_rc_priv->valid_rate_index[k-4] :
2329     + ath_rc_priv->valid_rate_index[k-1];
2330     ath_rc_priv->rate_table = rate_table;
2331    
2332     ath_dbg(common, ATH_DBG_CONFIG,
2333     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2334     index eabbf1a..5493f94 100644
2335     --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2336     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2337     @@ -620,8 +620,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
2338     if (ctx->ht.enabled) {
2339     /* if HT40 is used, it should not change
2340     * after associated except channel switch */
2341     - if (iwl_is_associated_ctx(ctx) &&
2342     - !ctx->ht.is_40mhz)
2343     + if (!ctx->ht.is_40mhz ||
2344     + !iwl_is_associated_ctx(ctx))
2345     iwlagn_config_ht40(conf, ctx);
2346     } else
2347     ctx->ht.is_40mhz = false;
2348     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2349     index 53bb59e..475f9d4 100644
2350     --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2351     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
2352     @@ -166,7 +166,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
2353     tx_cmd->tid_tspec = qc[0] & 0xf;
2354     tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2355     } else {
2356     - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2357     + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
2358     + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2359     + else
2360     + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2361     }
2362    
2363     iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
2364     diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
2365     index 41f0de9..32eb4fe 100644
2366     --- a/drivers/net/wireless/iwlwifi/iwl-trans.c
2367     +++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
2368     @@ -1068,9 +1068,7 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
2369     iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
2370    
2371     /* Set up entry for this TFD in Tx byte-count array */
2372     - if (ampdu)
2373     - iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
2374     - le16_to_cpu(tx_cmd->len));
2375     + iwl_trans_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len));
2376    
2377     dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
2378     DMA_BIDIRECTIONAL);
2379     diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
2380     index 592a10a..3b585aa 100644
2381     --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
2382     +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
2383     @@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2384     }
2385     case ERFSLEEP:{
2386     if (ppsc->rfpwr_state == ERFOFF)
2387     - break;
2388     + return false;
2389     for (queue_id = 0, i = 0;
2390     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2391     ring = &pcipriv->dev.tx_ring[queue_id];
2392     diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
2393     index 7285290..e49cf22 100644
2394     --- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
2395     +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
2396     @@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
2397     break;
2398     case ERFSLEEP:
2399     if (ppsc->rfpwr_state == ERFOFF)
2400     - break;
2401     + return false;
2402     for (queue_id = 0, i = 0;
2403     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2404     ring = &pcipriv->dev.tx_ring[queue_id];
2405     diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
2406     index 3ac7af1..0883349 100644
2407     --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
2408     +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
2409     @@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
2410     break;
2411     case ERFSLEEP:
2412     if (ppsc->rfpwr_state == ERFOFF)
2413     - break;
2414     + return false;
2415    
2416     for (queue_id = 0, i = 0;
2417     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2418     diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
2419     index f27171a..f10ac1a 100644
2420     --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
2421     +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
2422     @@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
2423     }
2424     case ERFSLEEP:
2425     if (ppsc->rfpwr_state == ERFOFF)
2426     - break;
2427     + return false;
2428    
2429     for (queue_id = 0, i = 0;
2430     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2431     diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
2432     index 89f6345..84a208d 100644
2433     --- a/drivers/oprofile/oprofile_files.c
2434     +++ b/drivers/oprofile/oprofile_files.c
2435     @@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
2436     return -EINVAL;
2437    
2438     retval = oprofilefs_ulong_from_user(&val, buf, count);
2439     - if (retval)
2440     + if (retval <= 0)
2441     return retval;
2442    
2443     retval = oprofile_set_timeout(val);
2444     @@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
2445     return -EINVAL;
2446    
2447     retval = oprofilefs_ulong_from_user(&val, buf, count);
2448     - if (retval)
2449     + if (retval <= 0)
2450     return retval;
2451    
2452     retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
2453     @@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
2454     return -EINVAL;
2455    
2456     retval = oprofilefs_ulong_from_user(&val, buf, count);
2457     - if (retval)
2458     + if (retval <= 0)
2459     return retval;
2460    
2461     + retval = 0;
2462     if (val)
2463     retval = oprofile_start();
2464     else
2465     diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
2466     index e9ff6f7..1c0b799 100644
2467     --- a/drivers/oprofile/oprofilefs.c
2468     +++ b/drivers/oprofile/oprofilefs.c
2469     @@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
2470     }
2471    
2472    
2473     +/*
2474     + * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
2475     + * unchanged and might be uninitialized. This follows write syscall
2476     + * implementation when count is zero: "If count is zero ... [and if]
2477     + * no errors are detected, 0 will be returned without causing any
2478     + * other effect." (man 2 write)
2479     + */
2480     int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
2481     {
2482     char tmpbuf[TMPBUFSIZE];
2483     @@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
2484     spin_lock_irqsave(&oprofilefs_lock, flags);
2485     *val = simple_strtoul(tmpbuf, NULL, 0);
2486     spin_unlock_irqrestore(&oprofilefs_lock, flags);
2487     - return 0;
2488     + return count;
2489     }
2490    
2491    
2492     @@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
2493     return -EINVAL;
2494    
2495     retval = oprofilefs_ulong_from_user(&value, buf, count);
2496     - if (retval)
2497     + if (retval <= 0)
2498     return retval;
2499    
2500     retval = oprofile_set_ulong(file->private_data, value);
2501     diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
2502     index 3d9d2b9..44e91e5 100644
2503     --- a/drivers/rtc/interface.c
2504     +++ b/drivers/rtc/interface.c
2505     @@ -318,20 +318,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2506     }
2507     EXPORT_SYMBOL_GPL(rtc_read_alarm);
2508    
2509     -static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2510     -{
2511     - int err;
2512     -
2513     - if (!rtc->ops)
2514     - err = -ENODEV;
2515     - else if (!rtc->ops->set_alarm)
2516     - err = -EINVAL;
2517     - else
2518     - err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
2519     -
2520     - return err;
2521     -}
2522     -
2523     static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2524     {
2525     struct rtc_time tm;
2526     @@ -355,7 +341,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2527     * over right here, before we set the alarm.
2528     */
2529    
2530     - return ___rtc_set_alarm(rtc, alarm);
2531     + if (!rtc->ops)
2532     + err = -ENODEV;
2533     + else if (!rtc->ops->set_alarm)
2534     + err = -EINVAL;
2535     + else
2536     + err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
2537     +
2538     + return err;
2539     }
2540    
2541     int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2542     @@ -769,20 +762,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
2543     return 0;
2544     }
2545    
2546     -static void rtc_alarm_disable(struct rtc_device *rtc)
2547     -{
2548     - struct rtc_wkalrm alarm;
2549     - struct rtc_time tm;
2550     -
2551     - __rtc_read_time(rtc, &tm);
2552     -
2553     - alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
2554     - ktime_set(300, 0)));
2555     - alarm.enabled = 0;
2556     -
2557     - ___rtc_set_alarm(rtc, &alarm);
2558     -}
2559     -
2560     /**
2561     * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
2562     * @rtc rtc device
2563     @@ -804,10 +783,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
2564     struct rtc_wkalrm alarm;
2565     int err;
2566     next = timerqueue_getnext(&rtc->timerqueue);
2567     - if (!next) {
2568     - rtc_alarm_disable(rtc);
2569     + if (!next)
2570     return;
2571     - }
2572     alarm.time = rtc_ktime_to_tm(next->expires);
2573     alarm.enabled = 1;
2574     err = __rtc_set_alarm(rtc, &alarm);
2575     @@ -869,8 +846,7 @@ again:
2576     err = __rtc_set_alarm(rtc, &alarm);
2577     if (err == -ETIME)
2578     goto again;
2579     - } else
2580     - rtc_alarm_disable(rtc);
2581     + }
2582    
2583     mutex_unlock(&rtc->ops_lock);
2584     }
2585     diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
2586     index eda128f..64aedd8 100644
2587     --- a/drivers/rtc/rtc-m41t80.c
2588     +++ b/drivers/rtc/rtc-m41t80.c
2589     @@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
2590     static struct rtc_class_ops m41t80_rtc_ops = {
2591     .read_time = m41t80_rtc_read_time,
2592     .set_time = m41t80_rtc_set_time,
2593     + /*
2594     + * XXX - m41t80 alarm functionality is reported broken.
2595     + * until it is fixed, don't register alarm functions.
2596     + *
2597     .read_alarm = m41t80_rtc_read_alarm,
2598     .set_alarm = m41t80_rtc_set_alarm,
2599     + */
2600     .proc = m41t80_rtc_proc,
2601     + /*
2602     + * See above comment on broken alarm
2603     + *
2604     .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
2605     + */
2606     };
2607    
2608     #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
2609     diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2610     index 7cac873..169ba7b 100644
2611     --- a/drivers/s390/scsi/zfcp_scsi.c
2612     +++ b/drivers/s390/scsi/zfcp_scsi.c
2613     @@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
2614     {
2615     struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2616    
2617     + /* if previous slave_alloc returned early, there is nothing to do */
2618     + if (!zfcp_sdev->port)
2619     + return;
2620     +
2621     zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
2622     put_device(&zfcp_sdev->port->dev);
2623     }
2624     diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
2625     index 5d0e9a2..8858170 100644
2626     --- a/drivers/scsi/fcoe/fcoe.c
2627     +++ b/drivers/scsi/fcoe/fcoe.c
2628     @@ -1635,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
2629     stats->InvalidCRCCount++;
2630     if (stats->InvalidCRCCount < 5)
2631     printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
2632     + put_cpu();
2633     return -EINVAL;
2634     }
2635    
2636     diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2637     index 83035bd..39e81cd 100644
2638     --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2639     +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2640     @@ -1082,41 +1082,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
2641     }
2642    
2643     /**
2644     - * _base_save_msix_table - backup msix vector table
2645     - * @ioc: per adapter object
2646     - *
2647     - * This address an errata where diag reset clears out the table
2648     - */
2649     -static void
2650     -_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
2651     -{
2652     - int i;
2653     -
2654     - if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
2655     - return;
2656     -
2657     - for (i = 0; i < ioc->msix_vector_count; i++)
2658     - ioc->msix_table_backup[i] = ioc->msix_table[i];
2659     -}
2660     -
2661     -/**
2662     - * _base_restore_msix_table - this restores the msix vector table
2663     - * @ioc: per adapter object
2664     - *
2665     - */
2666     -static void
2667     -_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
2668     -{
2669     - int i;
2670     -
2671     - if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
2672     - return;
2673     -
2674     - for (i = 0; i < ioc->msix_vector_count; i++)
2675     - ioc->msix_table[i] = ioc->msix_table_backup[i];
2676     -}
2677     -
2678     -/**
2679     * _base_check_enable_msix - checks MSIX capabable.
2680     * @ioc: per adapter object
2681     *
2682     @@ -1128,7 +1093,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2683     {
2684     int base;
2685     u16 message_control;
2686     - u32 msix_table_offset;
2687     +
2688    
2689     base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2690     if (!base) {
2691     @@ -1141,14 +1106,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2692     pci_read_config_word(ioc->pdev, base + 2, &message_control);
2693     ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2694    
2695     - /* get msix table */
2696     - pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
2697     - msix_table_offset &= 0xFFFFFFF8;
2698     - ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
2699     -
2700     dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
2701     - "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
2702     - ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
2703     + "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
2704     return 0;
2705     }
2706    
2707     @@ -1162,8 +1121,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
2708     {
2709     if (ioc->msix_enable) {
2710     pci_disable_msix(ioc->pdev);
2711     - kfree(ioc->msix_table_backup);
2712     - ioc->msix_table_backup = NULL;
2713     ioc->msix_enable = 0;
2714     }
2715     }
2716     @@ -1189,14 +1146,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
2717     if (_base_check_enable_msix(ioc) != 0)
2718     goto try_ioapic;
2719    
2720     - ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
2721     - sizeof(u32), GFP_KERNEL);
2722     - if (!ioc->msix_table_backup) {
2723     - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
2724     - "msix_table_backup failed!!!\n", ioc->name));
2725     - goto try_ioapic;
2726     - }
2727     -
2728     memset(&entries, 0, sizeof(struct msix_entry));
2729     r = pci_enable_msix(ioc->pdev, &entries, 1);
2730     if (r) {
2731     @@ -3513,9 +3462,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2732     u32 hcb_size;
2733    
2734     printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
2735     -
2736     - _base_save_msix_table(ioc);
2737     -
2738     drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
2739     ioc->name));
2740    
2741     @@ -3611,7 +3557,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2742     goto out;
2743     }
2744    
2745     - _base_restore_msix_table(ioc);
2746     printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
2747     return 0;
2748    
2749     diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
2750     index 8d5be21..7df640f 100644
2751     --- a/drivers/scsi/mpt2sas/mpt2sas_base.h
2752     +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
2753     @@ -636,8 +636,6 @@ enum mutex_type {
2754     * @wait_for_port_enable_to_complete:
2755     * @msix_enable: flag indicating msix is enabled
2756     * @msix_vector_count: number msix vectors
2757     - * @msix_table: virt address to the msix table
2758     - * @msix_table_backup: backup msix table
2759     * @scsi_io_cb_idx: shost generated commands
2760     * @tm_cb_idx: task management commands
2761     * @scsih_cb_idx: scsih internal commands
2762     @@ -779,8 +777,6 @@ struct MPT2SAS_ADAPTER {
2763    
2764     u8 msix_enable;
2765     u16 msix_vector_count;
2766     - u32 *msix_table;
2767     - u32 *msix_table_backup;
2768     u32 ioc_reset_count;
2769    
2770     /* internal commands, callback index */
2771     diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2772     index 97aac82..d3b3567 100644
2773     --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2774     +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2775     @@ -4210,7 +4210,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2776     /* insert into event log */
2777     sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
2778     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
2779     - event_reply = kzalloc(sz, GFP_KERNEL);
2780     + event_reply = kzalloc(sz, GFP_ATOMIC);
2781     if (!event_reply) {
2782     printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
2783     ioc->name, __FILE__, __LINE__, __func__);
2784     diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
2785     index e6ac317..32c535f 100644
2786     --- a/drivers/ssb/driver_pcicore.c
2787     +++ b/drivers/ssb/driver_pcicore.c
2788     @@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
2789    
2790     static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
2791     {
2792     - ssb_pcicore_fix_sprom_core_index(pc);
2793     + struct ssb_device *pdev = pc->dev;
2794     + struct ssb_bus *bus = pdev->bus;
2795     +
2796     + if (bus->bustype == SSB_BUSTYPE_PCI)
2797     + ssb_pcicore_fix_sprom_core_index(pc);
2798    
2799     /* Disable PCI interrupts. */
2800     - ssb_write32(pc->dev, SSB_INTVEC, 0);
2801     + ssb_write32(pdev, SSB_INTVEC, 0);
2802    
2803     /* Additional PCIe always once-executed workarounds */
2804     if (pc->dev->id.coreid == SSB_DEV_PCIE) {
2805     diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
2806     index 809cbda..1795977 100644
2807     --- a/drivers/watchdog/hpwdt.c
2808     +++ b/drivers/watchdog/hpwdt.c
2809     @@ -230,6 +230,7 @@ static int __devinit cru_detect(unsigned long map_entry,
2810    
2811     cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
2812    
2813     + set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
2814     asminline_call(&cmn_regs, bios32_entrypoint);
2815    
2816     if (cmn_regs.u1.ral != 0) {
2817     @@ -247,8 +248,10 @@ static int __devinit cru_detect(unsigned long map_entry,
2818     if ((physical_bios_base + physical_bios_offset)) {
2819     cru_rom_addr =
2820     ioremap(cru_physical_address, cru_length);
2821     - if (cru_rom_addr)
2822     + if (cru_rom_addr) {
2823     + set_memory_x((unsigned long)cru_rom_addr, cru_length);
2824     retval = 0;
2825     + }
2826     }
2827    
2828     printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
2829     diff --git a/fs/nfs/file.c b/fs/nfs/file.c
2830     index 5b3d984..babaf3a 100644
2831     --- a/fs/nfs/file.c
2832     +++ b/fs/nfs/file.c
2833     @@ -191,7 +191,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
2834     * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
2835     * the cached file length
2836     */
2837     - if (origin != SEEK_SET || origin != SEEK_CUR) {
2838     + if (origin != SEEK_SET && origin != SEEK_CUR) {
2839     struct inode *inode = filp->f_mapping->host;
2840    
2841     int retval = nfs_revalidate_file_size(inode, filp);
2842     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2843     index 39914be..efd8431 100644
2844     --- a/fs/nfs/nfs4state.c
2845     +++ b/fs/nfs/nfs4state.c
2846     @@ -1525,16 +1525,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2847     {
2848     if (!flags)
2849     return;
2850     - else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2851     + if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2852     nfs41_handle_server_reboot(clp);
2853     - else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
2854     + if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
2855     SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2856     SEQ4_STATUS_ADMIN_STATE_REVOKED |
2857     SEQ4_STATUS_LEASE_MOVED))
2858     nfs41_handle_state_revoked(clp);
2859     - else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2860     + if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
2861     nfs41_handle_recallable_state_revoked(clp);
2862     - else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2863     + if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
2864     SEQ4_STATUS_BACKCHANNEL_FAULT |
2865     SEQ4_STATUS_CB_PATH_DOWN_SESSION))
2866     nfs41_handle_cb_path_down(clp);
2867     diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
2868     index 41d6743..3e65427 100644
2869     --- a/fs/nilfs2/ioctl.c
2870     +++ b/fs/nilfs2/ioctl.c
2871     @@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2872     case FS_IOC32_GETVERSION:
2873     cmd = FS_IOC_GETVERSION;
2874     break;
2875     + case NILFS_IOCTL_CHANGE_CPMODE:
2876     + case NILFS_IOCTL_DELETE_CHECKPOINT:
2877     + case NILFS_IOCTL_GET_CPINFO:
2878     + case NILFS_IOCTL_GET_CPSTAT:
2879     + case NILFS_IOCTL_GET_SUINFO:
2880     + case NILFS_IOCTL_GET_SUSTAT:
2881     + case NILFS_IOCTL_GET_VINFO:
2882     + case NILFS_IOCTL_GET_BDESCS:
2883     + case NILFS_IOCTL_CLEAN_SEGMENTS:
2884     + case NILFS_IOCTL_SYNC:
2885     + case NILFS_IOCTL_RESIZE:
2886     + case NILFS_IOCTL_SET_ALLOC_RANGE:
2887     + break;
2888     default:
2889     return -ENOIOCTLCMD;
2890     }
2891     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2892     index 7fbaa91..5e30b45 100644
2893     --- a/include/linux/blkdev.h
2894     +++ b/include/linux/blkdev.h
2895     @@ -803,9 +803,6 @@ extern void blk_unprep_request(struct request *);
2896     */
2897     extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
2898     spinlock_t *lock, int node_id);
2899     -extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
2900     - request_fn_proc *,
2901     - spinlock_t *, int node_id);
2902     extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
2903     extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
2904     request_fn_proc *, spinlock_t *);
2905     diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
2906     index 6427d29..530e11b 100644
2907     --- a/include/linux/i2c/twl4030-madc.h
2908     +++ b/include/linux/i2c/twl4030-madc.h
2909     @@ -129,6 +129,10 @@ enum sample_type {
2910     #define REG_BCICTL2 0x024
2911     #define TWL4030_BCI_ITHSENS 0x007
2912    
2913     +/* Register and bits for GPBR1 register */
2914     +#define TWL4030_REG_GPBR1 0x0c
2915     +#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7)
2916     +
2917     struct twl4030_madc_user_parms {
2918     int channel;
2919     int average;
2920     diff --git a/include/linux/lglock.h b/include/linux/lglock.h
2921     index f549056..87f402c 100644
2922     --- a/include/linux/lglock.h
2923     +++ b/include/linux/lglock.h
2924     @@ -22,6 +22,7 @@
2925     #include <linux/spinlock.h>
2926     #include <linux/lockdep.h>
2927     #include <linux/percpu.h>
2928     +#include <linux/cpu.h>
2929    
2930     /* can make br locks by using local lock for read side, global lock for write */
2931     #define br_lock_init(name) name##_lock_init()
2932     @@ -72,9 +73,31 @@
2933    
2934     #define DEFINE_LGLOCK(name) \
2935     \
2936     + DEFINE_SPINLOCK(name##_cpu_lock); \
2937     + cpumask_t name##_cpus __read_mostly; \
2938     DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
2939     DEFINE_LGLOCK_LOCKDEP(name); \
2940     \
2941     + static int \
2942     + name##_lg_cpu_callback(struct notifier_block *nb, \
2943     + unsigned long action, void *hcpu) \
2944     + { \
2945     + switch (action & ~CPU_TASKS_FROZEN) { \
2946     + case CPU_UP_PREPARE: \
2947     + spin_lock(&name##_cpu_lock); \
2948     + cpu_set((unsigned long)hcpu, name##_cpus); \
2949     + spin_unlock(&name##_cpu_lock); \
2950     + break; \
2951     + case CPU_UP_CANCELED: case CPU_DEAD: \
2952     + spin_lock(&name##_cpu_lock); \
2953     + cpu_clear((unsigned long)hcpu, name##_cpus); \
2954     + spin_unlock(&name##_cpu_lock); \
2955     + } \
2956     + return NOTIFY_OK; \
2957     + } \
2958     + static struct notifier_block name##_lg_cpu_notifier = { \
2959     + .notifier_call = name##_lg_cpu_callback, \
2960     + }; \
2961     void name##_lock_init(void) { \
2962     int i; \
2963     LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
2964     @@ -83,6 +106,11 @@
2965     lock = &per_cpu(name##_lock, i); \
2966     *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
2967     } \
2968     + register_hotcpu_notifier(&name##_lg_cpu_notifier); \
2969     + get_online_cpus(); \
2970     + for_each_online_cpu(i) \
2971     + cpu_set(i, name##_cpus); \
2972     + put_online_cpus(); \
2973     } \
2974     EXPORT_SYMBOL(name##_lock_init); \
2975     \
2976     @@ -124,9 +152,9 @@
2977     \
2978     void name##_global_lock_online(void) { \
2979     int i; \
2980     - preempt_disable(); \
2981     + spin_lock(&name##_cpu_lock); \
2982     rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
2983     - for_each_online_cpu(i) { \
2984     + for_each_cpu(i, &name##_cpus) { \
2985     arch_spinlock_t *lock; \
2986     lock = &per_cpu(name##_lock, i); \
2987     arch_spin_lock(lock); \
2988     @@ -137,12 +165,12 @@
2989     void name##_global_unlock_online(void) { \
2990     int i; \
2991     rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
2992     - for_each_online_cpu(i) { \
2993     + for_each_cpu(i, &name##_cpus) { \
2994     arch_spinlock_t *lock; \
2995     lock = &per_cpu(name##_lock, i); \
2996     arch_spin_unlock(lock); \
2997     } \
2998     - preempt_enable(); \
2999     + spin_unlock(&name##_cpu_lock); \
3000     } \
3001     EXPORT_SYMBOL(name##_global_unlock_online); \
3002     \
3003     diff --git a/include/net/dst.h b/include/net/dst.h
3004     index 13d507d..8295249 100644
3005     --- a/include/net/dst.h
3006     +++ b/include/net/dst.h
3007     @@ -53,6 +53,7 @@ struct dst_entry {
3008     #define DST_NOHASH 0x0008
3009     #define DST_NOCACHE 0x0010
3010     #define DST_NOCOUNT 0x0020
3011     +#define DST_NOPEER 0x0040
3012    
3013     short error;
3014     short obsolete;
3015     diff --git a/include/net/flow.h b/include/net/flow.h
3016     index a094477..57f15a7 100644
3017     --- a/include/net/flow.h
3018     +++ b/include/net/flow.h
3019     @@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
3020     u8 dir, flow_resolve_t resolver, void *ctx);
3021    
3022     extern void flow_cache_flush(void);
3023     +extern void flow_cache_flush_deferred(void);
3024     extern atomic_t flow_cache_genid;
3025    
3026     #endif
3027     diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
3028     index f7d9c3f..ec86952 100644
3029     --- a/include/net/sctp/structs.h
3030     +++ b/include/net/sctp/structs.h
3031     @@ -241,6 +241,9 @@ extern struct sctp_globals {
3032     * bits is an indicator of when to send and window update SACK.
3033     */
3034     int rwnd_update_shift;
3035     +
3036     + /* Threshold for autoclose timeout, in seconds. */
3037     + unsigned long max_autoclose;
3038     } sctp_globals;
3039    
3040     #define sctp_rto_initial (sctp_globals.rto_initial)
3041     @@ -281,6 +284,7 @@ extern struct sctp_globals {
3042     #define sctp_auth_enable (sctp_globals.auth_enable)
3043     #define sctp_checksum_disable (sctp_globals.checksum_disable)
3044     #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
3045     +#define sctp_max_autoclose (sctp_globals.max_autoclose)
3046    
3047     /* SCTP Socket type: UDP or TCP style. */
3048     typedef enum {
3049     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
3050     index 1d2b6ce..b7ab0b8 100644
3051     --- a/kernel/cgroup.c
3052     +++ b/kernel/cgroup.c
3053     @@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
3054     continue;
3055     /* get old css_set pointer */
3056     task_lock(tsk);
3057     - if (tsk->flags & PF_EXITING) {
3058     - /* ignore this task if it's going away */
3059     - task_unlock(tsk);
3060     - continue;
3061     - }
3062     oldcg = tsk->cgroups;
3063     get_css_set(oldcg);
3064     task_unlock(tsk);
3065     diff --git a/kernel/exit.c b/kernel/exit.c
3066     index 2913b35..9e316ae 100644
3067     --- a/kernel/exit.c
3068     +++ b/kernel/exit.c
3069     @@ -1542,8 +1542,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
3070     }
3071    
3072     /* dead body doesn't have much to contribute */
3073     - if (p->exit_state == EXIT_DEAD)
3074     + if (unlikely(p->exit_state == EXIT_DEAD)) {
3075     + /*
3076     + * But do not ignore this task until the tracer does
3077     + * wait_task_zombie()->do_notify_parent().
3078     + */
3079     + if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
3080     + wo->notask_error = 0;
3081     return 0;
3082     + }
3083    
3084     /* slay zombie? */
3085     if (p->exit_state == EXIT_ZOMBIE) {
3086     diff --git a/kernel/futex.c b/kernel/futex.c
3087     index 11cbe05..e6160fa 100644
3088     --- a/kernel/futex.c
3089     +++ b/kernel/futex.c
3090     @@ -314,17 +314,29 @@ again:
3091     #endif
3092    
3093     lock_page(page_head);
3094     +
3095     + /*
3096     + * If page_head->mapping is NULL, then it cannot be a PageAnon
3097     + * page; but it might be the ZERO_PAGE or in the gate area or
3098     + * in a special mapping (all cases which we are happy to fail);
3099     + * or it may have been a good file page when get_user_pages_fast
3100     + * found it, but truncated or holepunched or subjected to
3101     + * invalidate_complete_page2 before we got the page lock (also
3102     + * cases which we are happy to fail). And we hold a reference,
3103     + * so refcount care in invalidate_complete_page's remove_mapping
3104     + * prevents drop_caches from setting mapping to NULL beneath us.
3105     + *
3106     + * The case we do have to guard against is when memory pressure made
3107     + * shmem_writepage move it from filecache to swapcache beneath us:
3108     + * an unlikely race, but we do need to retry for page_head->mapping.
3109     + */
3110     if (!page_head->mapping) {
3111     + int shmem_swizzled = PageSwapCache(page_head);
3112     unlock_page(page_head);
3113     put_page(page_head);
3114     - /*
3115     - * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
3116     - * trying to find one. RW mapping would have COW'd (and thus
3117     - * have a mapping) so this page is RO and won't ever change.
3118     - */
3119     - if ((page_head == ZERO_PAGE(address)))
3120     - return -EFAULT;
3121     - goto again;
3122     + if (shmem_swizzled)
3123     + goto again;
3124     + return -EFAULT;
3125     }
3126    
3127     /*
3128     diff --git a/kernel/hung_task.c b/kernel/hung_task.c
3129     index ea64012..e972276 100644
3130     --- a/kernel/hung_task.c
3131     +++ b/kernel/hung_task.c
3132     @@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
3133    
3134     /*
3135     * Ensure the task is not frozen.
3136     - * Also, when a freshly created task is scheduled once, changes
3137     - * its state to TASK_UNINTERRUPTIBLE without having ever been
3138     - * switched out once, it musn't be checked.
3139     + * Also, skip vfork and any other user process that the freezer should skip.
3140     */
3141     - if (unlikely(t->flags & PF_FROZEN || !switch_count))
3142     + if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
3143     + return;
3144     +
3145     + /*
3146     + * When a freshly created task is scheduled once and changes its state to
3147     + * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
3148     + * mustn't be checked.
3149     + */
3150     + if (unlikely(!switch_count))
3151     return;
3152    
3153     if (switch_count != t->last_switch_count) {
3154     diff --git a/kernel/ptrace.c b/kernel/ptrace.c
3155     index a70d2a5..67d1fdd 100644
3156     --- a/kernel/ptrace.c
3157     +++ b/kernel/ptrace.c
3158     @@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
3159     */
3160     if (!(child->flags & PF_EXITING) &&
3161     (child->signal->flags & SIGNAL_STOP_STOPPED ||
3162     - child->signal->group_stop_count))
3163     + child->signal->group_stop_count)) {
3164     child->jobctl |= JOBCTL_STOP_PENDING;
3165    
3166     + /*
3167     + * This is only possible if this thread was cloned by the
3168     + * traced task running in the stopped group; set the signal
3169     + * for future reports.
3170     + * FIXME: we should change ptrace_init_task() to handle this
3171     + * case.
3172     + */
3173     + if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
3174     + child->jobctl |= SIGSTOP;
3175     + }
3176     +
3177     /*
3178     * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
3179     * @child in the butt. Note that @resume should be used iff @child
3180     diff --git a/kernel/signal.c b/kernel/signal.c
3181     index 291c970..195331c 100644
3182     --- a/kernel/signal.c
3183     +++ b/kernel/signal.c
3184     @@ -1986,8 +1986,6 @@ static bool do_signal_stop(int signr)
3185     */
3186     if (!(sig->flags & SIGNAL_STOP_STOPPED))
3187     sig->group_exit_code = signr;
3188     - else
3189     - WARN_ON_ONCE(!current->ptrace);
3190    
3191     sig->group_stop_count = 0;
3192    
3193     diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
3194     index e8bffbe..2ce1b30 100644
3195     --- a/kernel/sysctl_binary.c
3196     +++ b/kernel/sysctl_binary.c
3197     @@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
3198    
3199     fput(file);
3200     out_putname:
3201     - putname(pathname);
3202     + __putname(pathname);
3203     out:
3204     return result;
3205     }
3206     diff --git a/mm/filemap.c b/mm/filemap.c
3207     index 7771871..b91f3aa 100644
3208     --- a/mm/filemap.c
3209     +++ b/mm/filemap.c
3210     @@ -1828,7 +1828,7 @@ repeat:
3211     page = __page_cache_alloc(gfp | __GFP_COLD);
3212     if (!page)
3213     return ERR_PTR(-ENOMEM);
3214     - err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
3215     + err = add_to_page_cache_lru(page, mapping, index, gfp);
3216     if (unlikely(err)) {
3217     page_cache_release(page);
3218     if (err == -EEXIST)
3219     @@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
3220     * @gfp: the page allocator flags to use if allocating
3221     *
3222     * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3223     - * any new page allocations done using the specified allocation flags. Note
3224     - * that the Radix tree operations will still use GFP_KERNEL, so you can't
3225     - * expect to do this atomically or anything like that - but you can pass in
3226     - * other page requirements.
3227     + * any new page allocations done using the specified allocation flags.
3228     *
3229     * If the page does not get brought uptodate, return -EIO.
3230     */
3231     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3232     index 73f17c0..2316840 100644
3233     --- a/mm/hugetlb.c
3234     +++ b/mm/hugetlb.c
3235     @@ -901,7 +901,6 @@ retry:
3236     h->resv_huge_pages += delta;
3237     ret = 0;
3238    
3239     - spin_unlock(&hugetlb_lock);
3240     /* Free the needed pages to the hugetlb pool */
3241     list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
3242     if ((--needed) < 0)
3243     @@ -915,6 +914,7 @@ retry:
3244     VM_BUG_ON(page_count(page));
3245     enqueue_huge_page(h, page);
3246     }
3247     + spin_unlock(&hugetlb_lock);
3248    
3249     /* Free unnecessary surplus pages to the buddy allocator */
3250     free:
3251     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3252     index 3508777..afde618 100644
3253     --- a/mm/memcontrol.c
3254     +++ b/mm/memcontrol.c
3255     @@ -4898,9 +4898,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3256     int cpu;
3257     enable_swap_cgroup();
3258     parent = NULL;
3259     - root_mem_cgroup = mem;
3260     if (mem_cgroup_soft_limit_tree_init())
3261     goto free_out;
3262     + root_mem_cgroup = mem;
3263     for_each_possible_cpu(cpu) {
3264     struct memcg_stock_pcp *stock =
3265     &per_cpu(memcg_stock, cpu);
3266     @@ -4939,7 +4939,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3267     return &mem->css;
3268     free_out:
3269     __mem_cgroup_free(mem);
3270     - root_mem_cgroup = NULL;
3271     return ERR_PTR(error);
3272     }
3273    
3274     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
3275     index 9c51f9f..2775fd0 100644
3276     --- a/mm/mempolicy.c
3277     +++ b/mm/mempolicy.c
3278     @@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
3279     struct vm_area_struct *prev;
3280     struct vm_area_struct *vma;
3281     int err = 0;
3282     + pgoff_t pgoff;
3283     unsigned long vmstart;
3284     unsigned long vmend;
3285    
3286     @@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
3287     if (!vma || vma->vm_start > start)
3288     return -EFAULT;
3289    
3290     + if (start > vma->vm_start)
3291     + prev = vma;
3292     +
3293     for (; vma && vma->vm_start < end; prev = vma, vma = next) {
3294     next = vma->vm_next;
3295     vmstart = max(start, vma->vm_start);
3296     vmend = min(end, vma->vm_end);
3297    
3298     + if (mpol_equal(vma_policy(vma), new_pol))
3299     + continue;
3300     +
3301     + pgoff = vma->vm_pgoff +
3302     + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
3303     prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
3304     - vma->anon_vma, vma->vm_file, vma->vm_pgoff,
3305     + vma->anon_vma, vma->vm_file, pgoff,
3306     new_pol);
3307     if (prev) {
3308     vma = prev;
3309     diff --git a/mm/oom_kill.c b/mm/oom_kill.c
3310     index 626303b..e9a1785 100644
3311     --- a/mm/oom_kill.c
3312     +++ b/mm/oom_kill.c
3313     @@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
3314     unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
3315     const nodemask_t *nodemask, unsigned long totalpages)
3316     {
3317     - int points;
3318     + long points;
3319    
3320     if (oom_unkillable_task(p, mem, nodemask))
3321     return 0;
3322     diff --git a/mm/percpu.c b/mm/percpu.c
3323     index 93b5a7c..0ae7a09 100644
3324     --- a/mm/percpu.c
3325     +++ b/mm/percpu.c
3326     @@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
3327     if (!is_vmalloc_addr(addr))
3328     return __pa(addr);
3329     else
3330     - return page_to_phys(vmalloc_to_page(addr));
3331     + return page_to_phys(vmalloc_to_page(addr)) +
3332     + offset_in_page(addr);
3333     } else
3334     - return page_to_phys(pcpu_addr_to_page(addr));
3335     + return page_to_phys(pcpu_addr_to_page(addr)) +
3336     + offset_in_page(addr);
3337     }
3338    
3339     /**
3340     diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
3341     index d6ec372..5693e5f 100644
3342     --- a/net/bridge/br_netfilter.c
3343     +++ b/net/bridge/br_netfilter.c
3344     @@ -141,7 +141,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
3345     rt->dst.dev = br->dev;
3346     rt->dst.path = &rt->dst;
3347     dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
3348     - rt->dst.flags = DST_NOXFRM;
3349     + rt->dst.flags = DST_NOXFRM | DST_NOPEER;
3350     rt->dst.ops = &fake_dst_ops;
3351     }
3352    
3353     diff --git a/net/core/flow.c b/net/core/flow.c
3354     index 555a456..d6968e5 100644
3355     --- a/net/core/flow.c
3356     +++ b/net/core/flow.c
3357     @@ -358,6 +358,18 @@ void flow_cache_flush(void)
3358     put_online_cpus();
3359     }
3360    
3361     +static void flow_cache_flush_task(struct work_struct *work)
3362     +{
3363     + flow_cache_flush();
3364     +}
3365     +
3366     +static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
3367     +
3368     +void flow_cache_flush_deferred(void)
3369     +{
3370     + schedule_work(&flow_cache_flush_work);
3371     +}
3372     +
3373     static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
3374     {
3375     struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
3376     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
3377     index bc19bd0..070f214 100644
3378     --- a/net/ipv4/devinet.c
3379     +++ b/net/ipv4/devinet.c
3380     @@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
3381     void __user *buffer,
3382     size_t *lenp, loff_t *ppos)
3383     {
3384     + int old_value = *(int *)ctl->data;
3385     int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3386     + int new_value = *(int *)ctl->data;
3387    
3388     if (write) {
3389     struct ipv4_devconf *cnf = ctl->extra1;
3390     @@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
3391    
3392     if (cnf == net->ipv4.devconf_dflt)
3393     devinet_copy_dflt_conf(net, i);
3394     + if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
3395     + if ((new_value == 0) && (old_value != 0))
3396     + rt_cache_flush(net, 0);
3397     }
3398    
3399     return ret;
3400     diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
3401     index 472a8c4..004bb74 100644
3402     --- a/net/ipv4/ipconfig.c
3403     +++ b/net/ipv4/ipconfig.c
3404     @@ -252,6 +252,10 @@ static int __init ic_open_devs(void)
3405     }
3406     }
3407    
3408     + /* no point in waiting if we could not bring up at least one device */
3409     + if (!ic_first_dev)
3410     + goto have_carrier;
3411     +
3412     /* wait for a carrier on at least one device */
3413     start = jiffies;
3414     while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
3415     diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
3416     index 378b20b..6f06f7f 100644
3417     --- a/net/ipv4/ipip.c
3418     +++ b/net/ipv4/ipip.c
3419     @@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
3420     if (register_netdevice(dev) < 0)
3421     goto failed_free;
3422    
3423     + strcpy(nt->parms.name, dev->name);
3424     +
3425     dev_hold(dev);
3426     ipip_tunnel_link(ipn, nt);
3427     return nt;
3428     @@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
3429     struct ip_tunnel *tunnel = netdev_priv(dev);
3430    
3431     tunnel->dev = dev;
3432     - strcpy(tunnel->parms.name, dev->name);
3433    
3434     memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
3435     memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
3436     @@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
3437     static int __net_init ipip_init_net(struct net *net)
3438     {
3439     struct ipip_net *ipn = net_generic(net, ipip_net_id);
3440     + struct ip_tunnel *t;
3441     int err;
3442    
3443     ipn->tunnels[0] = ipn->tunnels_wc;
3444     @@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
3445     if ((err = register_netdev(ipn->fb_tunnel_dev)))
3446     goto err_reg_dev;
3447    
3448     + t = netdev_priv(ipn->fb_tunnel_dev);
3449     +
3450     + strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
3451     return 0;
3452    
3453     err_reg_dev:
3454     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3455     index 05ac666c..b563854 100644
3456     --- a/net/ipv4/route.c
3457     +++ b/net/ipv4/route.c
3458     @@ -91,6 +91,7 @@
3459     #include <linux/rcupdate.h>
3460     #include <linux/times.h>
3461     #include <linux/slab.h>
3462     +#include <linux/prefetch.h>
3463     #include <net/dst.h>
3464     #include <net/net_namespace.h>
3465     #include <net/protocol.h>
3466     @@ -134,6 +135,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
3467     static int rt_chain_length_max __read_mostly = 20;
3468     static int redirect_genid;
3469    
3470     +static struct delayed_work expires_work;
3471     +static unsigned long expires_ljiffies;
3472     +
3473     /*
3474     * Interface to generic destination cache.
3475     */
3476     @@ -831,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
3477     return ONE;
3478     }
3479    
3480     +static void rt_check_expire(void)
3481     +{
3482     + static unsigned int rover;
3483     + unsigned int i = rover, goal;
3484     + struct rtable *rth;
3485     + struct rtable __rcu **rthp;
3486     + unsigned long samples = 0;
3487     + unsigned long sum = 0, sum2 = 0;
3488     + unsigned long delta;
3489     + u64 mult;
3490     +
3491     + delta = jiffies - expires_ljiffies;
3492     + expires_ljiffies = jiffies;
3493     + mult = ((u64)delta) << rt_hash_log;
3494     + if (ip_rt_gc_timeout > 1)
3495     + do_div(mult, ip_rt_gc_timeout);
3496     + goal = (unsigned int)mult;
3497     + if (goal > rt_hash_mask)
3498     + goal = rt_hash_mask + 1;
3499     + for (; goal > 0; goal--) {
3500     + unsigned long tmo = ip_rt_gc_timeout;
3501     + unsigned long length;
3502     +
3503     + i = (i + 1) & rt_hash_mask;
3504     + rthp = &rt_hash_table[i].chain;
3505     +
3506     + if (need_resched())
3507     + cond_resched();
3508     +
3509     + samples++;
3510     +
3511     + if (rcu_dereference_raw(*rthp) == NULL)
3512     + continue;
3513     + length = 0;
3514     + spin_lock_bh(rt_hash_lock_addr(i));
3515     + while ((rth = rcu_dereference_protected(*rthp,
3516     + lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
3517     + prefetch(rth->dst.rt_next);
3518     + if (rt_is_expired(rth)) {
3519     + *rthp = rth->dst.rt_next;
3520     + rt_free(rth);
3521     + continue;
3522     + }
3523     + if (rth->dst.expires) {
3524     + /* Entry is expired even if it is in use */
3525     + if (time_before_eq(jiffies, rth->dst.expires)) {
3526     +nofree:
3527     + tmo >>= 1;
3528     + rthp = &rth->dst.rt_next;
3529     + /*
3530     + * We only count entries on
3531     + * a chain with equal hash inputs once
3532     + * so that entries for different QOS
3533     + * levels, and other non-hash input
3534     + * attributes don't unfairly skew
3535     + * the length computation
3536     + */
3537     + length += has_noalias(rt_hash_table[i].chain, rth);
3538     + continue;
3539     + }
3540     + } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
3541     + goto nofree;
3542     +
3543     + /* Cleanup aged off entries. */
3544     + *rthp = rth->dst.rt_next;
3545     + rt_free(rth);
3546     + }
3547     + spin_unlock_bh(rt_hash_lock_addr(i));
3548     + sum += length;
3549     + sum2 += length*length;
3550     + }
3551     + if (samples) {
3552     + unsigned long avg = sum / samples;
3553     + unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
3554     + rt_chain_length_max = max_t(unsigned long,
3555     + ip_rt_gc_elasticity,
3556     + (avg + 4*sd) >> FRACT_BITS);
3557     + }
3558     + rover = i;
3559     +}
3560     +
3561     +/*
3562     + * rt_worker_func() is run in process context.
3563     + * we call rt_check_expire() to scan part of the hash table
3564     + */
3565     +static void rt_worker_func(struct work_struct *work)
3566     +{
3567     + rt_check_expire();
3568     + schedule_delayed_work(&expires_work, ip_rt_gc_interval);
3569     +}
3570     +
3571     /*
3572     * Perturbation of rt_genid by a small quantity [1..256]
3573     * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
3574     @@ -1272,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
3575     {
3576     struct rtable *rt = (struct rtable *) dst;
3577    
3578     - if (rt) {
3579     + if (rt && !(rt->dst.flags & DST_NOPEER)) {
3580     if (rt->peer == NULL)
3581     rt_bind_peer(rt, rt->rt_dst, 1);
3582    
3583     @@ -1283,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
3584     iph->id = htons(inet_getid(rt->peer, more));
3585     return;
3586     }
3587     - } else
3588     + } else if (!rt)
3589     printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
3590     __builtin_return_address(0));
3591    
3592     @@ -3176,6 +3271,13 @@ static ctl_table ipv4_route_table[] = {
3593     .proc_handler = proc_dointvec_jiffies,
3594     },
3595     {
3596     + .procname = "gc_interval",
3597     + .data = &ip_rt_gc_interval,
3598     + .maxlen = sizeof(int),
3599     + .mode = 0644,
3600     + .proc_handler = proc_dointvec_jiffies,
3601     + },
3602     + {
3603     .procname = "redirect_load",
3604     .data = &ip_rt_redirect_load,
3605     .maxlen = sizeof(int),
3606     @@ -3385,6 +3487,11 @@ int __init ip_rt_init(void)
3607     devinet_init();
3608     ip_fib_init();
3609    
3610     + INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3611     + expires_ljiffies = jiffies;
3612     + schedule_delayed_work(&expires_work,
3613     + net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3614     +
3615     if (ip_rt_proc_init())
3616     printk(KERN_ERR "Unable to create route proc files\n");
3617     #ifdef CONFIG_XFRM
3618     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3619     index 4c882cf..55a35c1 100644
3620     --- a/net/ipv6/ip6_output.c
3621     +++ b/net/ipv6/ip6_output.c
3622     @@ -606,7 +606,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
3623     static atomic_t ipv6_fragmentation_id;
3624     int old, new;
3625    
3626     - if (rt) {
3627     + if (rt && !(rt->dst.flags & DST_NOPEER)) {
3628     struct inet_peer *peer;
3629    
3630     if (!rt->rt6i_peer)
3631     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3632     index 57b82dc..f02fe52 100644
3633     --- a/net/ipv6/route.c
3634     +++ b/net/ipv6/route.c
3635     @@ -725,7 +725,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
3636     int attempts = !in_softirq();
3637    
3638     if (!(rt->rt6i_flags&RTF_GATEWAY)) {
3639     - if (rt->rt6i_dst.plen != 128 &&
3640     + if (ort->rt6i_dst.plen != 128 &&
3641     ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
3642     rt->rt6i_flags |= RTF_ANYCAST;
3643     ipv6_addr_copy(&rt->rt6i_gateway, daddr);
3644     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
3645     index 00b15ac..c1e0d63 100644
3646     --- a/net/ipv6/sit.c
3647     +++ b/net/ipv6/sit.c
3648     @@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
3649     if (register_netdevice(dev) < 0)
3650     goto failed_free;
3651    
3652     + strcpy(nt->parms.name, dev->name);
3653     +
3654     dev_hold(dev);
3655    
3656     ipip6_tunnel_link(sitn, nt);
3657     @@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
3658     struct ip_tunnel *tunnel = netdev_priv(dev);
3659    
3660     tunnel->dev = dev;
3661     - strcpy(tunnel->parms.name, dev->name);
3662    
3663     memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
3664     memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
3665     @@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
3666     static int __net_init sit_init_net(struct net *net)
3667     {
3668     struct sit_net *sitn = net_generic(net, sit_net_id);
3669     + struct ip_tunnel *t;
3670     int err;
3671    
3672     sitn->tunnels[0] = sitn->tunnels_wc;
3673     @@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
3674     if ((err = register_netdev(sitn->fb_tunnel_dev)))
3675     goto err_reg_dev;
3676    
3677     + t = netdev_priv(sitn->fb_tunnel_dev);
3678     +
3679     + strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
3680     return 0;
3681    
3682     err_reg_dev:
3683     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3684     index dfd3a64..a18e6c3 100644
3685     --- a/net/llc/af_llc.c
3686     +++ b/net/llc/af_llc.c
3687     @@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
3688     copied += used;
3689     len -= used;
3690    
3691     + /* For non-stream protocols we get one packet per recvmsg call */
3692     + if (sk->sk_type != SOCK_STREAM)
3693     + goto copy_uaddr;
3694     +
3695     if (!(flags & MSG_PEEK)) {
3696     sk_eat_skb(sk, skb, 0);
3697     *seq = 0;
3698     }
3699    
3700     - /* For non stream protcols we get one packet per recvmsg call */
3701     - if (sk->sk_type != SOCK_STREAM)
3702     - goto copy_uaddr;
3703     -
3704     /* Partial read */
3705     if (used + offset < skb->len)
3706     continue;
3707     @@ -857,6 +857,12 @@ copy_uaddr:
3708     }
3709     if (llc_sk(sk)->cmsg_flags)
3710     llc_cmsg_rcv(msg, skb);
3711     +
3712     + if (!(flags & MSG_PEEK)) {
3713     + sk_eat_skb(sk, skb, 0);
3714     + *seq = 0;
3715     + }
3716     +
3717     goto out;
3718     }
3719    
3720     diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3721     index db7db43..b7f4f5c 100644
3722     --- a/net/mac80211/agg-tx.c
3723     +++ b/net/mac80211/agg-tx.c
3724     @@ -304,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
3725     __release(agg_queue);
3726     }
3727    
3728     +/*
3729     + * splice packets from the STA's pending to the local pending,
3730     + * requires a call to ieee80211_agg_splice_finish later
3731     + */
3732     +static void __acquires(agg_queue)
3733     +ieee80211_agg_splice_packets(struct ieee80211_local *local,
3734     + struct tid_ampdu_tx *tid_tx, u16 tid)
3735     +{
3736     + int queue = ieee80211_ac_from_tid(tid);
3737     + unsigned long flags;
3738     +
3739     + ieee80211_stop_queue_agg(local, tid);
3740     +
3741     + if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
3742     + " from the pending queue\n", tid))
3743     + return;
3744     +
3745     + if (!skb_queue_empty(&tid_tx->pending)) {
3746     + spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
3747     + /* copy over remaining packets */
3748     + skb_queue_splice_tail_init(&tid_tx->pending,
3749     + &local->pending[queue]);
3750     + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
3751     + }
3752     +}
3753     +
3754     +static void __releases(agg_queue)
3755     +ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
3756     +{
3757     + ieee80211_wake_queue_agg(local, tid);
3758     +}
3759     +
3760     void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3761     {
3762     struct tid_ampdu_tx *tid_tx;
3763     @@ -315,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3764     tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
3765    
3766     /*
3767     - * While we're asking the driver about the aggregation,
3768     - * stop the AC queue so that we don't have to worry
3769     - * about frames that came in while we were doing that,
3770     - * which would require us to put them to the AC pending
3771     - * afterwards which just makes the code more complex.
3772     + * Start queuing up packets for this aggregation session.
3773     + * We're going to release them once the driver is OK with
3774     + * that.
3775     */
3776     - ieee80211_stop_queue_agg(local, tid);
3777     -
3778     clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
3779    
3780     /*
3781     - * make sure no packets are being processed to get
3782     - * valid starting sequence number
3783     + * Make sure no packets are being processed. This ensures that
3784     + * we have a valid starting sequence number and that in-flight
3785     + * packets have been flushed out and no packets for this TID
3786     + * will go into the driver during the ampdu_action call.
3787     */
3788     synchronize_net();
3789    
3790     @@ -341,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
3791     " tid %d\n", tid);
3792     #endif
3793     spin_lock_bh(&sta->lock);
3794     + ieee80211_agg_splice_packets(local, tid_tx, tid);
3795     ieee80211_assign_tid_tx(sta, tid, NULL);
3796     + ieee80211_agg_splice_finish(local, tid);
3797     spin_unlock_bh(&sta->lock);
3798    
3799     - ieee80211_wake_queue_agg(local, tid);
3800     kfree_rcu(tid_tx, rcu_head);
3801     return;
3802     }
3803    
3804     - /* we can take packets again now */
3805     - ieee80211_wake_queue_agg(local, tid);
3806     -
3807     /* activate the timer for the recipient's addBA response */
3808     mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
3809     #ifdef CONFIG_MAC80211_HT_DEBUG
3810     @@ -471,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
3811     }
3812     EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
3813    
3814     -/*
3815     - * splice packets from the STA's pending to the local pending,
3816     - * requires a call to ieee80211_agg_splice_finish later
3817     - */
3818     -static void __acquires(agg_queue)
3819     -ieee80211_agg_splice_packets(struct ieee80211_local *local,
3820     - struct tid_ampdu_tx *tid_tx, u16 tid)
3821     -{
3822     - int queue = ieee80211_ac_from_tid(tid);
3823     - unsigned long flags;
3824     -
3825     - ieee80211_stop_queue_agg(local, tid);
3826     -
3827     - if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
3828     - " from the pending queue\n", tid))
3829     - return;
3830     -
3831     - if (!skb_queue_empty(&tid_tx->pending)) {
3832     - spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
3833     - /* copy over remaining packets */
3834     - skb_queue_splice_tail_init(&tid_tx->pending,
3835     - &local->pending[queue]);
3836     - spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
3837     - }
3838     -}
3839     -
3840     -static void __releases(agg_queue)
3841     -ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
3842     -{
3843     - ieee80211_wake_queue_agg(local, tid);
3844     -}
3845     -
3846     static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
3847     struct sta_info *sta, u16 tid)
3848     {
3849     diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
3850     index b9493a0..6cd8ddf 100644
3851     --- a/net/sched/sch_gred.c
3852     +++ b/net/sched/sch_gred.c
3853     @@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
3854     struct gred_sched_data *q;
3855    
3856     if (table->tab[dp] == NULL) {
3857     - table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
3858     + table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
3859     if (table->tab[dp] == NULL)
3860     return -ENOMEM;
3861     }
3862     diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
3863     index ea17cbe..59b26b8 100644
3864     --- a/net/sched/sch_mqprio.c
3865     +++ b/net/sched/sch_mqprio.c
3866     @@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
3867     if (!netif_is_multiqueue(dev))
3868     return -EOPNOTSUPP;
3869    
3870     - if (nla_len(opt) < sizeof(*qopt))
3871     + if (!opt || nla_len(opt) < sizeof(*qopt))
3872     return -EINVAL;
3873    
3874     qopt = nla_data(opt);
3875     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3876     index dc16b90..4981482 100644
3877     --- a/net/sctp/associola.c
3878     +++ b/net/sctp/associola.c
3879     @@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
3880     asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
3881     asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
3882     asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
3883     - (unsigned long)sp->autoclose * HZ;
3884     + min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
3885    
3886     /* Initializes the timers */
3887     for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
3888     diff --git a/net/sctp/output.c b/net/sctp/output.c
3889     index 08b3cea..817174e 100644
3890     --- a/net/sctp/output.c
3891     +++ b/net/sctp/output.c
3892     @@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
3893     /* Keep track of how many bytes are in flight to the receiver. */
3894     asoc->outqueue.outstanding_bytes += datasize;
3895    
3896     - /* Update our view of the receiver's rwnd. Include sk_buff overhead
3897     - * while updating peer.rwnd so that it reduces the chances of a
3898     - * receiver running out of receive buffer space even when receive
3899     - * window is still open. This can happen when a sender is sending
3900     - * sending small messages.
3901     - */
3902     - datasize += sizeof(struct sk_buff);
3903     + /* Update our view of the receiver's rwnd. */
3904     if (datasize < rwnd)
3905     rwnd -= datasize;
3906     else
3907     diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
3908     index a6d27bf..6edd7de 100644
3909     --- a/net/sctp/outqueue.c
3910     +++ b/net/sctp/outqueue.c
3911     @@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
3912     chunk->transport->flight_size -=
3913     sctp_data_size(chunk);
3914     q->outstanding_bytes -= sctp_data_size(chunk);
3915     - q->asoc->peer.rwnd += (sctp_data_size(chunk) +
3916     - sizeof(struct sk_buff));
3917     + q->asoc->peer.rwnd += sctp_data_size(chunk);
3918     }
3919     continue;
3920     }
3921     @@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
3922     * (Section 7.2.4)), add the data size of those
3923     * chunks to the rwnd.
3924     */
3925     - q->asoc->peer.rwnd += (sctp_data_size(chunk) +
3926     - sizeof(struct sk_buff));
3927     + q->asoc->peer.rwnd += sctp_data_size(chunk);
3928     q->outstanding_bytes -= sctp_data_size(chunk);
3929     if (chunk->transport)
3930     transport->flight_size -= sctp_data_size(chunk);
3931     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3932     index 91784f4..48cb7b9 100644
3933     --- a/net/sctp/protocol.c
3934     +++ b/net/sctp/protocol.c
3935     @@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
3936     sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
3937     sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
3938    
3939     + /* Initialize maximum autoclose timeout. */
3940     + sctp_max_autoclose = INT_MAX / HZ;
3941     +
3942     /* Initialize handle used for association ids. */
3943     idr_init(&sctp_assocs_id);
3944    
3945     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3946     index 836aa63..4760f4e 100644
3947     --- a/net/sctp/socket.c
3948     +++ b/net/sctp/socket.c
3949     @@ -2199,8 +2199,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
3950     return -EINVAL;
3951     if (copy_from_user(&sp->autoclose, optval, optlen))
3952     return -EFAULT;
3953     - /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
3954     - sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
3955    
3956     return 0;
3957     }
3958     diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
3959     index 6b39529..60ffbd0 100644
3960     --- a/net/sctp/sysctl.c
3961     +++ b/net/sctp/sysctl.c
3962     @@ -53,6 +53,10 @@ static int sack_timer_min = 1;
3963     static int sack_timer_max = 500;
3964     static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
3965     static int rwnd_scale_max = 16;
3966     +static unsigned long max_autoclose_min = 0;
3967     +static unsigned long max_autoclose_max =
3968     + (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
3969     + ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
3970    
3971     extern long sysctl_sctp_mem[3];
3972     extern int sysctl_sctp_rmem[3];
3973     @@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
3974     .extra1 = &one,
3975     .extra2 = &rwnd_scale_max,
3976     },
3977     + {
3978     + .procname = "max_autoclose",
3979     + .data = &sctp_max_autoclose,
3980     + .maxlen = sizeof(unsigned long),
3981     + .mode = 0644,
3982     + .proc_handler = &proc_doulongvec_minmax,
3983     + .extra1 = &max_autoclose_min,
3984     + .extra2 = &max_autoclose_max,
3985     + },
3986    
3987     { /* sentinel */ }
3988     };
3989     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
3990     index f4385e4..c64c0ef 100644
3991     --- a/net/sunrpc/xprt.c
3992     +++ b/net/sunrpc/xprt.c
3993     @@ -995,13 +995,11 @@ out_init_req:
3994    
3995     static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
3996     {
3997     - if (xprt_dynamic_free_slot(xprt, req))
3998     - return;
3999     -
4000     - memset(req, 0, sizeof(*req)); /* mark unused */
4001     -
4002     spin_lock(&xprt->reserve_lock);
4003     - list_add(&req->rq_list, &xprt->free);
4004     + if (!xprt_dynamic_free_slot(xprt, req)) {
4005     + memset(req, 0, sizeof(*req)); /* mark unused */
4006     + list_add(&req->rq_list, &xprt->free);
4007     + }
4008     rpc_wake_up_next(&xprt->backlog);
4009     spin_unlock(&xprt->reserve_lock);
4010     }
4011     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
4012     index 552df27..7e088c0 100644
4013     --- a/net/xfrm/xfrm_policy.c
4014     +++ b/net/xfrm/xfrm_policy.c
4015     @@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
4016     {
4017     struct dst_entry *head, *next;
4018    
4019     - flow_cache_flush();
4020     -
4021     spin_lock_bh(&xfrm_policy_sk_bundle_lock);
4022     head = xfrm_policy_sk_bundles;
4023     xfrm_policy_sk_bundles = NULL;
4024     @@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
4025     }
4026     }
4027    
4028     +static void xfrm_garbage_collect(struct net *net)
4029     +{
4030     + flow_cache_flush();
4031     + __xfrm_garbage_collect(net);
4032     +}
4033     +
4034     +static void xfrm_garbage_collect_deferred(struct net *net)
4035     +{
4036     + flow_cache_flush_deferred();
4037     + __xfrm_garbage_collect(net);
4038     +}
4039     +
4040     static void xfrm_init_pmtu(struct dst_entry *dst)
4041     {
4042     do {
4043     @@ -2420,7 +2430,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
4044     if (likely(dst_ops->neigh_lookup == NULL))
4045     dst_ops->neigh_lookup = xfrm_neigh_lookup;
4046     if (likely(afinfo->garbage_collect == NULL))
4047     - afinfo->garbage_collect = __xfrm_garbage_collect;
4048     + afinfo->garbage_collect = xfrm_garbage_collect_deferred;
4049     xfrm_policy_afinfo[afinfo->family] = afinfo;
4050     }
4051     write_unlock_bh(&xfrm_policy_afinfo_lock);
4052     @@ -2514,7 +2524,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
4053    
4054     switch (event) {
4055     case NETDEV_DOWN:
4056     - __xfrm_garbage_collect(dev_net(dev));
4057     + xfrm_garbage_collect(dev_net(dev));
4058     }
4059     return NOTIFY_DONE;
4060     }
4061     diff --git a/security/selinux/netport.c b/security/selinux/netport.c
4062     index 0b62bd1..7b9eb1f 100644
4063     --- a/security/selinux/netport.c
4064     +++ b/security/selinux/netport.c
4065     @@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
4066     if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
4067     struct sel_netport *tail;
4068     tail = list_entry(
4069     - rcu_dereference(sel_netport_hash[idx].list.prev),
4070     + rcu_dereference_protected(
4071     + sel_netport_hash[idx].list.prev,
4072     + lockdep_is_held(&sel_netport_lock)),
4073     struct sel_netport, list);
4074     list_del_rcu(&tail->list);
4075     kfree_rcu(tail, rcu);
4076     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4077     index f665975..82b7c88 100644
4078     --- a/sound/pci/hda/hda_intel.c
4079     +++ b/sound/pci/hda/hda_intel.c
4080     @@ -2375,6 +2375,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
4081     SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
4082     SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
4083     SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
4084     + SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
4085     SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
4086     SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
4087     SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
4088     diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
4089     index c9c4e5c..5c40874 100644
4090     --- a/sound/soc/codecs/wm8996.c
4091     +++ b/sound/soc/codecs/wm8996.c
4092     @@ -1895,6 +1895,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
4093     break;
4094     case 24576000:
4095     ratediv = WM8996_SYSCLK_DIV;
4096     + wm8996->sysclk /= 2;
4097     case 12288000:
4098     snd_soc_update_bits(codec, WM8996_AIF_RATE,
4099     WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);