Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.10/0105-3.10.6-all-fixes.patch

Revision 2253
Tue Aug 13 14:26:09 2013 UTC by niro
File size: 143634 bytes
3.10.6-magellan-r1
1 niro 2253 diff --git a/Makefile b/Makefile
2     index f8349d0..fd92ffb 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,8 +1,8 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 5
9     +SUBLEVEL = 6
10     EXTRAVERSION =
11     -NAME = Unicycling Gorilla
12     +NAME = TOSSUG Baby Fish
13    
14     # *DOCUMENTATION*
15     # To see a list of typical targets execute "make help"
16     diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
17     index 136f263..18a9f5e 100644
18     --- a/arch/arm/Kconfig
19     +++ b/arch/arm/Kconfig
20     @@ -19,7 +19,6 @@ config ARM
21     select GENERIC_STRNCPY_FROM_USER
22     select GENERIC_STRNLEN_USER
23     select HARDIRQS_SW_RESEND
24     - select HAVE_AOUT
25     select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
26     select HAVE_ARCH_KGDB
27     select HAVE_ARCH_SECCOMP_FILTER
28     @@ -213,7 +212,8 @@ config VECTORS_BASE
29     default DRAM_BASE if REMAP_VECTORS_TO_RAM
30     default 0x00000000
31     help
32     - The base address of exception vectors.
33     + The base address of exception vectors. This must be two pages
34     + in size.
35    
36     config ARM_PATCH_PHYS_VIRT
37     bool "Patch physical to virtual translations at runtime" if EMBEDDED
38     diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
39     deleted file mode 100644
40     index 92f10cb..0000000
41     --- a/arch/arm/include/asm/a.out-core.h
42     +++ /dev/null
43     @@ -1,45 +0,0 @@
44     -/* a.out coredump register dumper
45     - *
46     - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
47     - * Written by David Howells (dhowells@redhat.com)
48     - *
49     - * This program is free software; you can redistribute it and/or
50     - * modify it under the terms of the GNU General Public Licence
51     - * as published by the Free Software Foundation; either version
52     - * 2 of the Licence, or (at your option) any later version.
53     - */
54     -
55     -#ifndef _ASM_A_OUT_CORE_H
56     -#define _ASM_A_OUT_CORE_H
57     -
58     -#ifdef __KERNEL__
59     -
60     -#include <linux/user.h>
61     -#include <linux/elfcore.h>
62     -
63     -/*
64     - * fill in the user structure for an a.out core dump
65     - */
66     -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
67     -{
68     - struct task_struct *tsk = current;
69     -
70     - dump->magic = CMAGIC;
71     - dump->start_code = tsk->mm->start_code;
72     - dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
73     -
74     - dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
75     - dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
76     - dump->u_ssize = 0;
77     -
78     - memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
79     -
80     - if (dump->start_stack < 0x04000000)
81     - dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
82     -
83     - dump->regs = *regs;
84     - dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
85     -}
86     -
87     -#endif /* __KERNEL__ */
88     -#endif /* _ASM_A_OUT_CORE_H */
89     diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
90     index 38050b1..56211f2 100644
91     --- a/arch/arm/include/asm/elf.h
92     +++ b/arch/arm/include/asm/elf.h
93     @@ -130,4 +130,10 @@ struct mm_struct;
94     extern unsigned long arch_randomize_brk(struct mm_struct *mm);
95     #define arch_randomize_brk arch_randomize_brk
96    
97     +#ifdef CONFIG_MMU
98     +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
99     +struct linux_binprm;
100     +int arch_setup_additional_pages(struct linux_binprm *, int);
101     +#endif
102     +
103     #endif
104     diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
105     index e3d5554..6f18da0 100644
106     --- a/arch/arm/include/asm/mmu.h
107     +++ b/arch/arm/include/asm/mmu.h
108     @@ -6,8 +6,11 @@
109     typedef struct {
110     #ifdef CONFIG_CPU_HAS_ASID
111     atomic64_t id;
112     +#else
113     + int switch_pending;
114     #endif
115     unsigned int vmalloc_seq;
116     + unsigned long sigpage;
117     } mm_context_t;
118    
119     #ifdef CONFIG_CPU_HAS_ASID
120     diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
121     index dc90203..e0b10f1 100644
122     --- a/arch/arm/include/asm/mmu_context.h
123     +++ b/arch/arm/include/asm/mmu_context.h
124     @@ -55,7 +55,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
125     * on non-ASID CPUs, the old mm will remain valid until the
126     * finish_arch_post_lock_switch() call.
127     */
128     - set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
129     + mm->context.switch_pending = 1;
130     else
131     cpu_switch_mm(mm->pgd, mm);
132     }
133     @@ -64,9 +64,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
134     finish_arch_post_lock_switch
135     static inline void finish_arch_post_lock_switch(void)
136     {
137     - if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
138     - struct mm_struct *mm = current->mm;
139     - cpu_switch_mm(mm->pgd, mm);
140     + struct mm_struct *mm = current->mm;
141     +
142     + if (mm && mm->context.switch_pending) {
143     + /*
144     + * Preemption must be disabled during cpu_switch_mm() as we
145     + * have some stateful cache flush implementations. Check
146     + * switch_pending again in case we were preempted and the
147     + * switch to this mm was already done.
148     + */
149     + preempt_disable();
150     + if (mm->context.switch_pending) {
151     + mm->context.switch_pending = 0;
152     + cpu_switch_mm(mm->pgd, mm);
153     + }
154     + preempt_enable_no_resched();
155     }
156     }
157    
158     diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
159     index 812a494..cbdc7a2 100644
160     --- a/arch/arm/include/asm/page.h
161     +++ b/arch/arm/include/asm/page.h
162     @@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
163     #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
164     extern void copy_page(void *to, const void *from);
165    
166     +#ifdef CONFIG_KUSER_HELPERS
167     #define __HAVE_ARCH_GATE_AREA 1
168     +#endif
169    
170     #ifdef CONFIG_ARM_LPAE
171     #include <asm/pgtable-3level-types.h>
172     diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
173     index 06e7d50..413f387 100644
174     --- a/arch/arm/include/asm/processor.h
175     +++ b/arch/arm/include/asm/processor.h
176     @@ -54,7 +54,6 @@ struct thread_struct {
177    
178     #define start_thread(regs,pc,sp) \
179     ({ \
180     - unsigned long *stack = (unsigned long *)sp; \
181     memset(regs->uregs, 0, sizeof(regs->uregs)); \
182     if (current->personality & ADDR_LIMIT_32BIT) \
183     regs->ARM_cpsr = USR_MODE; \
184     @@ -65,9 +64,6 @@ struct thread_struct {
185     regs->ARM_cpsr |= PSR_ENDSTATE; \
186     regs->ARM_pc = pc & ~1; /* pc */ \
187     regs->ARM_sp = sp; /* sp */ \
188     - regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
189     - regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
190     - regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
191     nommu_start_thread(regs); \
192     })
193    
194     diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
195     index 1995d1a..f00b569 100644
196     --- a/arch/arm/include/asm/thread_info.h
197     +++ b/arch/arm/include/asm/thread_info.h
198     @@ -156,7 +156,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
199     #define TIF_USING_IWMMXT 17
200     #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
201     #define TIF_RESTORE_SIGMASK 20
202     -#define TIF_SWITCH_MM 22 /* deferred switch_mm */
203    
204     #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
205     #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
206     diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
207     index 47bcb2d..18d76fd 100644
208     --- a/arch/arm/include/uapi/asm/Kbuild
209     +++ b/arch/arm/include/uapi/asm/Kbuild
210     @@ -1,7 +1,6 @@
211     # UAPI Header export list
212     include include/uapi/asm-generic/Kbuild.asm
213    
214     -header-y += a.out.h
215     header-y += byteorder.h
216     header-y += fcntl.h
217     header-y += hwcap.h
218     diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
219     deleted file mode 100644
220     index 083894b..0000000
221     --- a/arch/arm/include/uapi/asm/a.out.h
222     +++ /dev/null
223     @@ -1,34 +0,0 @@
224     -#ifndef __ARM_A_OUT_H__
225     -#define __ARM_A_OUT_H__
226     -
227     -#include <linux/personality.h>
228     -#include <linux/types.h>
229     -
230     -struct exec
231     -{
232     - __u32 a_info; /* Use macros N_MAGIC, etc for access */
233     - __u32 a_text; /* length of text, in bytes */
234     - __u32 a_data; /* length of data, in bytes */
235     - __u32 a_bss; /* length of uninitialized data area for file, in bytes */
236     - __u32 a_syms; /* length of symbol table data in file, in bytes */
237     - __u32 a_entry; /* start address */
238     - __u32 a_trsize; /* length of relocation info for text, in bytes */
239     - __u32 a_drsize; /* length of relocation info for data, in bytes */
240     -};
241     -
242     -/*
243     - * This is always the same
244     - */
245     -#define N_TXTADDR(a) (0x00008000)
246     -
247     -#define N_TRSIZE(a) ((a).a_trsize)
248     -#define N_DRSIZE(a) ((a).a_drsize)
249     -#define N_SYMSIZE(a) ((a).a_syms)
250     -
251     -#define M_ARM 103
252     -
253     -#ifndef LIBRARY_START_TEXT
254     -#define LIBRARY_START_TEXT (0x00c00000)
255     -#endif
256     -
257     -#endif /* __A_OUT_GNU_H__ */
258     diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
259     index 582b405..d43c7e5 100644
260     --- a/arch/arm/kernel/entry-armv.S
261     +++ b/arch/arm/kernel/entry-armv.S
262     @@ -741,6 +741,18 @@ ENDPROC(__switch_to)
263     #endif
264     .endm
265    
266     + .macro kuser_pad, sym, size
267     + .if (. - \sym) & 3
268     + .rept 4 - (. - \sym) & 3
269     + .byte 0
270     + .endr
271     + .endif
272     + .rept (\size - (. - \sym)) / 4
273     + .word 0xe7fddef1
274     + .endr
275     + .endm
276     +
277     +#ifdef CONFIG_KUSER_HELPERS
278     .align 5
279     .globl __kuser_helper_start
280     __kuser_helper_start:
281     @@ -831,18 +843,13 @@ kuser_cmpxchg64_fixup:
282     #error "incoherent kernel configuration"
283     #endif
284    
285     - /* pad to next slot */
286     - .rept (16 - (. - __kuser_cmpxchg64)/4)
287     - .word 0
288     - .endr
289     -
290     - .align 5
291     + kuser_pad __kuser_cmpxchg64, 64
292    
293     __kuser_memory_barrier: @ 0xffff0fa0
294     smp_dmb arm
295     usr_ret lr
296    
297     - .align 5
298     + kuser_pad __kuser_memory_barrier, 32
299    
300     __kuser_cmpxchg: @ 0xffff0fc0
301    
302     @@ -915,13 +922,14 @@ kuser_cmpxchg32_fixup:
303    
304     #endif
305    
306     - .align 5
307     + kuser_pad __kuser_cmpxchg, 32
308    
309     __kuser_get_tls: @ 0xffff0fe0
310     ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
311     usr_ret lr
312     mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
313     - .rep 4
314     + kuser_pad __kuser_get_tls, 16
315     + .rep 3
316     .word 0 @ 0xffff0ff0 software TLS value, then
317     .endr @ pad up to __kuser_helper_version
318    
319     @@ -931,14 +939,16 @@ __kuser_helper_version: @ 0xffff0ffc
320     .globl __kuser_helper_end
321     __kuser_helper_end:
322    
323     +#endif
324     +
325     THUMB( .thumb )
326    
327     /*
328     * Vector stubs.
329     *
330     - * This code is copied to 0xffff0200 so we can use branches in the
331     - * vectors, rather than ldr's. Note that this code must not
332     - * exceed 0x300 bytes.
333     + * This code is copied to 0xffff1000 so we can use branches in the
334     + * vectors, rather than ldr's. Note that this code must not exceed
335     + * a page size.
336     *
337     * Common stub entry macro:
338     * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
339     @@ -985,8 +995,17 @@ ENDPROC(vector_\name)
340     1:
341     .endm
342    
343     - .globl __stubs_start
344     + .section .stubs, "ax", %progbits
345     __stubs_start:
346     + @ This must be the first word
347     + .word vector_swi
348     +
349     +vector_rst:
350     + ARM( swi SYS_ERROR0 )
351     + THUMB( svc #0 )
352     + THUMB( nop )
353     + b vector_und
354     +
355     /*
356     * Interrupt dispatcher
357     */
358     @@ -1081,6 +1100,16 @@ __stubs_start:
359     .align 5
360    
361     /*=============================================================================
362     + * Address exception handler
363     + *-----------------------------------------------------------------------------
364     + * These aren't too critical.
365     + * (they're not supposed to happen, and won't happen in 32-bit data mode).
366     + */
367     +
368     +vector_addrexcptn:
369     + b vector_addrexcptn
370     +
371     +/*=============================================================================
372     * Undefined FIQs
373     *-----------------------------------------------------------------------------
374     * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
375     @@ -1093,45 +1122,19 @@ __stubs_start:
376     vector_fiq:
377     subs pc, lr, #4
378    
379     -/*=============================================================================
380     - * Address exception handler
381     - *-----------------------------------------------------------------------------
382     - * These aren't too critical.
383     - * (they're not supposed to happen, and won't happen in 32-bit data mode).
384     - */
385     -
386     -vector_addrexcptn:
387     - b vector_addrexcptn
388     -
389     -/*
390     - * We group all the following data together to optimise
391     - * for CPUs with separate I & D caches.
392     - */
393     - .align 5
394     -
395     -.LCvswi:
396     - .word vector_swi
397     -
398     - .globl __stubs_end
399     -__stubs_end:
400     -
401     - .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
402     + .globl vector_fiq_offset
403     + .equ vector_fiq_offset, vector_fiq
404    
405     - .globl __vectors_start
406     + .section .vectors, "ax", %progbits
407     __vectors_start:
408     - ARM( swi SYS_ERROR0 )
409     - THUMB( svc #0 )
410     - THUMB( nop )
411     - W(b) vector_und + stubs_offset
412     - W(ldr) pc, .LCvswi + stubs_offset
413     - W(b) vector_pabt + stubs_offset
414     - W(b) vector_dabt + stubs_offset
415     - W(b) vector_addrexcptn + stubs_offset
416     - W(b) vector_irq + stubs_offset
417     - W(b) vector_fiq + stubs_offset
418     -
419     - .globl __vectors_end
420     -__vectors_end:
421     + W(b) vector_rst
422     + W(b) vector_und
423     + W(ldr) pc, __vectors_start + 0x1000
424     + W(b) vector_pabt
425     + W(b) vector_dabt
426     + W(b) vector_addrexcptn
427     + W(b) vector_irq
428     + W(b) vector_fiq
429    
430     .data
431    
432     diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
433     index 2adda11..25442f4 100644
434     --- a/arch/arm/kernel/fiq.c
435     +++ b/arch/arm/kernel/fiq.c
436     @@ -47,6 +47,11 @@
437     #include <asm/irq.h>
438     #include <asm/traps.h>
439    
440     +#define FIQ_OFFSET ({ \
441     + extern void *vector_fiq_offset; \
442     + (unsigned)&vector_fiq_offset; \
443     + })
444     +
445     static unsigned long no_fiq_insn;
446    
447     /* Default reacquire function
448     @@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
449     void set_fiq_handler(void *start, unsigned int length)
450     {
451     #if defined(CONFIG_CPU_USE_DOMAINS)
452     - memcpy((void *)0xffff001c, start, length);
453     + void *base = (void *)0xffff0000;
454     #else
455     - memcpy(vectors_page + 0x1c, start, length);
456     + void *base = vectors_page;
457     #endif
458     - flush_icache_range(0xffff001c, 0xffff001c + length);
459     + unsigned offset = FIQ_OFFSET;
460     +
461     + memcpy(base + offset, start, length);
462     + flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
463     if (!vectors_high())
464     - flush_icache_range(0x1c, 0x1c + length);
465     + flush_icache_range(offset, offset + length);
466     }
467    
468     int claim_fiq(struct fiq_handler *f)
469     @@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
470    
471     void __init init_FIQ(int start)
472     {
473     - no_fiq_insn = *(unsigned long *)0xffff001c;
474     + unsigned offset = FIQ_OFFSET;
475     + no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
476     fiq_start = start;
477     }
478     diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
479     index 6e8931c..5bc2615 100644
480     --- a/arch/arm/kernel/process.c
481     +++ b/arch/arm/kernel/process.c
482     @@ -433,10 +433,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
483     }
484    
485     #ifdef CONFIG_MMU
486     +#ifdef CONFIG_KUSER_HELPERS
487     /*
488     * The vectors page is always readable from user space for the
489     - * atomic helpers and the signal restart code. Insert it into the
490     - * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
491     + * atomic helpers. Insert it into the gate_vma so that it is visible
492     + * through ptrace and /proc/<pid>/mem.
493     */
494     static struct vm_area_struct gate_vma = {
495     .vm_start = 0xffff0000,
496     @@ -465,9 +466,48 @@ int in_gate_area_no_mm(unsigned long addr)
497     {
498     return in_gate_area(NULL, addr);
499     }
500     +#define is_gate_vma(vma) ((vma) = &gate_vma)
501     +#else
502     +#define is_gate_vma(vma) 0
503     +#endif
504    
505     const char *arch_vma_name(struct vm_area_struct *vma)
506     {
507     - return (vma == &gate_vma) ? "[vectors]" : NULL;
508     + return is_gate_vma(vma) ? "[vectors]" :
509     + (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
510     + "[sigpage]" : NULL;
511     +}
512     +
513     +static struct page *signal_page;
514     +extern struct page *get_signal_page(void);
515     +
516     +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
517     +{
518     + struct mm_struct *mm = current->mm;
519     + unsigned long addr;
520     + int ret;
521     +
522     + if (!signal_page)
523     + signal_page = get_signal_page();
524     + if (!signal_page)
525     + return -ENOMEM;
526     +
527     + down_write(&mm->mmap_sem);
528     + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
529     + if (IS_ERR_VALUE(addr)) {
530     + ret = addr;
531     + goto up_fail;
532     + }
533     +
534     + ret = install_special_mapping(mm, addr, PAGE_SIZE,
535     + VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
536     + &signal_page);
537     +
538     + if (ret == 0)
539     + mm->context.sigpage = addr;
540     +
541     + up_fail:
542     + up_write(&mm->mmap_sem);
543     + return ret;
544     }
545     #endif
546     diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
547     index 296786b..5a42c12 100644
548     --- a/arch/arm/kernel/signal.c
549     +++ b/arch/arm/kernel/signal.c
550     @@ -8,6 +8,7 @@
551     * published by the Free Software Foundation.
552     */
553     #include <linux/errno.h>
554     +#include <linux/random.h>
555     #include <linux/signal.h>
556     #include <linux/personality.h>
557     #include <linux/uaccess.h>
558     @@ -15,12 +16,11 @@
559    
560     #include <asm/elf.h>
561     #include <asm/cacheflush.h>
562     +#include <asm/traps.h>
563     #include <asm/ucontext.h>
564     #include <asm/unistd.h>
565     #include <asm/vfp.h>
566    
567     -#include "signal.h"
568     -
569     /*
570     * For ARM syscalls, we encode the syscall number into the instruction.
571     */
572     @@ -40,11 +40,13 @@
573     #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
574     #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
575    
576     -const unsigned long sigreturn_codes[7] = {
577     +static const unsigned long sigreturn_codes[7] = {
578     MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
579     MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
580     };
581    
582     +static unsigned long signal_return_offset;
583     +
584     #ifdef CONFIG_CRUNCH
585     static int preserve_crunch_context(struct crunch_sigframe __user *frame)
586     {
587     @@ -396,13 +398,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
588     __put_user(sigreturn_codes[idx+1], rc+1))
589     return 1;
590    
591     +#ifdef CONFIG_MMU
592     if (cpsr & MODE32_BIT) {
593     + struct mm_struct *mm = current->mm;
594     /*
595     - * 32-bit code can use the new high-page
596     - * signal return code support.
597     + * 32-bit code can use the signal return page
598     + * except when the MPU has protected the vectors
599     + * page from PL0
600     */
601     - retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
602     - } else {
603     + retcode = mm->context.sigpage + signal_return_offset +
604     + (idx << 2) + thumb;
605     + } else
606     +#endif
607     + {
608     /*
609     * Ensure that the instruction cache sees
610     * the return code written onto the stack.
611     @@ -603,3 +611,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
612     } while (thread_flags & _TIF_WORK_MASK);
613     return 0;
614     }
615     +
616     +struct page *get_signal_page(void)
617     +{
618     + unsigned long ptr;
619     + unsigned offset;
620     + struct page *page;
621     + void *addr;
622     +
623     + page = alloc_pages(GFP_KERNEL, 0);
624     +
625     + if (!page)
626     + return NULL;
627     +
628     + addr = page_address(page);
629     +
630     + /* Give the signal return code some randomness */
631     + offset = 0x200 + (get_random_int() & 0x7fc);
632     + signal_return_offset = offset;
633     +
634     + /*
635     + * Copy signal return handlers into the vector page, and
636     + * set sigreturn to be a pointer to these.
637     + */
638     + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
639     +
640     + ptr = (unsigned long)addr + offset;
641     + flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
642     +
643     + return page;
644     +}
645     diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
646     deleted file mode 100644
647     index 5ff067b7..0000000
648     --- a/arch/arm/kernel/signal.h
649     +++ /dev/null
650     @@ -1,12 +0,0 @@
651     -/*
652     - * linux/arch/arm/kernel/signal.h
653     - *
654     - * Copyright (C) 2005-2009 Russell King.
655     - *
656     - * This program is free software; you can redistribute it and/or modify
657     - * it under the terms of the GNU General Public License version 2 as
658     - * published by the Free Software Foundation.
659     - */
660     -#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
661     -
662     -extern const unsigned long sigreturn_codes[7];
663     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
664     index 18b32e8..6b9567e 100644
665     --- a/arch/arm/kernel/traps.c
666     +++ b/arch/arm/kernel/traps.c
667     @@ -35,8 +35,6 @@
668     #include <asm/tls.h>
669     #include <asm/system_misc.h>
670    
671     -#include "signal.h"
672     -
673     static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
674    
675     void *vectors_page;
676     @@ -800,47 +798,55 @@ void __init trap_init(void)
677     return;
678     }
679    
680     -static void __init kuser_get_tls_init(unsigned long vectors)
681     +#ifdef CONFIG_KUSER_HELPERS
682     +static void __init kuser_init(void *vectors)
683     {
684     + extern char __kuser_helper_start[], __kuser_helper_end[];
685     + int kuser_sz = __kuser_helper_end - __kuser_helper_start;
686     +
687     + memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
688     +
689     /*
690     * vectors + 0xfe0 = __kuser_get_tls
691     * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
692     */
693     if (tls_emu || has_tls_reg)
694     - memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
695     + memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
696     }
697     +#else
698     +static void __init kuser_init(void *vectors)
699     +{
700     +}
701     +#endif
702    
703     void __init early_trap_init(void *vectors_base)
704     {
705     unsigned long vectors = (unsigned long)vectors_base;
706     extern char __stubs_start[], __stubs_end[];
707     extern char __vectors_start[], __vectors_end[];
708     - extern char __kuser_helper_start[], __kuser_helper_end[];
709     - int kuser_sz = __kuser_helper_end - __kuser_helper_start;
710     + unsigned i;
711    
712     vectors_page = vectors_base;
713    
714     /*
715     + * Poison the vectors page with an undefined instruction. This
716     + * instruction is chosen to be undefined for both ARM and Thumb
717     + * ISAs. The Thumb version is an undefined instruction with a
718     + * branch back to the undefined instruction.
719     + */
720     + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
721     + ((u32 *)vectors_base)[i] = 0xe7fddef1;
722     +
723     + /*
724     * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
725     * into the vector page, mapped at 0xffff0000, and ensure these
726     * are visible to the instruction stream.
727     */
728     memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
729     - memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
730     - memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
731     + memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
732    
733     - /*
734     - * Do processor specific fixups for the kuser helpers
735     - */
736     - kuser_get_tls_init(vectors);
737     -
738     - /*
739     - * Copy signal return handlers into the vector page, and
740     - * set sigreturn to be a pointer to these.
741     - */
742     - memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
743     - sigreturn_codes, sizeof(sigreturn_codes));
744     + kuser_init(vectors_base);
745    
746     - flush_icache_range(vectors, vectors + PAGE_SIZE);
747     + flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
748     modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
749     }
750     diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
751     index a871b8e..33f2ea3 100644
752     --- a/arch/arm/kernel/vmlinux.lds.S
753     +++ b/arch/arm/kernel/vmlinux.lds.S
754     @@ -152,6 +152,23 @@ SECTIONS
755     . = ALIGN(PAGE_SIZE);
756     __init_begin = .;
757     #endif
758     + /*
759     + * The vectors and stubs are relocatable code, and the
760     + * only thing that matters is their relative offsets
761     + */
762     + __vectors_start = .;
763     + .vectors 0 : AT(__vectors_start) {
764     + *(.vectors)
765     + }
766     + . = __vectors_start + SIZEOF(.vectors);
767     + __vectors_end = .;
768     +
769     + __stubs_start = .;
770     + .stubs 0x1000 : AT(__stubs_start) {
771     + *(.stubs)
772     + }
773     + . = __stubs_start + SIZEOF(.stubs);
774     + __stubs_end = .;
775    
776     INIT_TEXT_SECTION(8)
777     .exit.text : {
778     diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
779     index 35955b5..2950082 100644
780     --- a/arch/arm/mm/Kconfig
781     +++ b/arch/arm/mm/Kconfig
782     @@ -411,24 +411,28 @@ config CPU_32v3
783     select CPU_USE_DOMAINS if MMU
784     select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
785     select TLS_REG_EMUL if SMP || !MMU
786     + select NEED_KUSER_HELPERS
787    
788     config CPU_32v4
789     bool
790     select CPU_USE_DOMAINS if MMU
791     select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
792     select TLS_REG_EMUL if SMP || !MMU
793     + select NEED_KUSER_HELPERS
794    
795     config CPU_32v4T
796     bool
797     select CPU_USE_DOMAINS if MMU
798     select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
799     select TLS_REG_EMUL if SMP || !MMU
800     + select NEED_KUSER_HELPERS
801    
802     config CPU_32v5
803     bool
804     select CPU_USE_DOMAINS if MMU
805     select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
806     select TLS_REG_EMUL if SMP || !MMU
807     + select NEED_KUSER_HELPERS
808    
809     config CPU_32v6
810     bool
811     @@ -756,6 +760,7 @@ config CPU_BPREDICT_DISABLE
812    
813     config TLS_REG_EMUL
814     bool
815     + select NEED_KUSER_HELPERS
816     help
817     An SMP system using a pre-ARMv6 processor (there are apparently
818     a few prototypes like that in existence) and therefore access to
819     @@ -763,11 +768,40 @@ config TLS_REG_EMUL
820    
821     config NEEDS_SYSCALL_FOR_CMPXCHG
822     bool
823     + select NEED_KUSER_HELPERS
824     help
825     SMP on a pre-ARMv6 processor? Well OK then.
826     Forget about fast user space cmpxchg support.
827     It is just not possible.
828    
829     +config NEED_KUSER_HELPERS
830     + bool
831     +
832     +config KUSER_HELPERS
833     + bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
834     + default y
835     + help
836     + Warning: disabling this option may break user programs.
837     +
838     + Provide kuser helpers in the vector page. The kernel provides
839     + helper code to userspace in read only form at a fixed location
840     + in the high vector page to allow userspace to be independent of
841     + the CPU type fitted to the system. This permits binaries to be
842     + run on ARMv4 through to ARMv7 without modification.
843     +
844     + However, the fixed address nature of these helpers can be used
845     + by ROP (return orientated programming) authors when creating
846     + exploits.
847     +
848     + If all of the binaries and libraries which run on your platform
849     + are built specifically for your platform, and make no use of
850     + these helpers, then you can turn this option off. However,
851     + when such an binary or library is run, it will receive a SIGILL
852     + signal, which will terminate the program.
853     +
854     + Say N here only if you are absolutely certain that you do not
855     + need these helpers; otherwise, the safe option is to say Y.
856     +
857     config DMA_CACHE_RWFO
858     bool "Enable read/write for ownership DMA cache maintenance"
859     depends on CPU_V6K && SMP
860     diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
861     index 4d409e6..daf336f 100644
862     --- a/arch/arm/mm/mmu.c
863     +++ b/arch/arm/mm/mmu.c
864     @@ -1175,7 +1175,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
865     /*
866     * Allocate the vector page early.
867     */
868     - vectors = early_alloc(PAGE_SIZE);
869     + vectors = early_alloc(PAGE_SIZE * 2);
870    
871     early_trap_init(vectors);
872    
873     @@ -1220,15 +1220,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
874     map.pfn = __phys_to_pfn(virt_to_phys(vectors));
875     map.virtual = 0xffff0000;
876     map.length = PAGE_SIZE;
877     +#ifdef CONFIG_KUSER_HELPERS
878     map.type = MT_HIGH_VECTORS;
879     +#else
880     + map.type = MT_LOW_VECTORS;
881     +#endif
882     create_mapping(&map);
883    
884     if (!vectors_high()) {
885     map.virtual = 0;
886     + map.length = PAGE_SIZE * 2;
887     map.type = MT_LOW_VECTORS;
888     create_mapping(&map);
889     }
890    
891     + /* Now create a kernel read-only mapping */
892     + map.pfn += 1;
893     + map.virtual = 0xffff0000 + PAGE_SIZE;
894     + map.length = PAGE_SIZE;
895     + map.type = MT_LOW_VECTORS;
896     + create_mapping(&map);
897     +
898     /*
899     * Ask the machine support to map in the statically mapped devices.
900     */
901     diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
902     index 9704097..b3997c7 100644
903     --- a/arch/arm/mm/proc-v7-2level.S
904     +++ b/arch/arm/mm/proc-v7-2level.S
905     @@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
906     ARM( str r3, [r0, #2048]! )
907     THUMB( add r0, r0, #2048 )
908     THUMB( str r3, [r0] )
909     - ALT_SMP(mov pc,lr)
910     + ALT_SMP(W(nop))
911     ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
912     #endif
913     mov pc, lr
914     diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
915     index 363027e..6ba4bd9 100644
916     --- a/arch/arm/mm/proc-v7-3level.S
917     +++ b/arch/arm/mm/proc-v7-3level.S
918     @@ -73,7 +73,7 @@ ENTRY(cpu_v7_set_pte_ext)
919     tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
920     orreq r2, #L_PTE_RDONLY
921     1: strd r2, r3, [r0]
922     - ALT_SMP(mov pc, lr)
923     + ALT_SMP(W(nop))
924     ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
925     #endif
926     mov pc, lr
927     diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
928     index e35fec3..5fbccee 100644
929     --- a/arch/arm/mm/proc-v7.S
930     +++ b/arch/arm/mm/proc-v7.S
931     @@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle)
932     ENDPROC(cpu_v7_do_idle)
933    
934     ENTRY(cpu_v7_dcache_clean_area)
935     - ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
936     - ALT_UP(W(nop))
937     - dcache_line_size r2, r3
938     -1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
939     + ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
940     + ALT_UP_B(1f)
941     + mov pc, lr
942     +1: dcache_line_size r2, r3
943     +2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
944     add r0, r0, r2
945     subs r1, r1, r2
946     - bhi 1b
947     + bhi 2b
948     dsb
949     mov pc, lr
950     ENDPROC(cpu_v7_dcache_clean_area)
951     diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
952     index 9afdad6..eaf4dc1 100644
953     --- a/arch/parisc/include/asm/parisc-device.h
954     +++ b/arch/parisc/include/asm/parisc-device.h
955     @@ -23,6 +23,7 @@ struct parisc_device {
956     /* generic info returned from pdc_pat_cell_module() */
957     unsigned long mod_info; /* PAT specific - Misc Module info */
958     unsigned long pmod_loc; /* physical Module location */
959     + unsigned long mod0;
960     #endif
961     u64 dma_mask; /* DMA mask for I/O */
962     struct device dev;
963     @@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
964    
965     extern struct bus_type parisc_bus_type;
966    
967     +int iosapic_serial_irq(struct parisc_device *dev);
968     +
969     #endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
970     diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
971     index 2e65aa5..c035673 100644
972     --- a/arch/parisc/kernel/cache.c
973     +++ b/arch/parisc/kernel/cache.c
974     @@ -71,18 +71,27 @@ flush_cache_all_local(void)
975     }
976     EXPORT_SYMBOL(flush_cache_all_local);
977    
978     +/* Virtual address of pfn. */
979     +#define pfn_va(pfn) __va(PFN_PHYS(pfn))
980     +
981     void
982     update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
983     {
984     - struct page *page = pte_page(*ptep);
985     + unsigned long pfn = pte_pfn(*ptep);
986     + struct page *page;
987    
988     - if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
989     - test_bit(PG_dcache_dirty, &page->flags)) {
990     + /* We don't have pte special. As a result, we can be called with
991     + an invalid pfn and we don't need to flush the kernel dcache page.
992     + This occurs with FireGL card in C8000. */
993     + if (!pfn_valid(pfn))
994     + return;
995    
996     - flush_kernel_dcache_page(page);
997     + page = pfn_to_page(pfn);
998     + if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
999     + flush_kernel_dcache_page_addr(pfn_va(pfn));
1000     clear_bit(PG_dcache_dirty, &page->flags);
1001     } else if (parisc_requires_coherency())
1002     - flush_kernel_dcache_page(page);
1003     + flush_kernel_dcache_page_addr(pfn_va(pfn));
1004     }
1005    
1006     void
1007     @@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
1008    
1009     void flush_cache_mm(struct mm_struct *mm)
1010     {
1011     + struct vm_area_struct *vma;
1012     + pgd_t *pgd;
1013     +
1014     /* Flushing the whole cache on each cpu takes forever on
1015     rp3440, etc. So, avoid it if the mm isn't too big. */
1016     - if (mm_total_size(mm) < parisc_cache_flush_threshold) {
1017     - struct vm_area_struct *vma;
1018     -
1019     - if (mm->context == mfsp(3)) {
1020     - for (vma = mm->mmap; vma; vma = vma->vm_next) {
1021     - flush_user_dcache_range_asm(vma->vm_start,
1022     - vma->vm_end);
1023     - if (vma->vm_flags & VM_EXEC)
1024     - flush_user_icache_range_asm(
1025     - vma->vm_start, vma->vm_end);
1026     - }
1027     - } else {
1028     - pgd_t *pgd = mm->pgd;
1029     -
1030     - for (vma = mm->mmap; vma; vma = vma->vm_next) {
1031     - unsigned long addr;
1032     -
1033     - for (addr = vma->vm_start; addr < vma->vm_end;
1034     - addr += PAGE_SIZE) {
1035     - pte_t *ptep = get_ptep(pgd, addr);
1036     - if (ptep != NULL) {
1037     - pte_t pte = *ptep;
1038     - __flush_cache_page(vma, addr,
1039     - page_to_phys(pte_page(pte)));
1040     - }
1041     - }
1042     - }
1043     + if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
1044     + flush_cache_all();
1045     + return;
1046     + }
1047     +
1048     + if (mm->context == mfsp(3)) {
1049     + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1050     + flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
1051     + if ((vma->vm_flags & VM_EXEC) == 0)
1052     + continue;
1053     + flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
1054     }
1055     return;
1056     }
1057    
1058     -#ifdef CONFIG_SMP
1059     - flush_cache_all();
1060     -#else
1061     - flush_cache_all_local();
1062     -#endif
1063     + pgd = mm->pgd;
1064     + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1065     + unsigned long addr;
1066     +
1067     + for (addr = vma->vm_start; addr < vma->vm_end;
1068     + addr += PAGE_SIZE) {
1069     + unsigned long pfn;
1070     + pte_t *ptep = get_ptep(pgd, addr);
1071     + if (!ptep)
1072     + continue;
1073     + pfn = pte_pfn(*ptep);
1074     + if (!pfn_valid(pfn))
1075     + continue;
1076     + __flush_cache_page(vma, addr, PFN_PHYS(pfn));
1077     + }
1078     + }
1079     }
1080    
1081     void
1082     @@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
1083     void flush_cache_range(struct vm_area_struct *vma,
1084     unsigned long start, unsigned long end)
1085     {
1086     + unsigned long addr;
1087     + pgd_t *pgd;
1088     +
1089     BUG_ON(!vma->vm_mm->context);
1090    
1091     - if ((end - start) < parisc_cache_flush_threshold) {
1092     - if (vma->vm_mm->context == mfsp(3)) {
1093     - flush_user_dcache_range_asm(start, end);
1094     - if (vma->vm_flags & VM_EXEC)
1095     - flush_user_icache_range_asm(start, end);
1096     - } else {
1097     - unsigned long addr;
1098     - pgd_t *pgd = vma->vm_mm->pgd;
1099     -
1100     - for (addr = start & PAGE_MASK; addr < end;
1101     - addr += PAGE_SIZE) {
1102     - pte_t *ptep = get_ptep(pgd, addr);
1103     - if (ptep != NULL) {
1104     - pte_t pte = *ptep;
1105     - flush_cache_page(vma,
1106     - addr, pte_pfn(pte));
1107     - }
1108     - }
1109     - }
1110     - } else {
1111     -#ifdef CONFIG_SMP
1112     + if ((end - start) >= parisc_cache_flush_threshold) {
1113     flush_cache_all();
1114     -#else
1115     - flush_cache_all_local();
1116     -#endif
1117     + return;
1118     + }
1119     +
1120     + if (vma->vm_mm->context == mfsp(3)) {
1121     + flush_user_dcache_range_asm(start, end);
1122     + if (vma->vm_flags & VM_EXEC)
1123     + flush_user_icache_range_asm(start, end);
1124     + return;
1125     + }
1126     +
1127     + pgd = vma->vm_mm->pgd;
1128     + for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
1129     + unsigned long pfn;
1130     + pte_t *ptep = get_ptep(pgd, addr);
1131     + if (!ptep)
1132     + continue;
1133     + pfn = pte_pfn(*ptep);
1134     + if (pfn_valid(pfn))
1135     + __flush_cache_page(vma, addr, PFN_PHYS(pfn));
1136     }
1137     }
1138    
1139     @@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
1140     {
1141     BUG_ON(!vma->vm_mm->context);
1142    
1143     - flush_tlb_page(vma, vmaddr);
1144     - __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
1145     -
1146     + if (pfn_valid(pfn)) {
1147     + flush_tlb_page(vma, vmaddr);
1148     + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
1149     + }
1150     }
1151    
1152     #ifdef CONFIG_PARISC_TMPALIAS
1153     diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
1154     index 3295ef4..f0b6722 100644
1155     --- a/arch/parisc/kernel/inventory.c
1156     +++ b/arch/parisc/kernel/inventory.c
1157     @@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
1158     /* REVISIT: who is the consumer of this? not sure yet... */
1159     dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
1160     dev->pmod_loc = pa_pdc_cell->mod_location;
1161     + dev->mod0 = pa_pdc_cell->mod[0];
1162    
1163     register_parisc_device(dev); /* advertise device */
1164    
1165     diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
1166     index ffbaabe..48cfc85 100644
1167     --- a/arch/powerpc/include/asm/smp.h
1168     +++ b/arch/powerpc/include/asm/smp.h
1169     @@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
1170     #define smp_setup_cpu_maps()
1171     static inline void inhibit_secondary_onlining(void) {}
1172     static inline void uninhibit_secondary_onlining(void) {}
1173     +static inline const struct cpumask *cpu_sibling_mask(int cpu)
1174     +{
1175     + return cpumask_of(cpu);
1176     +}
1177    
1178     #endif /* CONFIG_SMP */
1179    
1180     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
1181     index 2859a1f..cafad40 100644
1182     --- a/arch/powerpc/mm/numa.c
1183     +++ b/arch/powerpc/mm/numa.c
1184     @@ -27,6 +27,7 @@
1185     #include <linux/seq_file.h>
1186     #include <linux/uaccess.h>
1187     #include <linux/slab.h>
1188     +#include <asm/cputhreads.h>
1189     #include <asm/sparsemem.h>
1190     #include <asm/prom.h>
1191     #include <asm/smp.h>
1192     @@ -1319,7 +1320,8 @@ static int update_cpu_associativity_changes_mask(void)
1193     }
1194     }
1195     if (changed) {
1196     - cpumask_set_cpu(cpu, changes);
1197     + cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1198     + cpu = cpu_last_thread_sibling(cpu);
1199     }
1200     }
1201    
1202     @@ -1427,7 +1429,7 @@ static int update_cpu_topology(void *data)
1203     if (!data)
1204     return -EINVAL;
1205    
1206     - cpu = get_cpu();
1207     + cpu = smp_processor_id();
1208    
1209     for (update = data; update; update = update->next) {
1210     if (cpu != update->cpu)
1211     @@ -1447,12 +1449,12 @@ static int update_cpu_topology(void *data)
1212     */
1213     int arch_update_cpu_topology(void)
1214     {
1215     - unsigned int cpu, changed = 0;
1216     + unsigned int cpu, sibling, changed = 0;
1217     struct topology_update_data *updates, *ud;
1218     unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1219     cpumask_t updated_cpus;
1220     struct device *dev;
1221     - int weight, i = 0;
1222     + int weight, new_nid, i = 0;
1223    
1224     weight = cpumask_weight(&cpu_associativity_changes_mask);
1225     if (!weight)
1226     @@ -1465,19 +1467,46 @@ int arch_update_cpu_topology(void)
1227     cpumask_clear(&updated_cpus);
1228    
1229     for_each_cpu(cpu, &cpu_associativity_changes_mask) {
1230     - ud = &updates[i++];
1231     - ud->cpu = cpu;
1232     - vphn_get_associativity(cpu, associativity);
1233     - ud->new_nid = associativity_to_nid(associativity);
1234     -
1235     - if (ud->new_nid < 0 || !node_online(ud->new_nid))
1236     - ud->new_nid = first_online_node;
1237     + /*
1238     + * If siblings aren't flagged for changes, updates list
1239     + * will be too short. Skip on this update and set for next
1240     + * update.
1241     + */
1242     + if (!cpumask_subset(cpu_sibling_mask(cpu),
1243     + &cpu_associativity_changes_mask)) {
1244     + pr_info("Sibling bits not set for associativity "
1245     + "change, cpu%d\n", cpu);
1246     + cpumask_or(&cpu_associativity_changes_mask,
1247     + &cpu_associativity_changes_mask,
1248     + cpu_sibling_mask(cpu));
1249     + cpu = cpu_last_thread_sibling(cpu);
1250     + continue;
1251     + }
1252    
1253     - ud->old_nid = numa_cpu_lookup_table[cpu];
1254     - cpumask_set_cpu(cpu, &updated_cpus);
1255     + /* Use associativity from first thread for all siblings */
1256     + vphn_get_associativity(cpu, associativity);
1257     + new_nid = associativity_to_nid(associativity);
1258     + if (new_nid < 0 || !node_online(new_nid))
1259     + new_nid = first_online_node;
1260     +
1261     + if (new_nid == numa_cpu_lookup_table[cpu]) {
1262     + cpumask_andnot(&cpu_associativity_changes_mask,
1263     + &cpu_associativity_changes_mask,
1264     + cpu_sibling_mask(cpu));
1265     + cpu = cpu_last_thread_sibling(cpu);
1266     + continue;
1267     + }
1268    
1269     - if (i < weight)
1270     - ud->next = &updates[i];
1271     + for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
1272     + ud = &updates[i++];
1273     + ud->cpu = sibling;
1274     + ud->new_nid = new_nid;
1275     + ud->old_nid = numa_cpu_lookup_table[sibling];
1276     + cpumask_set_cpu(sibling, &updated_cpus);
1277     + if (i < weight)
1278     + ud->next = &updates[i];
1279     + }
1280     + cpu = cpu_last_thread_sibling(cpu);
1281     }
1282    
1283     stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
1284     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
1285     index da183c5..97dcbea 100644
1286     --- a/arch/s390/Kconfig
1287     +++ b/arch/s390/Kconfig
1288     @@ -227,11 +227,12 @@ config MARCH_Z196
1289     not work on older machines.
1290    
1291     config MARCH_ZEC12
1292     - bool "IBM zEC12"
1293     + bool "IBM zBC12 and zEC12"
1294     select HAVE_MARCH_ZEC12_FEATURES if 64BIT
1295     help
1296     - Select this to enable optimizations for IBM zEC12 (2827 series). The
1297     - kernel will be slightly faster but will not work on older machines.
1298     + Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
1299     + 2827 series). The kernel will be slightly faster but will not work on
1300     + older machines.
1301    
1302     endchoice
1303    
1304     diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
1305     index 4d8604e..7d46767 100644
1306     --- a/arch/s390/include/asm/bitops.h
1307     +++ b/arch/s390/include/asm/bitops.h
1308     @@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
1309     size -= offset;
1310     p = addr + offset / BITS_PER_LONG;
1311     if (bit) {
1312     - set = __flo_word(0, *p & (~0UL << bit));
1313     + set = __flo_word(0, *p & (~0UL >> bit));
1314     if (set >= size)
1315     return size + offset;
1316     if (set < BITS_PER_LONG)
1317     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1318     index 0a49095..8ad9413 100644
1319     --- a/arch/s390/kernel/setup.c
1320     +++ b/arch/s390/kernel/setup.c
1321     @@ -998,6 +998,7 @@ static void __init setup_hwcaps(void)
1322     strcpy(elf_platform, "z196");
1323     break;
1324     case 0x2827:
1325     + case 0x2828:
1326     strcpy(elf_platform, "zEC12");
1327     break;
1328     }
1329     diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
1330     index 89ebae4..eba15f1 100644
1331     --- a/arch/s390/mm/init.c
1332     +++ b/arch/s390/mm/init.c
1333     @@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
1334     order = 2;
1335     break;
1336     case 0x2827: /* zEC12 */
1337     + case 0x2828: /* zEC12 */
1338     default:
1339     order = 5;
1340     break;
1341     diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
1342     index ffeb17c..930783d 100644
1343     --- a/arch/s390/oprofile/init.c
1344     +++ b/arch/s390/oprofile/init.c
1345     @@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
1346     switch (id.machine) {
1347     case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
1348     case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
1349     - case 0x2827: ops->cpu_type = "s390/zEC12"; break;
1350     + case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
1351     default: return -ENODEV;
1352     }
1353     }
1354     diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
1355     index 94ab6b9..63bdb29 100644
1356     --- a/arch/x86/kernel/early-quirks.c
1357     +++ b/arch/x86/kernel/early-quirks.c
1358     @@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
1359     static void __init intel_remapping_check(int num, int slot, int func)
1360     {
1361     u8 revision;
1362     + u16 device;
1363    
1364     + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
1365     revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
1366    
1367     /*
1368     - * Revision 0x13 of this chipset supports irq remapping
1369     - * but has an erratum that breaks its behavior, flag it as such
1370     + * Revision 13 of all triggering devices id in this quirk have
1371     + * a problem draining interrupts when irq remapping is enabled,
1372     + * and should be flagged as broken. Additionally revisions 0x12
1373     + * and 0x22 of device id 0x3405 has this problem.
1374     */
1375     if (revision == 0x13)
1376     set_irq_remapping_broken();
1377     + else if ((device == 0x3405) &&
1378     + ((revision == 0x12) ||
1379     + (revision == 0x22)))
1380     + set_irq_remapping_broken();
1381    
1382     }
1383    
1384     @@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
1385     PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
1386     { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
1387     PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
1388     + { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
1389     + PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
1390     { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
1391     PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
1392     {}
1393     diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
1394     index cb33909..f7ea30d 100644
1395     --- a/arch/x86/kernel/i387.c
1396     +++ b/arch/x86/kernel/i387.c
1397     @@ -116,7 +116,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
1398    
1399     if (cpu_has_fxsr) {
1400     memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
1401     - asm volatile("fxsave %0" : : "m" (fx_scratch));
1402     + asm volatile("fxsave %0" : "+m" (fx_scratch));
1403     mask = fx_scratch.mxcsr_mask;
1404     if (mask == 0)
1405     mask = 0x0000ffbf;
1406     diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
1407     index e710045..9533271 100644
1408     --- a/drivers/acpi/battery.c
1409     +++ b/drivers/acpi/battery.c
1410     @@ -117,6 +117,7 @@ struct acpi_battery {
1411     struct acpi_device *device;
1412     struct notifier_block pm_nb;
1413     unsigned long update_time;
1414     + int revision;
1415     int rate_now;
1416     int capacity_now;
1417     int voltage_now;
1418     @@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
1419     };
1420    
1421     static struct acpi_offsets extended_info_offsets[] = {
1422     + {offsetof(struct acpi_battery, revision), 0},
1423     {offsetof(struct acpi_battery, power_unit), 0},
1424     {offsetof(struct acpi_battery, design_capacity), 0},
1425     {offsetof(struct acpi_battery, full_charge_capacity), 0},
1426     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
1427     index d89ef86..69b45fc 100644
1428     --- a/drivers/block/xen-blkfront.c
1429     +++ b/drivers/block/xen-blkfront.c
1430     @@ -75,6 +75,7 @@ struct blk_shadow {
1431     struct blkif_request req;
1432     struct request *request;
1433     struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
1434     + struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
1435     };
1436    
1437     static DEFINE_MUTEX(blkfront_mutex);
1438     @@ -98,7 +99,6 @@ struct blkfront_info
1439     enum blkif_state connected;
1440     int ring_ref;
1441     struct blkif_front_ring ring;
1442     - struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
1443     unsigned int evtchn, irq;
1444     struct request_queue *rq;
1445     struct work_struct work;
1446     @@ -422,11 +422,11 @@ static int blkif_queue_request(struct request *req)
1447     ring_req->u.discard.flag = 0;
1448     } else {
1449     ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
1450     - info->sg);
1451     + info->shadow[id].sg);
1452     BUG_ON(ring_req->u.rw.nr_segments >
1453     BLKIF_MAX_SEGMENTS_PER_REQUEST);
1454    
1455     - for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
1456     + for_each_sg(info->shadow[id].sg, sg, ring_req->u.rw.nr_segments, i) {
1457     fsect = sg->offset >> 9;
1458     lsect = fsect + (sg->length >> 9) - 1;
1459    
1460     @@ -867,12 +867,12 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1461     struct blkif_response *bret)
1462     {
1463     int i = 0;
1464     - struct bio_vec *bvec;
1465     - struct req_iterator iter;
1466     - unsigned long flags;
1467     + struct scatterlist *sg;
1468     char *bvec_data;
1469     void *shared_data;
1470     - unsigned int offset = 0;
1471     + int nseg;
1472     +
1473     + nseg = s->req.u.rw.nr_segments;
1474    
1475     if (bret->operation == BLKIF_OP_READ) {
1476     /*
1477     @@ -881,19 +881,16 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1478     * than PAGE_SIZE, we have to keep track of the current offset,
1479     * to be sure we are copying the data from the right shared page.
1480     */
1481     - rq_for_each_segment(bvec, s->request, iter) {
1482     - BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
1483     - if (bvec->bv_offset < offset)
1484     - i++;
1485     - BUG_ON(i >= s->req.u.rw.nr_segments);
1486     + for_each_sg(s->sg, sg, nseg, i) {
1487     + BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1488     shared_data = kmap_atomic(
1489     pfn_to_page(s->grants_used[i]->pfn));
1490     - bvec_data = bvec_kmap_irq(bvec, &flags);
1491     - memcpy(bvec_data, shared_data + bvec->bv_offset,
1492     - bvec->bv_len);
1493     - bvec_kunmap_irq(bvec_data, &flags);
1494     + bvec_data = kmap_atomic(sg_page(sg));
1495     + memcpy(bvec_data + sg->offset,
1496     + shared_data + sg->offset,
1497     + sg->length);
1498     + kunmap_atomic(bvec_data);
1499     kunmap_atomic(shared_data);
1500     - offset = bvec->bv_offset + bvec->bv_len;
1501     }
1502     }
1503     /* Add the persistent grant into the list of free grants */
1504     @@ -1022,7 +1019,7 @@ static int setup_blkring(struct xenbus_device *dev,
1505     struct blkfront_info *info)
1506     {
1507     struct blkif_sring *sring;
1508     - int err;
1509     + int err, i;
1510    
1511     info->ring_ref = GRANT_INVALID_REF;
1512    
1513     @@ -1034,7 +1031,8 @@ static int setup_blkring(struct xenbus_device *dev,
1514     SHARED_RING_INIT(sring);
1515     FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
1516    
1517     - sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1518     + for (i = 0; i < BLK_RING_SIZE; i++)
1519     + sg_init_table(info->shadow[i].sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
1520    
1521     /* Allocate memory for grants */
1522     err = fill_grant_buffer(info, BLK_RING_SIZE *
1523     diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
1524     index 11f467c..a12b923 100644
1525     --- a/drivers/bluetooth/ath3k.c
1526     +++ b/drivers/bluetooth/ath3k.c
1527     @@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
1528     { USB_DEVICE(0x0489, 0xe04e) },
1529     { USB_DEVICE(0x0489, 0xe056) },
1530     { USB_DEVICE(0x0489, 0xe04d) },
1531     + { USB_DEVICE(0x04c5, 0x1330) },
1532     + { USB_DEVICE(0x13d3, 0x3402) },
1533     + { USB_DEVICE(0x0cf3, 0x3121) },
1534     + { USB_DEVICE(0x0cf3, 0xe003) },
1535    
1536     /* Atheros AR5BBU12 with sflash firmware */
1537     { USB_DEVICE(0x0489, 0xE02C) },
1538     @@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
1539     { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
1540     { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
1541     { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
1542     + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
1543     + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
1544     + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
1545     + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
1546    
1547     /* Atheros AR5BBU22 with sflash firmware */
1548     { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
1549     @@ -193,24 +201,44 @@ error:
1550    
1551     static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
1552     {
1553     - int pipe = 0;
1554     + int ret, pipe = 0;
1555     + char *buf;
1556     +
1557     + buf = kmalloc(sizeof(*buf), GFP_KERNEL);
1558     + if (!buf)
1559     + return -ENOMEM;
1560    
1561     pipe = usb_rcvctrlpipe(udev, 0);
1562     - return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
1563     - USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
1564     - state, 0x01, USB_CTRL_SET_TIMEOUT);
1565     + ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
1566     + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
1567     + buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
1568     +
1569     + *state = *buf;
1570     + kfree(buf);
1571     +
1572     + return ret;
1573     }
1574    
1575     static int ath3k_get_version(struct usb_device *udev,
1576     struct ath3k_version *version)
1577     {
1578     - int pipe = 0;
1579     + int ret, pipe = 0;
1580     + struct ath3k_version *buf;
1581     + const int size = sizeof(*buf);
1582     +
1583     + buf = kmalloc(size, GFP_KERNEL);
1584     + if (!buf)
1585     + return -ENOMEM;
1586    
1587     pipe = usb_rcvctrlpipe(udev, 0);
1588     - return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
1589     - USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
1590     - sizeof(struct ath3k_version),
1591     - USB_CTRL_SET_TIMEOUT);
1592     + ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
1593     + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
1594     + buf, size, USB_CTRL_SET_TIMEOUT);
1595     +
1596     + memcpy(version, buf, size);
1597     + kfree(buf);
1598     +
1599     + return ret;
1600     }
1601    
1602     static int ath3k_load_fwfile(struct usb_device *udev,
1603     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1604     index 7a7e5f8..d0b3d90 100644
1605     --- a/drivers/bluetooth/btusb.c
1606     +++ b/drivers/bluetooth/btusb.c
1607     @@ -57,6 +57,9 @@ static struct usb_device_id btusb_table[] = {
1608     /* Apple-specific (Broadcom) devices */
1609     { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
1610    
1611     + /* MediaTek MT76x0E */
1612     + { USB_DEVICE(0x0e8d, 0x763f) },
1613     +
1614     /* Broadcom SoftSailing reporting vendor specific */
1615     { USB_DEVICE(0x0a5c, 0x21e1) },
1616    
1617     @@ -151,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
1618     { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
1619     { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
1620     { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
1621     + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
1622     + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
1623     + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
1624     + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
1625    
1626     /* Atheros AR5BBU12 with sflash firmware */
1627     { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
1628     @@ -1092,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
1629     if (IS_ERR(skb)) {
1630     BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
1631     hdev->name, cmd->opcode, PTR_ERR(skb));
1632     - return -PTR_ERR(skb);
1633     + return PTR_ERR(skb);
1634     }
1635    
1636     /* It ensures that the returned event matches the event data read from
1637     @@ -1144,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1638     if (IS_ERR(skb)) {
1639     BT_ERR("%s sending initial HCI reset command failed (%ld)",
1640     hdev->name, PTR_ERR(skb));
1641     - return -PTR_ERR(skb);
1642     + return PTR_ERR(skb);
1643     }
1644     kfree_skb(skb);
1645    
1646     @@ -1158,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1647     if (IS_ERR(skb)) {
1648     BT_ERR("%s reading Intel fw version command failed (%ld)",
1649     hdev->name, PTR_ERR(skb));
1650     - return -PTR_ERR(skb);
1651     + return PTR_ERR(skb);
1652     }
1653    
1654     if (skb->len != sizeof(*ver)) {
1655     @@ -1216,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1656     BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
1657     hdev->name, PTR_ERR(skb));
1658     release_firmware(fw);
1659     - return -PTR_ERR(skb);
1660     + return PTR_ERR(skb);
1661     }
1662    
1663     if (skb->data[0]) {
1664     @@ -1273,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
1665     if (IS_ERR(skb)) {
1666     BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1667     hdev->name, PTR_ERR(skb));
1668     - return -PTR_ERR(skb);
1669     + return PTR_ERR(skb);
1670     }
1671     kfree_skb(skb);
1672    
1673     @@ -1289,7 +1296,7 @@ exit_mfg_disable:
1674     if (IS_ERR(skb)) {
1675     BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1676     hdev->name, PTR_ERR(skb));
1677     - return -PTR_ERR(skb);
1678     + return PTR_ERR(skb);
1679     }
1680     kfree_skb(skb);
1681    
1682     @@ -1307,7 +1314,7 @@ exit_mfg_deactivate:
1683     if (IS_ERR(skb)) {
1684     BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
1685     hdev->name, PTR_ERR(skb));
1686     - return -PTR_ERR(skb);
1687     + return PTR_ERR(skb);
1688     }
1689     kfree_skb(skb);
1690    
1691     diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
1692     index 94821ab..9576fad 100644
1693     --- a/drivers/char/agp/parisc-agp.c
1694     +++ b/drivers/char/agp/parisc-agp.c
1695     @@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
1696     off_t j, io_pg_start;
1697     int io_pg_count;
1698    
1699     - if (type != 0 || mem->type != 0) {
1700     + if (type != mem->type ||
1701     + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
1702     return -EINVAL;
1703     }
1704    
1705     @@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1706     struct _parisc_agp_info *info = &parisc_agp_info;
1707     int i, io_pg_start, io_pg_count;
1708    
1709     - if (type != 0 || mem->type != 0) {
1710     + if (type != mem->type ||
1711     + agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
1712     return -EINVAL;
1713     }
1714    
1715     diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
1716     index eb7f147..43577ca 100644
1717     --- a/drivers/char/hw_random/bcm2835-rng.c
1718     +++ b/drivers/char/hw_random/bcm2835-rng.c
1719     @@ -110,4 +110,4 @@ module_platform_driver(bcm2835_rng_driver);
1720    
1721     MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
1722     MODULE_DESCRIPTION("BCM2835 Random Number Generator (RNG) driver");
1723     -MODULE_LICENSE("GPLv2");
1724     +MODULE_LICENSE("GPL v2");
1725     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1726     index 178fe7a..6485547 100644
1727     --- a/drivers/cpufreq/cpufreq.c
1728     +++ b/drivers/cpufreq/cpufreq.c
1729     @@ -1075,14 +1075,11 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1730     __func__, cpu_dev->id, cpu);
1731     }
1732    
1733     - if ((cpus == 1) && (cpufreq_driver->target))
1734     - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1735     -
1736     - pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1737     - cpufreq_cpu_put(data);
1738     -
1739     /* If cpu is last user of policy, free policy */
1740     if (cpus == 1) {
1741     + if (cpufreq_driver->target)
1742     + __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1743     +
1744     lock_policy_rwsem_read(cpu);
1745     kobj = &data->kobj;
1746     cmp = &data->kobj_unregister;
1747     @@ -1103,9 +1100,13 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1748     free_cpumask_var(data->related_cpus);
1749     free_cpumask_var(data->cpus);
1750     kfree(data);
1751     - } else if (cpufreq_driver->target) {
1752     - __cpufreq_governor(data, CPUFREQ_GOV_START);
1753     - __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1754     + } else {
1755     + pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1756     + cpufreq_cpu_put(data);
1757     + if (cpufreq_driver->target) {
1758     + __cpufreq_governor(data, CPUFREQ_GOV_START);
1759     + __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1760     + }
1761     }
1762    
1763     per_cpu(cpufreq_policy_cpu, cpu) = -1;
1764     diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
1765     index fe343a0..bc580b6 100644
1766     --- a/drivers/cpuidle/governors/menu.c
1767     +++ b/drivers/cpuidle/governors/menu.c
1768     @@ -28,13 +28,6 @@
1769     #define MAX_INTERESTING 50000
1770     #define STDDEV_THRESH 400
1771    
1772     -/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
1773     -#define MAX_DEVIATION 60
1774     -
1775     -static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
1776     -static DEFINE_PER_CPU(int, hrtimer_status);
1777     -/* menu hrtimer mode */
1778     -enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
1779    
1780     /*
1781     * Concepts and ideas behind the menu governor
1782     @@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
1783     *
1784     */
1785    
1786     -/*
1787     - * The C-state residency is so long that is is worthwhile to exit
1788     - * from the shallow C-state and re-enter into a deeper C-state.
1789     - */
1790     -static unsigned int perfect_cstate_ms __read_mostly = 30;
1791     -module_param(perfect_cstate_ms, uint, 0000);
1792     -
1793     struct menu_device {
1794     int last_state_idx;
1795     int needs_update;
1796     @@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
1797     return div_u64(dividend + (divisor / 2), divisor);
1798     }
1799    
1800     -/* Cancel the hrtimer if it is not triggered yet */
1801     -void menu_hrtimer_cancel(void)
1802     -{
1803     - int cpu = smp_processor_id();
1804     - struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
1805     -
1806     - /* The timer is still not time out*/
1807     - if (per_cpu(hrtimer_status, cpu)) {
1808     - hrtimer_cancel(hrtmr);
1809     - per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
1810     - }
1811     -}
1812     -EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
1813     -
1814     -/* Call back for hrtimer is triggered */
1815     -static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
1816     -{
1817     - int cpu = smp_processor_id();
1818     - struct menu_device *data = &per_cpu(menu_devices, cpu);
1819     -
1820     - /* In general case, the expected residency is much larger than
1821     - * deepest C-state target residency, but prediction logic still
1822     - * predicts a small predicted residency, so the prediction
1823     - * history is totally broken if the timer is triggered.
1824     - * So reset the correction factor.
1825     - */
1826     - if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
1827     - data->correction_factor[data->bucket] = RESOLUTION * DECAY;
1828     -
1829     - per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
1830     -
1831     - return HRTIMER_NORESTART;
1832     -}
1833     -
1834     /*
1835     * Try detecting repeating patterns by keeping track of the last 8
1836     * intervals, and checking if the standard deviation of that set
1837     * of points is below a threshold. If it is... then use the
1838     * average of these 8 points as the estimated value.
1839     */
1840     -static u32 get_typical_interval(struct menu_device *data)
1841     +static void get_typical_interval(struct menu_device *data)
1842     {
1843     int i = 0, divisor = 0;
1844     uint64_t max = 0, avg = 0, stddev = 0;
1845     int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
1846     - unsigned int ret = 0;
1847    
1848     again:
1849    
1850     @@ -291,16 +242,13 @@ again:
1851     if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
1852     || stddev <= 20) {
1853     data->predicted_us = avg;
1854     - ret = 1;
1855     - return ret;
1856     + return;
1857    
1858     } else if ((divisor * 4) > INTERVALS * 3) {
1859     /* Exclude the max interval */
1860     thresh = max - 1;
1861     goto again;
1862     }
1863     -
1864     - return ret;
1865     }
1866    
1867     /**
1868     @@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
1869     int i;
1870     int multiplier;
1871     struct timespec t;
1872     - int repeat = 0, low_predicted = 0;
1873     - int cpu = smp_processor_id();
1874     - struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
1875    
1876     if (data->needs_update) {
1877     menu_update(drv, dev);
1878     @@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
1879     data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
1880     RESOLUTION * DECAY);
1881    
1882     - repeat = get_typical_interval(data);
1883     + get_typical_interval(data);
1884    
1885     /*
1886     * We want to default to C1 (hlt), not to busy polling
1887     @@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
1888    
1889     if (s->disabled || su->disable)
1890     continue;
1891     - if (s->target_residency > data->predicted_us) {
1892     - low_predicted = 1;
1893     + if (s->target_residency > data->predicted_us)
1894     continue;
1895     - }
1896     if (s->exit_latency > latency_req)
1897     continue;
1898     if (s->exit_latency * multiplier > data->predicted_us)
1899     @@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
1900     data->exit_us = s->exit_latency;
1901     }
1902    
1903     - /* not deepest C-state chosen for low predicted residency */
1904     - if (low_predicted) {
1905     - unsigned int timer_us = 0;
1906     - unsigned int perfect_us = 0;
1907     -
1908     - /*
1909     - * Set a timer to detect whether this sleep is much
1910     - * longer than repeat mode predicted. If the timer
1911     - * triggers, the code will evaluate whether to put
1912     - * the CPU into a deeper C-state.
1913     - * The timer is cancelled on CPU wakeup.
1914     - */
1915     - timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
1916     -
1917     - perfect_us = perfect_cstate_ms * 1000;
1918     -
1919     - if (repeat && (4 * timer_us < data->expected_us)) {
1920     - RCU_NONIDLE(hrtimer_start(hrtmr,
1921     - ns_to_ktime(1000 * timer_us),
1922     - HRTIMER_MODE_REL_PINNED));
1923     - /* In repeat case, menu hrtimer is started */
1924     - per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
1925     - } else if (perfect_us < data->expected_us) {
1926     - /*
1927     - * The next timer is long. This could be because
1928     - * we did not make a useful prediction.
1929     - * In that case, it makes sense to re-enter
1930     - * into a deeper C-state after some time.
1931     - */
1932     - RCU_NONIDLE(hrtimer_start(hrtmr,
1933     - ns_to_ktime(1000 * timer_us),
1934     - HRTIMER_MODE_REL_PINNED));
1935     - /* In general case, menu hrtimer is started */
1936     - per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
1937     - }
1938     -
1939     - }
1940     -
1941     return data->last_state_idx;
1942     }
1943    
1944     @@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
1945     struct cpuidle_device *dev)
1946     {
1947     struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
1948     - struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
1949     - hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1950     - t->function = menu_hrtimer_notify;
1951    
1952     memset(data, 0, sizeof(struct menu_device));
1953    
1954     diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1955     index 7ec82f0..4c2f465 100644
1956     --- a/drivers/dma/pl330.c
1957     +++ b/drivers/dma/pl330.c
1958     @@ -2527,6 +2527,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
1959     /* Assign cookies to all nodes */
1960     while (!list_empty(&last->node)) {
1961     desc = list_entry(last->node.next, struct dma_pl330_desc, node);
1962     + if (pch->cyclic) {
1963     + desc->txd.callback = last->txd.callback;
1964     + desc->txd.callback_param = last->txd.callback_param;
1965     + }
1966    
1967     dma_cookie_assign(&desc->txd);
1968    
1969     @@ -2710,45 +2714,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
1970     size_t period_len, enum dma_transfer_direction direction,
1971     unsigned long flags, void *context)
1972     {
1973     - struct dma_pl330_desc *desc;
1974     + struct dma_pl330_desc *desc = NULL, *first = NULL;
1975     struct dma_pl330_chan *pch = to_pchan(chan);
1976     + struct dma_pl330_dmac *pdmac = pch->dmac;
1977     + unsigned int i;
1978     dma_addr_t dst;
1979     dma_addr_t src;
1980    
1981     - desc = pl330_get_desc(pch);
1982     - if (!desc) {
1983     - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
1984     - __func__, __LINE__);
1985     + if (len % period_len != 0)
1986     return NULL;
1987     - }
1988    
1989     - switch (direction) {
1990     - case DMA_MEM_TO_DEV:
1991     - desc->rqcfg.src_inc = 1;
1992     - desc->rqcfg.dst_inc = 0;
1993     - desc->req.rqtype = MEMTODEV;
1994     - src = dma_addr;
1995     - dst = pch->fifo_addr;
1996     - break;
1997     - case DMA_DEV_TO_MEM:
1998     - desc->rqcfg.src_inc = 0;
1999     - desc->rqcfg.dst_inc = 1;
2000     - desc->req.rqtype = DEVTOMEM;
2001     - src = pch->fifo_addr;
2002     - dst = dma_addr;
2003     - break;
2004     - default:
2005     + if (!is_slave_direction(direction)) {
2006     dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
2007     __func__, __LINE__);
2008     return NULL;
2009     }
2010    
2011     - desc->rqcfg.brst_size = pch->burst_sz;
2012     - desc->rqcfg.brst_len = 1;
2013     + for (i = 0; i < len / period_len; i++) {
2014     + desc = pl330_get_desc(pch);
2015     + if (!desc) {
2016     + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
2017     + __func__, __LINE__);
2018    
2019     - pch->cyclic = true;
2020     + if (!first)
2021     + return NULL;
2022     +
2023     + spin_lock_irqsave(&pdmac->pool_lock, flags);
2024     +
2025     + while (!list_empty(&first->node)) {
2026     + desc = list_entry(first->node.next,
2027     + struct dma_pl330_desc, node);
2028     + list_move_tail(&desc->node, &pdmac->desc_pool);
2029     + }
2030     +
2031     + list_move_tail(&first->node, &pdmac->desc_pool);
2032    
2033     - fill_px(&desc->px, dst, src, period_len);
2034     + spin_unlock_irqrestore(&pdmac->pool_lock, flags);
2035     +
2036     + return NULL;
2037     + }
2038     +
2039     + switch (direction) {
2040     + case DMA_MEM_TO_DEV:
2041     + desc->rqcfg.src_inc = 1;
2042     + desc->rqcfg.dst_inc = 0;
2043     + desc->req.rqtype = MEMTODEV;
2044     + src = dma_addr;
2045     + dst = pch->fifo_addr;
2046     + break;
2047     + case DMA_DEV_TO_MEM:
2048     + desc->rqcfg.src_inc = 0;
2049     + desc->rqcfg.dst_inc = 1;
2050     + desc->req.rqtype = DEVTOMEM;
2051     + src = pch->fifo_addr;
2052     + dst = dma_addr;
2053     + break;
2054     + default:
2055     + break;
2056     + }
2057     +
2058     + desc->rqcfg.brst_size = pch->burst_sz;
2059     + desc->rqcfg.brst_len = 1;
2060     + fill_px(&desc->px, dst, src, period_len);
2061     +
2062     + if (!first)
2063     + first = desc;
2064     + else
2065     + list_add_tail(&desc->node, &first->node);
2066     +
2067     + dma_addr += period_len;
2068     + }
2069     +
2070     + if (!desc)
2071     + return NULL;
2072     +
2073     + pch->cyclic = true;
2074     + desc->txd.flags = flags;
2075    
2076     return &desc->txd;
2077     }
2078     diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
2079     index fb961bb..16e674a 100644
2080     --- a/drivers/gpu/drm/i915/intel_ddi.c
2081     +++ b/drivers/gpu/drm/i915/intel_ddi.c
2082     @@ -684,7 +684,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
2083     struct intel_digital_port *intel_dig_port =
2084     enc_to_dig_port(encoder);
2085    
2086     - intel_dp->DP = intel_dig_port->port_reversal |
2087     + intel_dp->DP = intel_dig_port->saved_port_bits |
2088     DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
2089     switch (intel_dp->lane_count) {
2090     case 1:
2091     @@ -1324,7 +1324,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
2092     * enabling the port.
2093     */
2094     I915_WRITE(DDI_BUF_CTL(port),
2095     - intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
2096     + intel_dig_port->saved_port_bits |
2097     + DDI_BUF_CTL_ENABLE);
2098     } else if (type == INTEL_OUTPUT_EDP) {
2099     struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2100    
2101     @@ -1543,8 +1544,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
2102     intel_encoder->get_hw_state = intel_ddi_get_hw_state;
2103    
2104     intel_dig_port->port = port;
2105     - intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
2106     - DDI_BUF_PORT_REVERSAL;
2107     + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
2108     + (DDI_BUF_PORT_REVERSAL |
2109     + DDI_A_4_LANES);
2110     if (hdmi_connector)
2111     intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
2112     intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
2113     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2114     index e1f4e6e..eea5982 100644
2115     --- a/drivers/gpu/drm/i915/intel_display.c
2116     +++ b/drivers/gpu/drm/i915/intel_display.c
2117     @@ -4333,7 +4333,8 @@ static void vlv_update_pll(struct intel_crtc *crtc)
2118    
2119     static void i9xx_update_pll(struct intel_crtc *crtc,
2120     intel_clock_t *reduced_clock,
2121     - int num_connectors)
2122     + int num_connectors,
2123     + bool needs_tv_clock)
2124     {
2125     struct drm_device *dev = crtc->base.dev;
2126     struct drm_i915_private *dev_priv = dev->dev_private;
2127     @@ -4391,7 +4392,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
2128     if (INTEL_INFO(dev)->gen >= 4)
2129     dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2130    
2131     - if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
2132     + if (is_sdvo && needs_tv_clock)
2133     dpll |= PLL_REF_INPUT_TVCLKINBC;
2134     else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
2135     /* XXX: just matching BIOS for now */
2136     @@ -4716,7 +4717,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
2137     else
2138     i9xx_update_pll(intel_crtc,
2139     has_reduced_clock ? &reduced_clock : NULL,
2140     - num_connectors);
2141     + num_connectors,
2142     + is_sdvo && is_tv);
2143    
2144     /* Set up the display plane register */
2145     dspcntr = DISPPLANE_GAMMA_ENABLE;
2146     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2147     index 624a9e6..7cd5584 100644
2148     --- a/drivers/gpu/drm/i915/intel_drv.h
2149     +++ b/drivers/gpu/drm/i915/intel_drv.h
2150     @@ -426,7 +426,7 @@ struct intel_dp {
2151     struct intel_digital_port {
2152     struct intel_encoder base;
2153     enum port port;
2154     - u32 port_reversal;
2155     + u32 saved_port_bits;
2156     struct intel_dp dp;
2157     struct intel_hdmi hdmi;
2158     };
2159     diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2160     index f4dcfdd..aad18e6 100644
2161     --- a/drivers/gpu/drm/radeon/radeon.h
2162     +++ b/drivers/gpu/drm/radeon/radeon.h
2163     @@ -1145,6 +1145,8 @@ struct radeon_uvd {
2164     struct radeon_bo *vcpu_bo;
2165     void *cpu_addr;
2166     uint64_t gpu_addr;
2167     + void *saved_bo;
2168     + unsigned fw_size;
2169     atomic_t handles[RADEON_MAX_UVD_HANDLES];
2170     struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
2171     struct delayed_work idle_work;
2172     @@ -1684,7 +1686,6 @@ struct radeon_device {
2173     const struct firmware *rlc_fw; /* r6/700 RLC firmware */
2174     const struct firmware *mc_fw; /* NI MC firmware */
2175     const struct firmware *ce_fw; /* SI CE firmware */
2176     - const struct firmware *uvd_fw; /* UVD firmware */
2177     struct r600_blit r600_blit;
2178     struct r600_vram_scratch vram_scratch;
2179     int msi_enabled; /* msi enabled */
2180     diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
2181     index a2802b47..de36c47 100644
2182     --- a/drivers/gpu/drm/radeon/radeon_asic.c
2183     +++ b/drivers/gpu/drm/radeon/radeon_asic.c
2184     @@ -986,8 +986,8 @@ static struct radeon_asic r600_asic = {
2185     .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2186     .dma = &r600_copy_dma,
2187     .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2188     - .copy = &r600_copy_dma,
2189     - .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2190     + .copy = &r600_copy_blit,
2191     + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2192     },
2193     .surface = {
2194     .set_reg = r600_set_surface_reg,
2195     @@ -1074,8 +1074,8 @@ static struct radeon_asic rs780_asic = {
2196     .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2197     .dma = &r600_copy_dma,
2198     .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
2199     - .copy = &r600_copy_dma,
2200     - .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
2201     + .copy = &r600_copy_blit,
2202     + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2203     },
2204     .surface = {
2205     .set_reg = r600_set_surface_reg,
2206     diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
2207     index ddb8f8e..7ddb0ef 100644
2208     --- a/drivers/gpu/drm/radeon/radeon_fence.c
2209     +++ b/drivers/gpu/drm/radeon/radeon_fence.c
2210     @@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
2211    
2212     } else {
2213     /* put fence directly behind firmware */
2214     - index = ALIGN(rdev->uvd_fw->size, 8);
2215     + index = ALIGN(rdev->uvd.fw_size, 8);
2216     rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
2217     rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
2218     }
2219     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
2220     index cad735d..1b3a91b 100644
2221     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
2222     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
2223     @@ -55,6 +55,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
2224     int radeon_uvd_init(struct radeon_device *rdev)
2225     {
2226     struct platform_device *pdev;
2227     + const struct firmware *fw;
2228     unsigned long bo_size;
2229     const char *fw_name;
2230     int i, r;
2231     @@ -104,7 +105,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
2232     return -EINVAL;
2233     }
2234    
2235     - r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
2236     + r = request_firmware(&fw, fw_name, &pdev->dev);
2237     if (r) {
2238     dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
2239     fw_name);
2240     @@ -114,7 +115,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
2241    
2242     platform_device_unregister(pdev);
2243    
2244     - bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
2245     + bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
2246     RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
2247     r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
2248     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
2249     @@ -123,16 +124,35 @@ int radeon_uvd_init(struct radeon_device *rdev)
2250     return r;
2251     }
2252    
2253     - r = radeon_uvd_resume(rdev);
2254     - if (r)
2255     + r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
2256     + if (r) {
2257     + radeon_bo_unref(&rdev->uvd.vcpu_bo);
2258     + dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
2259     return r;
2260     + }
2261    
2262     - memset(rdev->uvd.cpu_addr, 0, bo_size);
2263     - memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
2264     + r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
2265     + &rdev->uvd.gpu_addr);
2266     + if (r) {
2267     + radeon_bo_unreserve(rdev->uvd.vcpu_bo);
2268     + radeon_bo_unref(&rdev->uvd.vcpu_bo);
2269     + dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
2270     + return r;
2271     + }
2272    
2273     - r = radeon_uvd_suspend(rdev);
2274     - if (r)
2275     + r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
2276     + if (r) {
2277     + dev_err(rdev->dev, "(%d) UVD map failed\n", r);
2278     return r;
2279     + }
2280     +
2281     + radeon_bo_unreserve(rdev->uvd.vcpu_bo);
2282     +
2283     + rdev->uvd.fw_size = fw->size;
2284     + memset(rdev->uvd.cpu_addr, 0, bo_size);
2285     + memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
2286     +
2287     + release_firmware(fw);
2288    
2289     for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
2290     atomic_set(&rdev->uvd.handles[i], 0);
2291     @@ -144,71 +164,47 @@ int radeon_uvd_init(struct radeon_device *rdev)
2292    
2293     void radeon_uvd_fini(struct radeon_device *rdev)
2294     {
2295     - radeon_uvd_suspend(rdev);
2296     - radeon_bo_unref(&rdev->uvd.vcpu_bo);
2297     -}
2298     -
2299     -int radeon_uvd_suspend(struct radeon_device *rdev)
2300     -{
2301     int r;
2302    
2303     if (rdev->uvd.vcpu_bo == NULL)
2304     - return 0;
2305     + return;
2306    
2307     r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
2308     if (!r) {
2309     radeon_bo_kunmap(rdev->uvd.vcpu_bo);
2310     radeon_bo_unpin(rdev->uvd.vcpu_bo);
2311     - rdev->uvd.cpu_addr = NULL;
2312     - if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
2313     - radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
2314     - }
2315     radeon_bo_unreserve(rdev->uvd.vcpu_bo);
2316     -
2317     - if (rdev->uvd.cpu_addr) {
2318     - radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
2319     - } else {
2320     - rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
2321     - }
2322     }
2323     - return r;
2324     +
2325     + radeon_bo_unref(&rdev->uvd.vcpu_bo);
2326     }
2327    
2328     -int radeon_uvd_resume(struct radeon_device *rdev)
2329     +int radeon_uvd_suspend(struct radeon_device *rdev)
2330     {
2331     - int r;
2332     + unsigned size;
2333    
2334     if (rdev->uvd.vcpu_bo == NULL)
2335     - return -EINVAL;
2336     + return 0;
2337    
2338     - r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
2339     - if (r) {
2340     - radeon_bo_unref(&rdev->uvd.vcpu_bo);
2341     - dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
2342     - return r;
2343     - }
2344     + size = radeon_bo_size(rdev->uvd.vcpu_bo);
2345     + rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
2346     + memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
2347    
2348     - /* Have been pin in cpu unmap unpin */
2349     - radeon_bo_kunmap(rdev->uvd.vcpu_bo);
2350     - radeon_bo_unpin(rdev->uvd.vcpu_bo);
2351     + return 0;
2352     +}
2353    
2354     - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
2355     - &rdev->uvd.gpu_addr);
2356     - if (r) {
2357     - radeon_bo_unreserve(rdev->uvd.vcpu_bo);
2358     - radeon_bo_unref(&rdev->uvd.vcpu_bo);
2359     - dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
2360     - return r;
2361     - }
2362     +int radeon_uvd_resume(struct radeon_device *rdev)
2363     +{
2364     + if (rdev->uvd.vcpu_bo == NULL)
2365     + return -EINVAL;
2366    
2367     - r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
2368     - if (r) {
2369     - dev_err(rdev->dev, "(%d) UVD map failed\n", r);
2370     - return r;
2371     + if (rdev->uvd.saved_bo != NULL) {
2372     + unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
2373     + memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
2374     + kfree(rdev->uvd.saved_bo);
2375     + rdev->uvd.saved_bo = NULL;
2376     }
2377    
2378     - radeon_bo_unreserve(rdev->uvd.vcpu_bo);
2379     -
2380     return 0;
2381     }
2382    
2383     diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
2384     index 4a62ad2..30ea14e 100644
2385     --- a/drivers/gpu/drm/radeon/rv770.c
2386     +++ b/drivers/gpu/drm/radeon/rv770.c
2387     @@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)
2388    
2389     /* programm the VCPU memory controller bits 0-27 */
2390     addr = rdev->uvd.gpu_addr >> 3;
2391     - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
2392     + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
2393     WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
2394     WREG32(UVD_VCPU_CACHE_SIZE0, size);
2395    
2396     diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
2397     index 328fb03..a41b5f3 100644
2398     --- a/drivers/hwmon/max6697.c
2399     +++ b/drivers/hwmon/max6697.c
2400     @@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client)
2401     if (ret < 0)
2402     return ret;
2403     ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
2404     - pdata->ideality_mask >> 1);
2405     + pdata->ideality_value);
2406     if (ret < 0)
2407     return ret;
2408     ret = i2c_smbus_write_byte_data(client,
2409     MAX6581_REG_IDEALITY_SELECT,
2410     - pdata->ideality_value);
2411     + pdata->ideality_mask >> 1);
2412     if (ret < 0)
2413     return ret;
2414     }
2415     diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
2416     index 0b9a79b..82fc86a 100644
2417     --- a/drivers/macintosh/windfarm_rm31.c
2418     +++ b/drivers/macintosh/windfarm_rm31.c
2419     @@ -439,15 +439,15 @@ static void backside_setup_pid(void)
2420    
2421     /* Slots fan */
2422     static const struct wf_pid_param slots_param = {
2423     - .interval = 5,
2424     - .history_len = 2,
2425     - .gd = 30 << 20,
2426     - .gp = 5 << 20,
2427     - .gr = 0,
2428     - .itarget = 40 << 16,
2429     - .additive = 1,
2430     - .min = 300,
2431     - .max = 4000,
2432     + .interval = 1,
2433     + .history_len = 20,
2434     + .gd = 0,
2435     + .gp = 0,
2436     + .gr = 0x00100000,
2437     + .itarget = 3200000,
2438     + .additive = 0,
2439     + .min = 20,
2440     + .max = 100,
2441     };
2442    
2443     static void slots_fan_tick(void)
2444     diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
2445     index a746ba2..a956053 100644
2446     --- a/drivers/net/arcnet/arcnet.c
2447     +++ b/drivers/net/arcnet/arcnet.c
2448     @@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
2449    
2450     soft = &pkt.soft.rfc1201;
2451    
2452     - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
2453     + lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
2454     if (pkt.hard.offset[0]) {
2455     ofs = pkt.hard.offset[0];
2456     length = 256 - ofs;
2457     diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
2458     index b2bf324..0f05565 100644
2459     --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
2460     +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
2461     @@ -520,6 +520,9 @@ struct atl1c_adapter {
2462     struct net_device *netdev;
2463     struct pci_dev *pdev;
2464     struct napi_struct napi;
2465     + struct page *rx_page;
2466     + unsigned int rx_page_offset;
2467     + unsigned int rx_frag_size;
2468     struct atl1c_hw hw;
2469     struct atl1c_hw_stats hw_stats;
2470     struct mii_if_info mii; /* MII interface info */
2471     diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2472     index 0ba9007..11cdf1d 100644
2473     --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2474     +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
2475     @@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
2476     static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
2477     struct net_device *dev)
2478     {
2479     + unsigned int head_size;
2480     int mtu = dev->mtu;
2481    
2482     adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
2483     roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
2484     +
2485     + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
2486     + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2487     + adapter->rx_frag_size = roundup_pow_of_two(head_size);
2488     }
2489    
2490     static netdev_features_t atl1c_fix_features(struct net_device *netdev,
2491     @@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
2492     kfree(adapter->tpd_ring[0].buffer_info);
2493     adapter->tpd_ring[0].buffer_info = NULL;
2494     }
2495     + if (adapter->rx_page) {
2496     + put_page(adapter->rx_page);
2497     + adapter->rx_page = NULL;
2498     + }
2499     }
2500    
2501     /**
2502     @@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
2503     skb_checksum_none_assert(skb);
2504     }
2505    
2506     +static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
2507     +{
2508     + struct sk_buff *skb;
2509     + struct page *page;
2510     +
2511     + if (adapter->rx_frag_size > PAGE_SIZE)
2512     + return netdev_alloc_skb(adapter->netdev,
2513     + adapter->rx_buffer_len);
2514     +
2515     + page = adapter->rx_page;
2516     + if (!page) {
2517     + adapter->rx_page = page = alloc_page(GFP_ATOMIC);
2518     + if (unlikely(!page))
2519     + return NULL;
2520     + adapter->rx_page_offset = 0;
2521     + }
2522     +
2523     + skb = build_skb(page_address(page) + adapter->rx_page_offset,
2524     + adapter->rx_frag_size);
2525     + if (likely(skb)) {
2526     + adapter->rx_page_offset += adapter->rx_frag_size;
2527     + if (adapter->rx_page_offset >= PAGE_SIZE)
2528     + adapter->rx_page = NULL;
2529     + else
2530     + get_page(page);
2531     + }
2532     + return skb;
2533     +}
2534     +
2535     static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
2536     {
2537     struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
2538     @@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
2539     while (next_info->flags & ATL1C_BUFFER_FREE) {
2540     rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
2541    
2542     - skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
2543     + skb = atl1c_alloc_skb(adapter);
2544     if (unlikely(!skb)) {
2545     if (netif_msg_rx_err(adapter))
2546     dev_warn(&pdev->dev, "alloc rx buffer failed\n");
2547     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
2548     index ac78077..7a77f37 100644
2549     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
2550     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
2551     @@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
2552    
2553     /* Enable arbiter */
2554     reg &= ~IXGBE_DPMCS_ARBDIS;
2555     - /* Enable DFP and Recycle mode */
2556     - reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
2557     reg |= IXGBE_DPMCS_TSOEF;
2558     +
2559     /* Configure Max TSO packet size 34KB including payload and headers */
2560     reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
2561    
2562     diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
2563     index 2c97901..593177d 100644
2564     --- a/drivers/net/ethernet/mellanox/mlx4/fw.c
2565     +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
2566     @@ -840,16 +840,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
2567     MLX4_CMD_NATIVE);
2568    
2569     if (!err && dev->caps.function != slave) {
2570     - /* if config MAC in DB use it */
2571     - if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
2572     - def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
2573     - else {
2574     - /* set slave default_mac address */
2575     - MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
2576     - def_mac += slave << 8;
2577     - priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
2578     - }
2579     -
2580     + def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
2581     MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
2582    
2583     /* get port type - currently only eth is enabled */
2584     diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
2585     index 8a43499..1b195fc 100644
2586     --- a/drivers/net/ethernet/mellanox/mlx4/main.c
2587     +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
2588     @@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
2589    
2590     dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
2591    
2592     - if (!enable_64b_cqe_eqe) {
2593     + if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
2594     if (dev_cap->flags &
2595     (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
2596     mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
2597     diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
2598     index 0352345..887aebe 100644
2599     --- a/drivers/net/ethernet/realtek/8139cp.c
2600     +++ b/drivers/net/ethernet/realtek/8139cp.c
2601     @@ -478,7 +478,7 @@ rx_status_loop:
2602    
2603     while (1) {
2604     u32 status, len;
2605     - dma_addr_t mapping;
2606     + dma_addr_t mapping, new_mapping;
2607     struct sk_buff *skb, *new_skb;
2608     struct cp_desc *desc;
2609     const unsigned buflen = cp->rx_buf_sz;
2610     @@ -520,6 +520,13 @@ rx_status_loop:
2611     goto rx_next;
2612     }
2613    
2614     + new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
2615     + PCI_DMA_FROMDEVICE);
2616     + if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
2617     + dev->stats.rx_dropped++;
2618     + goto rx_next;
2619     + }
2620     +
2621     dma_unmap_single(&cp->pdev->dev, mapping,
2622     buflen, PCI_DMA_FROMDEVICE);
2623    
2624     @@ -531,12 +538,11 @@ rx_status_loop:
2625    
2626     skb_put(skb, len);
2627    
2628     - mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
2629     - PCI_DMA_FROMDEVICE);
2630     cp->rx_skb[rx_tail] = new_skb;
2631    
2632     cp_rx_skb(cp, skb, desc);
2633     rx++;
2634     + mapping = new_mapping;
2635    
2636     rx_next:
2637     cp->rx_ring[rx_tail].opts2 = 0;
2638     @@ -716,6 +722,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
2639     TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
2640     }
2641    
2642     +static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
2643     + int first, int entry_last)
2644     +{
2645     + int frag, index;
2646     + struct cp_desc *txd;
2647     + skb_frag_t *this_frag;
2648     + for (frag = 0; frag+first < entry_last; frag++) {
2649     + index = first+frag;
2650     + cp->tx_skb[index] = NULL;
2651     + txd = &cp->tx_ring[index];
2652     + this_frag = &skb_shinfo(skb)->frags[frag];
2653     + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
2654     + skb_frag_size(this_frag), PCI_DMA_TODEVICE);
2655     + }
2656     +}
2657     +
2658     static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
2659     struct net_device *dev)
2660     {
2661     @@ -749,6 +771,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
2662    
2663     len = skb->len;
2664     mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
2665     + if (dma_mapping_error(&cp->pdev->dev, mapping))
2666     + goto out_dma_error;
2667     +
2668     txd->opts2 = opts2;
2669     txd->addr = cpu_to_le64(mapping);
2670     wmb();
2671     @@ -786,6 +811,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
2672     first_len = skb_headlen(skb);
2673     first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
2674     first_len, PCI_DMA_TODEVICE);
2675     + if (dma_mapping_error(&cp->pdev->dev, first_mapping))
2676     + goto out_dma_error;
2677     +
2678     cp->tx_skb[entry] = skb;
2679     entry = NEXT_TX(entry);
2680    
2681     @@ -799,6 +827,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
2682     mapping = dma_map_single(&cp->pdev->dev,
2683     skb_frag_address(this_frag),
2684     len, PCI_DMA_TODEVICE);
2685     + if (dma_mapping_error(&cp->pdev->dev, mapping)) {
2686     + unwind_tx_frag_mapping(cp, skb, first_entry, entry);
2687     + goto out_dma_error;
2688     + }
2689     +
2690     eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
2691    
2692     ctrl = eor | len | DescOwn;
2693     @@ -859,11 +892,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
2694     if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
2695     netif_stop_queue(dev);
2696    
2697     +out_unlock:
2698     spin_unlock_irqrestore(&cp->lock, intr_flags);
2699    
2700     cpw8(TxPoll, NormalTxPoll);
2701    
2702     return NETDEV_TX_OK;
2703     +out_dma_error:
2704     + kfree_skb(skb);
2705     + cp->dev->stats.tx_dropped++;
2706     + goto out_unlock;
2707     }
2708    
2709     /* Set or clear the multicast filter for this adaptor.
2710     @@ -1054,6 +1092,10 @@ static int cp_refill_rx(struct cp_private *cp)
2711    
2712     mapping = dma_map_single(&cp->pdev->dev, skb->data,
2713     cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
2714     + if (dma_mapping_error(&cp->pdev->dev, mapping)) {
2715     + kfree_skb(skb);
2716     + goto err_out;
2717     + }
2718     cp->rx_skb[i] = skb;
2719    
2720     cp->rx_ring[i].opts2 = 0;
2721     diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
2722     index 2397f0e..2738b5f 100644
2723     --- a/drivers/net/ethernet/sfc/filter.c
2724     +++ b/drivers/net/ethernet/sfc/filter.c
2725     @@ -1196,7 +1196,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
2726     EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
2727     ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
2728    
2729     - efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
2730     + efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
2731     + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
2732     + rxq_index);
2733     rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
2734     ip->daddr, ports[1], ip->saddr, ports[0]);
2735     if (rc)
2736     diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2737     index bd8758f..cea1f3d 100644
2738     --- a/drivers/net/usb/ax88179_178a.c
2739     +++ b/drivers/net/usb/ax88179_178a.c
2740     @@ -1029,10 +1029,10 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
2741     dev->mii.supports_gmii = 1;
2742    
2743     dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2744     - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
2745     + NETIF_F_RXCSUM;
2746    
2747     dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2748     - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
2749     + NETIF_F_RXCSUM;
2750    
2751     /* Enable checksum offload */
2752     *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
2753     @@ -1173,7 +1173,6 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
2754     if (((skb->len + 8) % frame_size) == 0)
2755     tx_hdr2 |= 0x80008000; /* Enable padding */
2756    
2757     - skb_linearize(skb);
2758     headroom = skb_headroom(skb);
2759     tailroom = skb_tailroom(skb);
2760    
2761     @@ -1317,10 +1316,10 @@ static int ax88179_reset(struct usbnet *dev)
2762     1, 1, tmp);
2763    
2764     dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2765     - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
2766     + NETIF_F_RXCSUM;
2767    
2768     dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2769     - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
2770     + NETIF_F_RXCSUM;
2771    
2772     /* Enable checksum offload */
2773     *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
2774     diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2775     index 7540974..66ebbac 100644
2776     --- a/drivers/net/usb/smsc75xx.c
2777     +++ b/drivers/net/usb/smsc75xx.c
2778     @@ -45,7 +45,6 @@
2779     #define EEPROM_MAC_OFFSET (0x01)
2780     #define DEFAULT_TX_CSUM_ENABLE (true)
2781     #define DEFAULT_RX_CSUM_ENABLE (true)
2782     -#define DEFAULT_TSO_ENABLE (true)
2783     #define SMSC75XX_INTERNAL_PHY_ID (1)
2784     #define SMSC75XX_TX_OVERHEAD (8)
2785     #define MAX_RX_FIFO_SIZE (20 * 1024)
2786     @@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2787    
2788     INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
2789    
2790     - if (DEFAULT_TX_CSUM_ENABLE) {
2791     + if (DEFAULT_TX_CSUM_ENABLE)
2792     dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2793     - if (DEFAULT_TSO_ENABLE)
2794     - dev->net->features |= NETIF_F_SG |
2795     - NETIF_F_TSO | NETIF_F_TSO6;
2796     - }
2797     +
2798     if (DEFAULT_RX_CSUM_ENABLE)
2799     dev->net->features |= NETIF_F_RXCSUM;
2800    
2801     dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2802     - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
2803     + NETIF_F_RXCSUM;
2804    
2805     ret = smsc75xx_wait_ready(dev, 0);
2806     if (ret < 0) {
2807     @@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
2808     {
2809     u32 tx_cmd_a, tx_cmd_b;
2810    
2811     - skb_linearize(skb);
2812     -
2813     if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
2814     struct sk_buff *skb2 =
2815     skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
2816     diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
2817     index f5dda84..75a6376 100644
2818     --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
2819     +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
2820     @@ -1289,7 +1289,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
2821    
2822     usb_set_intfdata(interface, NULL);
2823    
2824     - if (!unplugged && (hif_dev->flags & HIF_USB_START))
2825     + /* If firmware was loaded we should drop it
2826     + * go back to first stage bootloader. */
2827     + if (!unplugged && (hif_dev->flags & HIF_USB_READY))
2828     ath9k_hif_usb_reboot(udev);
2829    
2830     kfree(hif_dev);
2831     diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
2832     index a47f5e0..3b202ff 100644
2833     --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
2834     +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
2835     @@ -846,6 +846,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
2836     if (error != 0)
2837     goto err_rx;
2838    
2839     + ath9k_hw_disable(priv->ah);
2840     #ifdef CONFIG_MAC80211_LEDS
2841     /* must be initialized before ieee80211_register_hw */
2842     priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
2843     diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
2844     index 727b1f5..d57e5be 100644
2845     --- a/drivers/net/wireless/ath/wil6210/debugfs.c
2846     +++ b/drivers/net/wireless/ath/wil6210/debugfs.c
2847     @@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
2848     le16_to_cpu(hdr.type), hdr.flags);
2849     if (len <= MAX_MBOXITEM_SIZE) {
2850     int n = 0;
2851     - unsigned char printbuf[16 * 3 + 2];
2852     + char printbuf[16 * 3 + 2];
2853     unsigned char databuf[MAX_MBOXITEM_SIZE];
2854     void __iomem *src = wmi_buffer(wil, d.addr) +
2855     sizeof(struct wil6210_mbox_hdr);
2856     @@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
2857     seq_printf(s, " SKB = %p\n", skb);
2858    
2859     if (skb) {
2860     - unsigned char printbuf[16 * 3 + 2];
2861     + char printbuf[16 * 3 + 2];
2862     int i = 0;
2863     int len = skb_headlen(skb);
2864     void *p = skb->data;
2865     diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
2866     index 74d7572..a8afc7b 100644
2867     --- a/drivers/net/wireless/iwlwifi/dvm/main.c
2868     +++ b/drivers/net/wireless/iwlwifi/dvm/main.c
2869     @@ -758,7 +758,7 @@ int iwl_alive_start(struct iwl_priv *priv)
2870     BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2871     if (ret)
2872     return ret;
2873     - } else {
2874     + } else if (priv->cfg->bt_params) {
2875     /*
2876     * default is 2-wire BT coexexistence support
2877     */
2878     diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2879     index b60d141..365095a 100644
2880     --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2881     +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2882     @@ -69,7 +69,6 @@
2883     /* Scan Commands, Responses, Notifications */
2884    
2885     /* Masks for iwl_scan_channel.type flags */
2886     -#define SCAN_CHANNEL_TYPE_PASSIVE 0
2887     #define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
2888     #define SCAN_CHANNEL_NARROW_BAND BIT(22)
2889    
2890     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2891     index a5eb8c8..b7e95b0 100644
2892     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2893     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2894     @@ -987,6 +987,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2895     mutex_lock(&mvm->mutex);
2896     if (old_state == IEEE80211_STA_NOTEXIST &&
2897     new_state == IEEE80211_STA_NONE) {
2898     + /*
2899     + * Firmware bug - it'll crash if the beacon interval is less
2900     + * than 16. We can't avoid connecting at all, so refuse the
2901     + * station state change, this will cause mac80211 to abandon
2902     + * attempts to connect to this AP, and eventually wpa_s will
2903     + * blacklist the AP...
2904     + */
2905     + if (vif->type == NL80211_IFTYPE_STATION &&
2906     + vif->bss_conf.beacon_int < 16) {
2907     + IWL_ERR(mvm,
2908     + "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2909     + sta->addr, vif->bss_conf.beacon_int);
2910     + ret = -EINVAL;
2911     + goto out_unlock;
2912     + }
2913     ret = iwl_mvm_add_sta(mvm, vif, sta);
2914     } else if (old_state == IEEE80211_STA_NONE &&
2915     new_state == IEEE80211_STA_AUTH) {
2916     @@ -1015,6 +1030,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2917     } else {
2918     ret = -EIO;
2919     }
2920     + out_unlock:
2921     mutex_unlock(&mvm->mutex);
2922    
2923     return ret;
2924     diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
2925     index 2476e43..8e1f6c0 100644
2926     --- a/drivers/net/wireless/iwlwifi/mvm/scan.c
2927     +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
2928     @@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
2929     {
2930     int fw_idx, req_idx;
2931    
2932     - fw_idx = 0;
2933     - for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) {
2934     + for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
2935     + req_idx--, fw_idx++) {
2936     cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
2937     cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
2938     memcpy(cmd->direct_scan[fw_idx].ssid,
2939     @@ -176,19 +176,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
2940     struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
2941     (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
2942     int i;
2943     - __le32 chan_type_value;
2944     -
2945     - if (req->n_ssids > 0)
2946     - chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
2947     - else
2948     - chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
2949    
2950     for (i = 0; i < cmd->channel_count; i++) {
2951     chan->channel = cpu_to_le16(req->channels[i]->hw_value);
2952     + chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
2953     if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
2954     - chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
2955     - else
2956     - chan->type = chan_type_value;
2957     + chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
2958     chan->active_dwell = cpu_to_le16(active_dwell);
2959     chan->passive_dwell = cpu_to_le16(passive_dwell);
2960     chan->iteration_count = cpu_to_le16(1);
2961     diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
2962     index 5c664ed..736b50b 100644
2963     --- a/drivers/net/wireless/iwlwifi/mvm/sta.c
2964     +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
2965     @@ -621,8 +621,12 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2966     cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2967     cmd.sta_id = mvm_sta->sta_id;
2968     cmd.add_modify = STA_MODE_MODIFY;
2969     - cmd.add_immediate_ba_tid = (u8) tid;
2970     - cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2971     + if (start) {
2972     + cmd.add_immediate_ba_tid = (u8) tid;
2973     + cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2974     + } else {
2975     + cmd.remove_immediate_ba_tid = (u8) tid;
2976     + }
2977     cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2978     STA_MODIFY_REMOVE_BA_TID;
2979    
2980     @@ -894,6 +898,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2981     struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
2982     struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2983     u16 txq_id;
2984     + enum iwl_mvm_agg_state old_state;
2985    
2986     /*
2987     * First set the agg state to OFF to avoid calling
2988     @@ -903,13 +908,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2989     txq_id = tid_data->txq_id;
2990     IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2991     mvmsta->sta_id, tid, txq_id, tid_data->state);
2992     + old_state = tid_data->state;
2993     tid_data->state = IWL_AGG_OFF;
2994     spin_unlock_bh(&mvmsta->lock);
2995    
2996     - if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
2997     - IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2998     + if (old_state >= IWL_AGG_ON) {
2999     + if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
3000     + IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3001     +
3002     + iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
3003     + }
3004    
3005     - iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
3006     mvm->queue_to_mac80211[tid_data->txq_id] =
3007     IWL_INVALID_MAC80211_QUEUE;
3008    
3009     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
3010     index 8cb53ec..5283b55 100644
3011     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
3012     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
3013     @@ -129,6 +129,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3014     {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
3015     {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
3016     {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
3017     + {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
3018    
3019     {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
3020     {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
3021     diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
3022     index e42b266..e7f7cdf 100644
3023     --- a/drivers/net/wireless/mwifiex/cfg80211.c
3024     +++ b/drivers/net/wireless/mwifiex/cfg80211.c
3025     @@ -1668,9 +1668,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
3026     struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
3027     int ret;
3028    
3029     - if (priv->bss_mode != NL80211_IFTYPE_STATION) {
3030     + if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
3031     wiphy_err(wiphy,
3032     - "%s: reject infra assoc request in non-STA mode\n",
3033     + "%s: reject infra assoc request in non-STA role\n",
3034     dev->name);
3035     return -EINVAL;
3036     }
3037     diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
3038     index 988552d..5178c46 100644
3039     --- a/drivers/net/wireless/mwifiex/cfp.c
3040     +++ b/drivers/net/wireless/mwifiex/cfp.c
3041     @@ -415,7 +415,8 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
3042     u32 k = 0;
3043     struct mwifiex_adapter *adapter = priv->adapter;
3044    
3045     - if (priv->bss_mode == NL80211_IFTYPE_STATION) {
3046     + if (priv->bss_mode == NL80211_IFTYPE_STATION ||
3047     + priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
3048     switch (adapter->config_bands) {
3049     case BAND_B:
3050     dev_dbg(adapter->dev, "info: infra band=%d "
3051     diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
3052     index 6bcb66e..96bda6c 100644
3053     --- a/drivers/net/wireless/mwifiex/join.c
3054     +++ b/drivers/net/wireless/mwifiex/join.c
3055     @@ -1290,8 +1290,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
3056     {
3057     u8 current_bssid[ETH_ALEN];
3058    
3059     - /* Return error if the adapter or table entry is not marked as infra */
3060     - if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
3061     + /* Return error if the adapter is not STA role or table entry
3062     + * is not marked as infra.
3063     + */
3064     + if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
3065     (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
3066     return -1;
3067    
3068     diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
3069     index 363ba31..139c958 100644
3070     --- a/drivers/net/wireless/mwifiex/sdio.c
3071     +++ b/drivers/net/wireless/mwifiex/sdio.c
3072     @@ -1441,8 +1441,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
3073     /* Allocate buffer and copy payload */
3074     blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
3075     buf_block_len = (pkt_len + blk_size - 1) / blk_size;
3076     - *(u16 *) &payload[0] = (u16) pkt_len;
3077     - *(u16 *) &payload[2] = type;
3078     + *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
3079     + *(__le16 *)&payload[2] = cpu_to_le16(type);
3080    
3081     /*
3082     * This is SDIO specific header
3083     diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
3084     index 2c12311..d955741 100644
3085     --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
3086     +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
3087     @@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
3088     spin_unlock_irqrestore(&queue->index_lock, irqflags);
3089     }
3090    
3091     -void rt2x00queue_pause_queue(struct data_queue *queue)
3092     +void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
3093     {
3094     - if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
3095     - !test_bit(QUEUE_STARTED, &queue->flags) ||
3096     - test_and_set_bit(QUEUE_PAUSED, &queue->flags))
3097     - return;
3098     -
3099     switch (queue->qid) {
3100     case QID_AC_VO:
3101     case QID_AC_VI:
3102     @@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
3103     break;
3104     }
3105     }
3106     +void rt2x00queue_pause_queue(struct data_queue *queue)
3107     +{
3108     + if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
3109     + !test_bit(QUEUE_STARTED, &queue->flags) ||
3110     + test_and_set_bit(QUEUE_PAUSED, &queue->flags))
3111     + return;
3112     +
3113     + rt2x00queue_pause_queue_nocheck(queue);
3114     +}
3115     EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
3116    
3117     void rt2x00queue_unpause_queue(struct data_queue *queue)
3118     @@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
3119     return;
3120     }
3121    
3122     - rt2x00queue_pause_queue(queue);
3123     + rt2x00queue_pause_queue_nocheck(queue);
3124    
3125     queue->rt2x00dev->ops->lib->stop_queue(queue);
3126    
3127     diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
3128     index e79e006..9ee04b4 100644
3129     --- a/drivers/parisc/iosapic.c
3130     +++ b/drivers/parisc/iosapic.c
3131     @@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
3132     return pcidev->irq;
3133     }
3134    
3135     -static struct iosapic_info *first_isi = NULL;
3136     +static struct iosapic_info *iosapic_list;
3137    
3138     #ifdef CONFIG_64BIT
3139     -int iosapic_serial_irq(int num)
3140     +int iosapic_serial_irq(struct parisc_device *dev)
3141     {
3142     - struct iosapic_info *isi = first_isi;
3143     - struct irt_entry *irte = NULL; /* only used if PAT PDC */
3144     + struct iosapic_info *isi;
3145     + struct irt_entry *irte;
3146     struct vector_info *vi;
3147     - int isi_line; /* line used by device */
3148     + int cnt;
3149     + int intin;
3150     +
3151     + intin = (dev->mod_info >> 24) & 15;
3152    
3153     /* lookup IRT entry for isi/slot/pin set */
3154     - irte = &irt_cell[num];
3155     + for (cnt = 0; cnt < irt_num_entry; cnt++) {
3156     + irte = &irt_cell[cnt];
3157     + if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
3158     + irte->dest_iosapic_intin == intin)
3159     + break;
3160     + }
3161     + if (cnt >= irt_num_entry)
3162     + return 0; /* no irq found, force polling */
3163    
3164     DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
3165     irte,
3166     @@ -834,11 +844,17 @@ int iosapic_serial_irq(int num)
3167     irte->src_seg_id,
3168     irte->dest_iosapic_intin,
3169     (u32) irte->dest_iosapic_addr);
3170     - isi_line = irte->dest_iosapic_intin;
3171     +
3172     + /* search for iosapic */
3173     + for (isi = iosapic_list; isi; isi = isi->isi_next)
3174     + if (isi->isi_hpa == dev->mod0)
3175     + break;
3176     + if (!isi)
3177     + return 0; /* no iosapic found, force polling */
3178    
3179     /* get vector info for this input line */
3180     - vi = isi->isi_vector + isi_line;
3181     - DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi);
3182     + vi = isi->isi_vector + intin;
3183     + DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", iosapic_intin, vi);
3184    
3185     /* If this IRQ line has already been setup, skip it */
3186     if (vi->irte)
3187     @@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa)
3188     vip->irqline = (unsigned char) cnt;
3189     vip->iosapic = isi;
3190     }
3191     - if (!first_isi)
3192     - first_isi = isi;
3193     + isi->isi_next = iosapic_list;
3194     + iosapic_list = isi;
3195     return isi;
3196     }
3197    
3198     diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
3199     index aac7a40..0e0d0f7 100644
3200     --- a/drivers/pci/hotplug/pciehp_pci.c
3201     +++ b/drivers/pci/hotplug/pciehp_pci.c
3202     @@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot)
3203     if (ret)
3204     presence = 0;
3205    
3206     - list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
3207     + /*
3208     + * Stopping an SR-IOV PF device removes all the associated VFs,
3209     + * which will update the bus->devices list and confuse the
3210     + * iterator. Therefore, iterate in reverse so we remove the VFs
3211     + * first, then the PF. We do the same in pci_stop_bus_device().
3212     + */
3213     + list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
3214     + bus_list) {
3215     pci_dev_get(dev);
3216     if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
3217     pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
3218     diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
3219     index d254e23..64a7de2 100644
3220     --- a/drivers/pci/setup-bus.c
3221     +++ b/drivers/pci/setup-bus.c
3222     @@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head,
3223     }
3224     }
3225    
3226     +static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
3227     +{
3228     + struct pci_dev_resource *fail_res;
3229     + unsigned long mask = 0;
3230     +
3231     + /* check failed type */
3232     + list_for_each_entry(fail_res, fail_head, list)
3233     + mask |= fail_res->flags;
3234     +
3235     + /*
3236     + * one pref failed resource will set IORESOURCE_MEM,
3237     + * as we can allocate pref in non-pref range.
3238     + * Will release all assigned non-pref sibling resources
3239     + * according to that bit.
3240     + */
3241     + return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
3242     +}
3243     +
3244     +static bool pci_need_to_release(unsigned long mask, struct resource *res)
3245     +{
3246     + if (res->flags & IORESOURCE_IO)
3247     + return !!(mask & IORESOURCE_IO);
3248     +
3249     + /* check pref at first */
3250     + if (res->flags & IORESOURCE_PREFETCH) {
3251     + if (mask & IORESOURCE_PREFETCH)
3252     + return true;
3253     + /* count pref if its parent is non-pref */
3254     + else if ((mask & IORESOURCE_MEM) &&
3255     + !(res->parent->flags & IORESOURCE_PREFETCH))
3256     + return true;
3257     + else
3258     + return false;
3259     + }
3260     +
3261     + if (res->flags & IORESOURCE_MEM)
3262     + return !!(mask & IORESOURCE_MEM);
3263     +
3264     + return false; /* should not get here */
3265     +}
3266     +
3267     static void __assign_resources_sorted(struct list_head *head,
3268     struct list_head *realloc_head,
3269     struct list_head *fail_head)
3270     @@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head,
3271     * if could do that, could get out early.
3272     * if could not do that, we still try to assign requested at first,
3273     * then try to reassign add_size for some resources.
3274     + *
3275     + * Separate three resource type checking if we need to release
3276     + * assigned resource after requested + add_size try.
3277     + * 1. if there is io port assign fail, will release assigned
3278     + * io port.
3279     + * 2. if there is pref mmio assign fail, release assigned
3280     + * pref mmio.
3281     + * if assigned pref mmio's parent is non-pref mmio and there
3282     + * is non-pref mmio assign fail, will release that assigned
3283     + * pref mmio.
3284     + * 3. if there is non-pref mmio assign fail or pref mmio
3285     + * assigned fail, will release assigned non-pref mmio.
3286     */
3287     LIST_HEAD(save_head);
3288     LIST_HEAD(local_fail_head);
3289     struct pci_dev_resource *save_res;
3290     - struct pci_dev_resource *dev_res;
3291     + struct pci_dev_resource *dev_res, *tmp_res;
3292     + unsigned long fail_type;
3293    
3294     /* Check if optional add_size is there */
3295     if (!realloc_head || list_empty(realloc_head))
3296     @@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head,
3297     return;
3298     }
3299    
3300     + /* check failed type */
3301     + fail_type = pci_fail_res_type_mask(&local_fail_head);
3302     + /* remove not need to be released assigned res from head list etc */
3303     + list_for_each_entry_safe(dev_res, tmp_res, head, list)
3304     + if (dev_res->res->parent &&
3305     + !pci_need_to_release(fail_type, dev_res->res)) {
3306     + /* remove it from realloc_head list */
3307     + remove_from_list(realloc_head, dev_res->res);
3308     + remove_from_list(&save_head, dev_res->res);
3309     + list_del(&dev_res->list);
3310     + kfree(dev_res);
3311     + }
3312     +
3313     free_list(&local_fail_head);
3314     /* Release assigned resource */
3315     list_for_each_entry(dev_res, head, list)
3316     diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
3317     index 50b13c9..df0aacc 100644
3318     --- a/drivers/spi/spi-davinci.c
3319     +++ b/drivers/spi/spi-davinci.c
3320     @@ -610,7 +610,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
3321     else
3322     buf = (void *)t->tx_buf;
3323     t->tx_dma = dma_map_single(&spi->dev, buf,
3324     - t->len, DMA_FROM_DEVICE);
3325     + t->len, DMA_TO_DEVICE);
3326     if (!t->tx_dma) {
3327     ret = -EFAULT;
3328     goto err_tx_map;
3329     diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
3330     index e34e3fe..1742ce5 100644
3331     --- a/drivers/staging/zram/zram_drv.c
3332     +++ b/drivers/staging/zram/zram_drv.c
3333     @@ -272,8 +272,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
3334    
3335     if (page_zero_filled(uncmem)) {
3336     kunmap_atomic(user_mem);
3337     - if (is_partial_io(bvec))
3338     - kfree(uncmem);
3339     zram->stats.pages_zero++;
3340     zram_set_flag(meta, index, ZRAM_ZERO);
3341     ret = 0;
3342     @@ -422,13 +420,20 @@ out:
3343     */
3344     static inline int valid_io_request(struct zram *zram, struct bio *bio)
3345     {
3346     - if (unlikely(
3347     - (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
3348     - (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
3349     - (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
3350     + u64 start, end, bound;
3351    
3352     + /* unaligned request */
3353     + if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
3354     + return 0;
3355     + if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
3356     + return 0;
3357     +
3358     + start = bio->bi_sector;
3359     + end = start + (bio->bi_size >> SECTOR_SHIFT);
3360     + bound = zram->disksize >> SECTOR_SHIFT;
3361     + /* out of range range */
3362     + if (unlikely(start >= bound || end >= bound || start > end))
3363     return 0;
3364     - }
3365    
3366     /* I/O request is valid */
3367     return 1;
3368     @@ -582,7 +587,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
3369     struct zram *zram;
3370    
3371     zram = bdev->bd_disk->private_data;
3372     + down_write(&zram->lock);
3373     zram_free_page(zram, index);
3374     + up_write(&zram->lock);
3375     zram_stat64_inc(zram, &zram->stats.notify_free);
3376     }
3377    
3378     @@ -593,7 +600,7 @@ static const struct block_device_operations zram_devops = {
3379    
3380     static int create_device(struct zram *zram, int device_id)
3381     {
3382     - int ret = 0;
3383     + int ret = -ENOMEM;
3384    
3385     init_rwsem(&zram->lock);
3386     init_rwsem(&zram->init_lock);
3387     @@ -603,7 +610,6 @@ static int create_device(struct zram *zram, int device_id)
3388     if (!zram->queue) {
3389     pr_err("Error allocating disk queue for device %d\n",
3390     device_id);
3391     - ret = -ENOMEM;
3392     goto out;
3393     }
3394    
3395     @@ -613,11 +619,9 @@ static int create_device(struct zram *zram, int device_id)
3396     /* gendisk structure */
3397     zram->disk = alloc_disk(1);
3398     if (!zram->disk) {
3399     - blk_cleanup_queue(zram->queue);
3400     pr_warn("Error allocating disk structure for device %d\n",
3401     device_id);
3402     - ret = -ENOMEM;
3403     - goto out;
3404     + goto out_free_queue;
3405     }
3406    
3407     zram->disk->major = zram_major;
3408     @@ -646,11 +650,17 @@ static int create_device(struct zram *zram, int device_id)
3409     &zram_disk_attr_group);
3410     if (ret < 0) {
3411     pr_warn("Error creating sysfs group");
3412     - goto out;
3413     + goto out_free_disk;
3414     }
3415    
3416     zram->init_done = 0;
3417     + return 0;
3418    
3419     +out_free_disk:
3420     + del_gendisk(zram->disk);
3421     + put_disk(zram->disk);
3422     +out_free_queue:
3423     + blk_cleanup_queue(zram->queue);
3424     out:
3425     return ret;
3426     }
3427     @@ -727,8 +737,10 @@ static void __exit zram_exit(void)
3428     for (i = 0; i < num_devices; i++) {
3429     zram = &zram_devices[i];
3430    
3431     + get_disk(zram->disk);
3432     destroy_device(zram);
3433     zram_reset_device(zram);
3434     + put_disk(zram->disk);
3435     }
3436    
3437     unregister_blkdev(zram_major, "zram");
3438     diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
3439     index 2d1a3f1..d542eee 100644
3440     --- a/drivers/staging/zram/zram_drv.h
3441     +++ b/drivers/staging/zram/zram_drv.h
3442     @@ -93,8 +93,9 @@ struct zram_meta {
3443     struct zram {
3444     struct zram_meta *meta;
3445     spinlock_t stat64_lock; /* protect 64-bit stats */
3446     - struct rw_semaphore lock; /* protect compression buffers and table
3447     - * against concurrent read and writes */
3448     + struct rw_semaphore lock; /* protect compression buffers, table,
3449     + * 32bit stat counters against concurrent
3450     + * notifications, reads and writes */
3451     struct request_queue *queue;
3452     struct gendisk *disk;
3453     int init_done;
3454     diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
3455     index e6a929d..dc76a3d 100644
3456     --- a/drivers/staging/zram/zram_sysfs.c
3457     +++ b/drivers/staging/zram/zram_sysfs.c
3458     @@ -188,8 +188,10 @@ static ssize_t mem_used_total_show(struct device *dev,
3459     struct zram *zram = dev_to_zram(dev);
3460     struct zram_meta *meta = zram->meta;
3461    
3462     + down_read(&zram->init_lock);
3463     if (zram->init_done)
3464     val = zs_get_total_size_bytes(meta->mem_pool);
3465     + up_read(&zram->init_lock);
3466    
3467     return sprintf(buf, "%llu\n", val);
3468     }
3469     diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
3470     index bb91b47..2e3ea1a 100644
3471     --- a/drivers/tty/serial/8250/8250_gsc.c
3472     +++ b/drivers/tty/serial/8250/8250_gsc.c
3473     @@ -31,9 +31,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
3474     int err;
3475    
3476     #ifdef CONFIG_64BIT
3477     - extern int iosapic_serial_irq(int cellnum);
3478     if (!dev->irq && (dev->id.sversion == 0xad))
3479     - dev->irq = iosapic_serial_irq(dev->mod_index-1);
3480     + dev->irq = iosapic_serial_irq(dev);
3481     #endif
3482    
3483     if (!dev->irq) {
3484     diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
3485     index cbf1d15..22f280a 100644
3486     --- a/drivers/tty/serial/arc_uart.c
3487     +++ b/drivers/tty/serial/arc_uart.c
3488     @@ -773,6 +773,6 @@ module_init(arc_serial_init);
3489     module_exit(arc_serial_exit);
3490    
3491     MODULE_LICENSE("GPL");
3492     -MODULE_ALIAS("plat-arcfpga/uart");
3493     +MODULE_ALIAS("platform:" DRIVER_NAME);
3494     MODULE_AUTHOR("Vineet Gupta");
3495     MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
3496     diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
3497     index 4f5f161..f85b8e6 100644
3498     --- a/drivers/tty/serial/mxs-auart.c
3499     +++ b/drivers/tty/serial/mxs-auart.c
3500     @@ -678,11 +678,18 @@ static void mxs_auart_settermios(struct uart_port *u,
3501    
3502     static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
3503     {
3504     - u32 istatus, istat;
3505     + u32 istat;
3506     struct mxs_auart_port *s = context;
3507     u32 stat = readl(s->port.membase + AUART_STAT);
3508    
3509     - istatus = istat = readl(s->port.membase + AUART_INTR);
3510     + istat = readl(s->port.membase + AUART_INTR);
3511     +
3512     + /* ack irq */
3513     + writel(istat & (AUART_INTR_RTIS
3514     + | AUART_INTR_TXIS
3515     + | AUART_INTR_RXIS
3516     + | AUART_INTR_CTSMIS),
3517     + s->port.membase + AUART_INTR_CLR);
3518    
3519     if (istat & AUART_INTR_CTSMIS) {
3520     uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
3521     @@ -702,12 +709,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
3522     istat &= ~AUART_INTR_TXIS;
3523     }
3524    
3525     - writel(istatus & (AUART_INTR_RTIS
3526     - | AUART_INTR_TXIS
3527     - | AUART_INTR_RXIS
3528     - | AUART_INTR_CTSMIS),
3529     - s->port.membase + AUART_INTR_CLR);
3530     -
3531     return IRQ_HANDLED;
3532     }
3533    
3534     @@ -850,7 +851,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
3535     struct mxs_auart_port *s;
3536     struct uart_port *port;
3537     unsigned int old_ctrl0, old_ctrl2;
3538     - unsigned int to = 1000;
3539     + unsigned int to = 20000;
3540    
3541     if (co->index >= MXS_AUART_PORTS || co->index < 0)
3542     return;
3543     @@ -871,18 +872,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
3544    
3545     uart_console_write(port, str, count, mxs_auart_console_putchar);
3546    
3547     - /*
3548     - * Finally, wait for transmitter to become empty
3549     - * and restore the TCR
3550     - */
3551     + /* Finally, wait for transmitter to become empty ... */
3552     while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
3553     + udelay(1);
3554     if (!to--)
3555     break;
3556     - udelay(1);
3557     }
3558    
3559     - writel(old_ctrl0, port->membase + AUART_CTRL0);
3560     - writel(old_ctrl2, port->membase + AUART_CTRL2);
3561     + /*
3562     + * ... and restore the TCR if we waited long enough for the transmitter
3563     + * to be idle. This might keep the transmitter enabled although it is
3564     + * unused, but that is better than to disable it while it is still
3565     + * transmitting.
3566     + */
3567     + if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
3568     + writel(old_ctrl0, port->membase + AUART_CTRL0);
3569     + writel(old_ctrl2, port->membase + AUART_CTRL2);
3570     + }
3571    
3572     clk_disable(s->clk);
3573     }
3574     diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
3575     index 62b86a6..b92d333 100644
3576     --- a/drivers/usb/serial/mos7840.c
3577     +++ b/drivers/usb/serial/mos7840.c
3578     @@ -183,7 +183,10 @@
3579     #define LED_ON_MS 500
3580     #define LED_OFF_MS 500
3581    
3582     -static int device_type;
3583     +enum mos7840_flag {
3584     + MOS7840_FLAG_CTRL_BUSY,
3585     + MOS7840_FLAG_LED_BUSY,
3586     +};
3587    
3588     static const struct usb_device_id id_table[] = {
3589     {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
3590     @@ -238,9 +241,12 @@ struct moschip_port {
3591    
3592     /* For device(s) with LED indicator */
3593     bool has_led;
3594     - bool led_flag;
3595     struct timer_list led_timer1; /* Timer for LED on */
3596     struct timer_list led_timer2; /* Timer for LED off */
3597     + struct urb *led_urb;
3598     + struct usb_ctrlrequest *led_dr;
3599     +
3600     + unsigned long flags;
3601     };
3602    
3603     /*
3604     @@ -467,10 +473,10 @@ static void mos7840_control_callback(struct urb *urb)
3605     case -ESHUTDOWN:
3606     /* this urb is terminated, clean up */
3607     dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
3608     - return;
3609     + goto out;
3610     default:
3611     dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
3612     - return;
3613     + goto out;
3614     }
3615    
3616     dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
3617     @@ -483,6 +489,8 @@ static void mos7840_control_callback(struct urb *urb)
3618     mos7840_handle_new_msr(mos7840_port, regval);
3619     else if (mos7840_port->MsrLsr == 1)
3620     mos7840_handle_new_lsr(mos7840_port, regval);
3621     +out:
3622     + clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
3623     }
3624    
3625     static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
3626     @@ -493,6 +501,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
3627     unsigned char *buffer = mcs->ctrl_buf;
3628     int ret;
3629    
3630     + if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
3631     + return -EBUSY;
3632     +
3633     dr->bRequestType = MCS_RD_RTYPE;
3634     dr->bRequest = MCS_RDREQ;
3635     dr->wValue = cpu_to_le16(Wval); /* 0 */
3636     @@ -504,6 +515,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
3637     mos7840_control_callback, mcs);
3638     mcs->control_urb->transfer_buffer_length = 2;
3639     ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
3640     + if (ret)
3641     + clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
3642     +
3643     return ret;
3644     }
3645    
3646     @@ -530,7 +544,7 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
3647     __u16 reg)
3648     {
3649     struct usb_device *dev = mcs->port->serial->dev;
3650     - struct usb_ctrlrequest *dr = mcs->dr;
3651     + struct usb_ctrlrequest *dr = mcs->led_dr;
3652    
3653     dr->bRequestType = MCS_WR_RTYPE;
3654     dr->bRequest = MCS_WRREQ;
3655     @@ -538,10 +552,10 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
3656     dr->wIndex = cpu_to_le16(reg);
3657     dr->wLength = cpu_to_le16(0);
3658    
3659     - usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0),
3660     + usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
3661     (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
3662    
3663     - usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
3664     + usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
3665     }
3666    
3667     static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
3668     @@ -567,7 +581,19 @@ static void mos7840_led_flag_off(unsigned long arg)
3669     {
3670     struct moschip_port *mcs = (struct moschip_port *) arg;
3671    
3672     - mcs->led_flag = false;
3673     + clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
3674     +}
3675     +
3676     +static void mos7840_led_activity(struct usb_serial_port *port)
3677     +{
3678     + struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
3679     +
3680     + if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
3681     + return;
3682     +
3683     + mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
3684     + mod_timer(&mos7840_port->led_timer1,
3685     + jiffies + msecs_to_jiffies(LED_ON_MS));
3686     }
3687    
3688     /*****************************************************************************
3689     @@ -767,14 +793,8 @@ static void mos7840_bulk_in_callback(struct urb *urb)
3690     return;
3691     }
3692    
3693     - /* Turn on LED */
3694     - if (mos7840_port->has_led && !mos7840_port->led_flag) {
3695     - mos7840_port->led_flag = true;
3696     - mos7840_set_led_async(mos7840_port, 0x0301,
3697     - MODEM_CONTROL_REGISTER);
3698     - mod_timer(&mos7840_port->led_timer1,
3699     - jiffies + msecs_to_jiffies(LED_ON_MS));
3700     - }
3701     + if (mos7840_port->has_led)
3702     + mos7840_led_activity(port);
3703    
3704     mos7840_port->read_urb_busy = true;
3705     retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
3706     @@ -825,18 +845,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
3707     /************************************************************************/
3708     /* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
3709     /************************************************************************/
3710     -#ifdef MCSSerialProbe
3711     -static int mos7840_serial_probe(struct usb_serial *serial,
3712     - const struct usb_device_id *id)
3713     -{
3714     -
3715     - /*need to implement the mode_reg reading and updating\
3716     - structures usb_serial_ device_type\
3717     - (i.e num_ports, num_bulkin,bulkout etc) */
3718     - /* Also we can update the changes attach */
3719     - return 1;
3720     -}
3721     -#endif
3722    
3723     /*****************************************************************************
3724     * mos7840_open
3725     @@ -1467,13 +1475,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
3726     data1 = urb->transfer_buffer;
3727     dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
3728    
3729     - /* Turn on LED */
3730     - if (mos7840_port->has_led && !mos7840_port->led_flag) {
3731     - mos7840_port->led_flag = true;
3732     - mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301);
3733     - mod_timer(&mos7840_port->led_timer1,
3734     - jiffies + msecs_to_jiffies(LED_ON_MS));
3735     - }
3736     + if (mos7840_port->has_led)
3737     + mos7840_led_activity(port);
3738    
3739     /* send it down the pipe */
3740     status = usb_submit_urb(urb, GFP_ATOMIC);
3741     @@ -2202,38 +2205,48 @@ static int mos7810_check(struct usb_serial *serial)
3742     return 0;
3743     }
3744    
3745     -static int mos7840_calc_num_ports(struct usb_serial *serial)
3746     +static int mos7840_probe(struct usb_serial *serial,
3747     + const struct usb_device_id *id)
3748     {
3749     - __u16 data = 0x00;
3750     + u16 product = serial->dev->descriptor.idProduct;
3751     u8 *buf;
3752     - int mos7840_num_ports;
3753     + int device_type;
3754     +
3755     + if (product == MOSCHIP_DEVICE_ID_7810 ||
3756     + product == MOSCHIP_DEVICE_ID_7820) {
3757     + device_type = product;
3758     + goto out;
3759     + }
3760    
3761     buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
3762     - if (buf) {
3763     - usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
3764     + if (!buf)
3765     + return -ENOMEM;
3766     +
3767     + usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
3768     MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
3769     VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
3770     - data = *buf;
3771     - kfree(buf);
3772     - }
3773    
3774     - if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
3775     - serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
3776     - device_type = serial->dev->descriptor.idProduct;
3777     - } else {
3778     - /* For a MCS7840 device GPIO0 must be set to 1 */
3779     - if ((data & 0x01) == 1)
3780     - device_type = MOSCHIP_DEVICE_ID_7840;
3781     - else if (mos7810_check(serial))
3782     - device_type = MOSCHIP_DEVICE_ID_7810;
3783     - else
3784     - device_type = MOSCHIP_DEVICE_ID_7820;
3785     - }
3786     + /* For a MCS7840 device GPIO0 must be set to 1 */
3787     + if (buf[0] & 0x01)
3788     + device_type = MOSCHIP_DEVICE_ID_7840;
3789     + else if (mos7810_check(serial))
3790     + device_type = MOSCHIP_DEVICE_ID_7810;
3791     + else
3792     + device_type = MOSCHIP_DEVICE_ID_7820;
3793     +
3794     + kfree(buf);
3795     +out:
3796     + usb_set_serial_data(serial, (void *)(unsigned long)device_type);
3797     +
3798     + return 0;
3799     +}
3800     +
3801     +static int mos7840_calc_num_ports(struct usb_serial *serial)
3802     +{
3803     + int device_type = (unsigned long)usb_get_serial_data(serial);
3804     + int mos7840_num_ports;
3805    
3806     mos7840_num_ports = (device_type >> 4) & 0x000F;
3807     - serial->num_bulk_in = mos7840_num_ports;
3808     - serial->num_bulk_out = mos7840_num_ports;
3809     - serial->num_ports = mos7840_num_ports;
3810    
3811     return mos7840_num_ports;
3812     }
3813     @@ -2241,6 +2254,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
3814     static int mos7840_port_probe(struct usb_serial_port *port)
3815     {
3816     struct usb_serial *serial = port->serial;
3817     + int device_type = (unsigned long)usb_get_serial_data(serial);
3818     struct moschip_port *mos7840_port;
3819     int status;
3820     int pnum;
3821     @@ -2418,6 +2432,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
3822     if (device_type == MOSCHIP_DEVICE_ID_7810) {
3823     mos7840_port->has_led = true;
3824    
3825     + mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
3826     + mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
3827     + GFP_KERNEL);
3828     + if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
3829     + status = -ENOMEM;
3830     + goto error;
3831     + }
3832     +
3833     init_timer(&mos7840_port->led_timer1);
3834     mos7840_port->led_timer1.function = mos7840_led_off;
3835     mos7840_port->led_timer1.expires =
3836     @@ -2430,8 +2452,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
3837     jiffies + msecs_to_jiffies(LED_OFF_MS);
3838     mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
3839    
3840     - mos7840_port->led_flag = false;
3841     -
3842     /* Turn off LED */
3843     mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
3844     }
3845     @@ -2453,6 +2473,8 @@ out:
3846     }
3847     return 0;
3848     error:
3849     + kfree(mos7840_port->led_dr);
3850     + usb_free_urb(mos7840_port->led_urb);
3851     kfree(mos7840_port->dr);
3852     kfree(mos7840_port->ctrl_buf);
3853     usb_free_urb(mos7840_port->control_urb);
3854     @@ -2473,6 +2495,10 @@ static int mos7840_port_remove(struct usb_serial_port *port)
3855    
3856     del_timer_sync(&mos7840_port->led_timer1);
3857     del_timer_sync(&mos7840_port->led_timer2);
3858     +
3859     + usb_kill_urb(mos7840_port->led_urb);
3860     + usb_free_urb(mos7840_port->led_urb);
3861     + kfree(mos7840_port->led_dr);
3862     }
3863     usb_kill_urb(mos7840_port->control_urb);
3864     usb_free_urb(mos7840_port->control_urb);
3865     @@ -2499,9 +2525,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
3866     .throttle = mos7840_throttle,
3867     .unthrottle = mos7840_unthrottle,
3868     .calc_num_ports = mos7840_calc_num_ports,
3869     -#ifdef MCSSerialProbe
3870     - .probe = mos7840_serial_probe,
3871     -#endif
3872     + .probe = mos7840_probe,
3873     .ioctl = mos7840_ioctl,
3874     .set_termios = mos7840_set_termios,
3875     .break_ctl = mos7840_break,
3876     diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
3877     index 7b417e2..b0a523b2 100644
3878     --- a/fs/btrfs/ulist.c
3879     +++ b/fs/btrfs/ulist.c
3880     @@ -205,6 +205,10 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
3881     u64 new_alloced = ulist->nodes_alloced + 128;
3882     struct ulist_node *new_nodes;
3883     void *old = NULL;
3884     + int i;
3885     +
3886     + for (i = 0; i < ulist->nnodes; i++)
3887     + rb_erase(&ulist->nodes[i].rb_node, &ulist->root);
3888    
3889     /*
3890     * if nodes_alloced == ULIST_SIZE no memory has been allocated
3891     @@ -224,6 +228,17 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
3892    
3893     ulist->nodes = new_nodes;
3894     ulist->nodes_alloced = new_alloced;
3895     +
3896     + /*
3897     + * krealloc actually uses memcpy, which does not copy rb_node
3898     + * pointers, so we have to do it ourselves. Otherwise we may
3899     + * be bitten by crashes.
3900     + */
3901     + for (i = 0; i < ulist->nnodes; i++) {
3902     + ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
3903     + if (ret < 0)
3904     + return ret;
3905     + }
3906     }
3907     ulist->nodes[ulist->nnodes].val = val;
3908     ulist->nodes[ulist->nnodes].aux = aux;
3909     diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
3910     index 6c80083..77cc85d 100644
3911     --- a/fs/notify/fanotify/fanotify_user.c
3912     +++ b/fs/notify/fanotify/fanotify_user.c
3913     @@ -122,6 +122,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
3914     metadata->event_len = FAN_EVENT_METADATA_LEN;
3915     metadata->metadata_len = FAN_EVENT_METADATA_LEN;
3916     metadata->vers = FANOTIFY_METADATA_VERSION;
3917     + metadata->reserved = 0;
3918     metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
3919     metadata->pid = pid_vnr(event->tgid);
3920     if (unlikely(event->mask & FAN_Q_OVERFLOW))
3921     diff --git a/include/linux/tick.h b/include/linux/tick.h
3922     index 9180f4b..62bd8b7 100644
3923     --- a/include/linux/tick.h
3924     +++ b/include/linux/tick.h
3925     @@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
3926     #endif
3927    
3928    
3929     -# ifdef CONFIG_CPU_IDLE_GOV_MENU
3930     -extern void menu_hrtimer_cancel(void);
3931     -# else
3932     -static inline void menu_hrtimer_cancel(void) {}
3933     -# endif /* CONFIG_CPU_IDLE_GOV_MENU */
3934     -
3935     #endif
3936     diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
3937     index b6b215f..14105c2 100644
3938     --- a/include/linux/user_namespace.h
3939     +++ b/include/linux/user_namespace.h
3940     @@ -23,6 +23,7 @@ struct user_namespace {
3941     struct uid_gid_map projid_map;
3942     atomic_t count;
3943     struct user_namespace *parent;
3944     + int level;
3945     kuid_t owner;
3946     kgid_t group;
3947     unsigned int proc_inum;
3948     diff --git a/include/net/ndisc.h b/include/net/ndisc.h
3949     index 745bf74..5043f8b 100644
3950     --- a/include/net/ndisc.h
3951     +++ b/include/net/ndisc.h
3952     @@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
3953     * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
3954     * also need a pad of 2.
3955     */
3956     -static int ndisc_addr_option_pad(unsigned short type)
3957     +static inline int ndisc_addr_option_pad(unsigned short type)
3958     {
3959     switch (type) {
3960     case ARPHRD_INFINIBAND: return 2;
3961     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
3962     index c6e77ef..2e9b387 100644
3963     --- a/kernel/cgroup.c
3964     +++ b/kernel/cgroup.c
3965     @@ -2769,13 +2769,17 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
3966     {
3967     LIST_HEAD(pending);
3968     struct cgroup *cgrp, *n;
3969     + struct super_block *sb = ss->root->sb;
3970    
3971     /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
3972     - if (cfts && ss->root != &rootnode) {
3973     + if (cfts && ss->root != &rootnode &&
3974     + atomic_inc_not_zero(&sb->s_active)) {
3975     list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
3976     dget(cgrp->dentry);
3977     list_add_tail(&cgrp->cft_q_node, &pending);
3978     }
3979     + } else {
3980     + sb = NULL;
3981     }
3982    
3983     mutex_unlock(&cgroup_mutex);
3984     @@ -2798,6 +2802,9 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
3985     dput(cgrp->dentry);
3986     }
3987    
3988     + if (sb)
3989     + deactivate_super(sb);
3990     +
3991     mutex_unlock(&cgroup_cft_mutex);
3992     }
3993    
3994     diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
3995     index 0cf1c14..4251374 100644
3996     --- a/kernel/time/tick-sched.c
3997     +++ b/kernel/time/tick-sched.c
3998     @@ -832,13 +832,10 @@ void tick_nohz_irq_exit(void)
3999     {
4000     struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
4001    
4002     - if (ts->inidle) {
4003     - /* Cancel the timer because CPU already waken up from the C-states*/
4004     - menu_hrtimer_cancel();
4005     + if (ts->inidle)
4006     __tick_nohz_idle_enter(ts);
4007     - } else {
4008     + else
4009     tick_nohz_full_stop_tick(ts);
4010     - }
4011     }
4012    
4013     /**
4014     @@ -936,8 +933,6 @@ void tick_nohz_idle_exit(void)
4015    
4016     ts->inidle = 0;
4017    
4018     - /* Cancel the timer because CPU already waken up from the C-states*/
4019     - menu_hrtimer_cancel();
4020     if (ts->idle_active || ts->tick_stopped)
4021     now = ktime_get();
4022    
4023     diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
4024     index d8c30db..9064b91 100644
4025     --- a/kernel/user_namespace.c
4026     +++ b/kernel/user_namespace.c
4027     @@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
4028     kgid_t group = new->egid;
4029     int ret;
4030    
4031     + if (parent_ns->level > 32)
4032     + return -EUSERS;
4033     +
4034     /*
4035     * Verify that we can not violate the policy of which files
4036     * may be accessed that is specified by the root directory,
4037     @@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
4038     atomic_set(&ns->count, 1);
4039     /* Leave the new->user_ns reference with the new user namespace. */
4040     ns->parent = parent_ns;
4041     + ns->level = parent_ns->level + 1;
4042     ns->owner = owner;
4043     ns->group = group;
4044    
4045     @@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
4046     int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
4047     {
4048     struct cred *cred;
4049     + int err = -ENOMEM;
4050    
4051     if (!(unshare_flags & CLONE_NEWUSER))
4052     return 0;
4053    
4054     cred = prepare_creds();
4055     - if (!cred)
4056     - return -ENOMEM;
4057     + if (cred) {
4058     + err = create_user_ns(cred);
4059     + if (err)
4060     + put_cred(cred);
4061     + else
4062     + *new_cred = cred;
4063     + }
4064    
4065     - *new_cred = cred;
4066     - return create_user_ns(cred);
4067     + return err;
4068     }
4069    
4070     void free_user_ns(struct user_namespace *ns)
4071     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4072     index ee8e29a..6f01921 100644
4073     --- a/kernel/workqueue.c
4074     +++ b/kernel/workqueue.c
4075     @@ -3398,6 +3398,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
4076     {
4077     to->nice = from->nice;
4078     cpumask_copy(to->cpumask, from->cpumask);
4079     + /*
4080     + * Unlike hash and equality test, this function doesn't ignore
4081     + * ->no_numa as it is used for both pool and wq attrs. Instead,
4082     + * get_unbound_pool() explicitly clears ->no_numa after copying.
4083     + */
4084     + to->no_numa = from->no_numa;
4085     }
4086    
4087     /* hash value of the content of @attr */
4088     @@ -3565,6 +3571,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4089     lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
4090     copy_workqueue_attrs(pool->attrs, attrs);
4091    
4092     + /*
4093     + * no_numa isn't a worker_pool attribute, always clear it. See
4094     + * 'struct workqueue_attrs' comments for detail.
4095     + */
4096     + pool->attrs->no_numa = false;
4097     +
4098     /* if cpumask is contained inside a NUMA node, we belong to that node */
4099     if (wq_numa_enabled) {
4100     for_each_node(node) {
4101     diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
4102     index fa2f63f..3f25e75 100644
4103     --- a/net/ipv4/sysctl_net_ipv4.c
4104     +++ b/net/ipv4/sysctl_net_ipv4.c
4105     @@ -36,6 +36,8 @@ static int tcp_adv_win_scale_min = -31;
4106     static int tcp_adv_win_scale_max = 31;
4107     static int ip_ttl_min = 1;
4108     static int ip_ttl_max = 255;
4109     +static int tcp_syn_retries_min = 1;
4110     +static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
4111     static int ip_ping_group_range_min[] = { 0, 0 };
4112     static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
4113    
4114     @@ -331,7 +333,9 @@ static struct ctl_table ipv4_table[] = {
4115     .data = &sysctl_tcp_syn_retries,
4116     .maxlen = sizeof(int),
4117     .mode = 0644,
4118     - .proc_handler = proc_dointvec
4119     + .proc_handler = proc_dointvec_minmax,
4120     + .extra1 = &tcp_syn_retries_min,
4121     + .extra2 = &tcp_syn_retries_max
4122     },
4123     {
4124     .procname = "tcp_synack_retries",
4125     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
4126     index 241fb8a..4b42124 100644
4127     --- a/net/ipv6/ip6mr.c
4128     +++ b/net/ipv6/ip6mr.c
4129     @@ -259,10 +259,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
4130     {
4131     struct mr6_table *mrt, *next;
4132    
4133     + rtnl_lock();
4134     list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
4135     list_del(&mrt->list);
4136     ip6mr_free_table(mrt);
4137     }
4138     + rtnl_unlock();
4139     fib_rules_unregister(net->ipv6.mr6_rules_ops);
4140     }
4141     #else
4142     @@ -289,7 +291,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
4143    
4144     static void __net_exit ip6mr_rules_exit(struct net *net)
4145     {
4146     + rtnl_lock();
4147     ip6mr_free_table(net->ipv6.mrt6);
4148     + net->ipv6.mrt6 = NULL;
4149     + rtnl_unlock();
4150     }
4151     #endif
4152    
4153     diff --git a/net/key/af_key.c b/net/key/af_key.c
4154     index 9da8620..ab8bd2c 100644
4155     --- a/net/key/af_key.c
4156     +++ b/net/key/af_key.c
4157     @@ -2081,6 +2081,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
4158     pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
4159     }
4160     pol->sadb_x_policy_dir = dir+1;
4161     + pol->sadb_x_policy_reserved = 0;
4162     pol->sadb_x_policy_id = xp->index;
4163     pol->sadb_x_policy_priority = xp->priority;
4164    
4165     @@ -3137,7 +3138,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
4166     pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
4167     pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
4168     pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
4169     + pol->sadb_x_policy_reserved = 0;
4170     pol->sadb_x_policy_id = xp->index;
4171     + pol->sadb_x_policy_priority = xp->priority;
4172    
4173     /* Set sadb_comb's. */
4174     if (x->id.proto == IPPROTO_AH)
4175     @@ -3525,6 +3528,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4176     pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
4177     pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
4178     pol->sadb_x_policy_dir = dir + 1;
4179     + pol->sadb_x_policy_reserved = 0;
4180     pol->sadb_x_policy_id = 0;
4181     pol->sadb_x_policy_priority = 0;
4182    
4183     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
4184     index 4fdb306e..ae36f8e 100644
4185     --- a/net/mac80211/cfg.c
4186     +++ b/net/mac80211/cfg.c
4187     @@ -652,6 +652,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
4188     if (sta->sdata->dev != dev)
4189     continue;
4190    
4191     + sinfo.filled = 0;
4192     + sta_set_sinfo(sta, &sinfo);
4193     i = 0;
4194     ADD_STA_STATS(sta);
4195     }
4196     diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
4197     index 7fc5d0d..3401262 100644
4198     --- a/net/mac80211/pm.c
4199     +++ b/net/mac80211/pm.c
4200     @@ -99,10 +99,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
4201     }
4202     mutex_unlock(&local->sta_mtx);
4203    
4204     - /* remove all interfaces */
4205     + /* remove all interfaces that were created in the driver */
4206     list_for_each_entry(sdata, &local->interfaces, list) {
4207     - if (!ieee80211_sdata_running(sdata))
4208     + if (!ieee80211_sdata_running(sdata) ||
4209     + sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
4210     + sdata->vif.type == NL80211_IFTYPE_MONITOR)
4211     continue;
4212     +
4213     drv_remove_interface(local, sdata);
4214     }
4215    
4216     diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
4217     index ac7ef54..e6512e2 100644
4218     --- a/net/mac80211/rc80211_minstrel.c
4219     +++ b/net/mac80211/rc80211_minstrel.c
4220     @@ -290,7 +290,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
4221     struct minstrel_rate *msr, *mr;
4222     unsigned int ndx;
4223     bool mrr_capable;
4224     - bool prev_sample = mi->prev_sample;
4225     + bool prev_sample;
4226     int delta;
4227     int sampling_ratio;
4228    
4229     @@ -314,6 +314,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
4230     (mi->sample_count + mi->sample_deferred / 2);
4231    
4232     /* delta < 0: no sampling required */
4233     + prev_sample = mi->prev_sample;
4234     mi->prev_sample = false;
4235     if (delta < 0 || (!mrr_capable && prev_sample))
4236     return;
4237     diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
4238     index 5b2d301..f5aed96 100644
4239     --- a/net/mac80211/rc80211_minstrel_ht.c
4240     +++ b/net/mac80211/rc80211_minstrel_ht.c
4241     @@ -804,10 +804,18 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
4242    
4243     sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
4244     info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
4245     + rate->count = 1;
4246     +
4247     + if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
4248     + int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
4249     + rate->idx = mp->cck_rates[idx];
4250     + rate->flags = 0;
4251     + return;
4252     + }
4253     +
4254     rate->idx = sample_idx % MCS_GROUP_RATES +
4255     (sample_group->streams - 1) * MCS_GROUP_RATES;
4256     rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
4257     - rate->count = 1;
4258     }
4259    
4260     static void
4261     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4262     index 8e29526..83f6d29 100644
4263     --- a/net/mac80211/rx.c
4264     +++ b/net/mac80211/rx.c
4265     @@ -932,8 +932,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
4266     struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
4267     struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4268    
4269     - /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
4270     - if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
4271     + /*
4272     + * Drop duplicate 802.11 retransmissions
4273     + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4274     + */
4275     + if (rx->skb->len >= 24 && rx->sta &&
4276     + !ieee80211_is_ctl(hdr->frame_control) &&
4277     + !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
4278     + !is_multicast_ether_addr(hdr->addr1)) {
4279     if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
4280     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
4281     hdr->seq_ctrl)) {
4282     diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
4283     index 2fd6dbe..1076fe1 100644
4284     --- a/net/netlink/genetlink.c
4285     +++ b/net/netlink/genetlink.c
4286     @@ -877,8 +877,10 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
4287     #ifdef CONFIG_MODULES
4288     if (res == NULL) {
4289     genl_unlock();
4290     + up_read(&cb_lock);
4291     request_module("net-pf-%d-proto-%d-family-%s",
4292     PF_NETLINK, NETLINK_GENERIC, name);
4293     + down_read(&cb_lock);
4294     genl_lock();
4295     res = genl_family_find_byname(name);
4296     }
4297     diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
4298     index ca8e0a5..1f9c314 100644
4299     --- a/net/sched/sch_atm.c
4300     +++ b/net/sched/sch_atm.c
4301     @@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
4302     struct sockaddr_atmpvc pvc;
4303     int state;
4304    
4305     + memset(&pvc, 0, sizeof(pvc));
4306     pvc.sap_family = AF_ATMPVC;
4307     pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
4308     pvc.sap_addr.vpi = flow->vcc->vpi;
4309     diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
4310     index 1bc210f..8ec1598 100644
4311     --- a/net/sched/sch_cbq.c
4312     +++ b/net/sched/sch_cbq.c
4313     @@ -1465,6 +1465,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
4314     unsigned char *b = skb_tail_pointer(skb);
4315     struct tc_cbq_wrropt opt;
4316    
4317     + memset(&opt, 0, sizeof(opt));
4318     opt.flags = 0;
4319     opt.allot = cl->allot;
4320     opt.priority = cl->priority + 1;
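
Both the sch_atm.c and sch_cbq.c hunks zero their dump structures before filling them, because a struct copied out to userspace may contain padding bytes or members that the code never writes, and those bytes would otherwise expose stale stack contents. The userspace illustration below shows the same pattern; the struct layout is invented and the "copy out" is just a hex dump.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented dump record: the layout forces padding after 'family'. */
    struct dump_record {
            uint8_t  family;
            /* 3 bytes of padding typically land here */
            uint32_t interface;
            uint16_t vpi;
            /* 2 more padding bytes at the tail */
    };

    static void copy_out(const struct dump_record *r)
    {
            const unsigned char *p = (const unsigned char *)r;
            for (size_t i = 0; i < sizeof(*r); i++)
                    printf("%02x ", p[i]);
            printf("\n");
    }

    int main(void)
    {
            struct dump_record rec;

            /* Without this memset the padding bytes keep whatever happened
             * to be on the stack, and copy_out() would expose it. */
            memset(&rec, 0, sizeof(rec));
            rec.family = 8;
            rec.interface = 1;
            rec.vpi = 5;

            copy_out(&rec);
            return 0;
    }
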
4321     diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
4322     index d304f41..af7ffd4 100644
4323     --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
4324     +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
4325     @@ -120,7 +120,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
4326     if (IS_ERR(clnt)) {
4327     dprintk("RPC: failed to create AF_LOCAL gssproxy "
4328     "client (errno %ld).\n", PTR_ERR(clnt));
4329     - result = -PTR_ERR(clnt);
4330     + result = PTR_ERR(clnt);
4331     *_clnt = NULL;
4332     goto out;
4333     }
4334     @@ -328,7 +328,6 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data)
4335     kfree(data->in_handle.data);
4336     kfree(data->out_handle.data);
4337     kfree(data->out_token.data);
4338     - kfree(data->mech_oid.data);
4339     free_svc_cred(&data->creds);
4340     }
4341    
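The gss_rpc_upcall.c diff fixes two separate problems: PTR_ERR() already yields a negative errno value, so negating it turned the error into a positive number that callers checking for "result < 0" would miss, and mech_oid.data points at memory owned elsewhere, so freeing it here was a double free. A tiny userspace sketch of the sign issue follows; ptr_err() is a fake stand-in for the kernel macro, not the real thing.

    #include <errno.h>
    #include <stdio.h>

    /* Fake stand-in for the kernel's PTR_ERR(): the pointer value already
     * encodes a negative errno. */
    static long ptr_err(const void *p)
    {
            return (long)p;
    }

    int main(void)
    {
            const void *clnt = (const void *)(long)-EINVAL;  /* an ERR_PTR-style value */

            long wrong = -ptr_err(clnt);  /* positive: error checks on '< 0' miss it */
            long right = ptr_err(clnt);   /* negative: propagates as a normal errno */

            printf("wrong=%ld right=%ld\n", wrong, right);
            return 0;
    }
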
4342     diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
4343     index 357f613..3c85d1c 100644
4344     --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
4345     +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
4346     @@ -430,7 +430,7 @@ static int dummy_enc_nameattr_array(struct xdr_stream *xdr,
4347     static int dummy_dec_nameattr_array(struct xdr_stream *xdr,
4348     struct gssx_name_attr_array *naa)
4349     {
4350     - struct gssx_name_attr dummy;
4351     + struct gssx_name_attr dummy = { .attr = {.len = 0} };
4352     u32 count, i;
4353     __be32 *p;
4354    
4355     @@ -493,12 +493,13 @@ static int gssx_enc_name(struct xdr_stream *xdr,
4356     return err;
4357     }
4358    
4359     +
4360     static int gssx_dec_name(struct xdr_stream *xdr,
4361     struct gssx_name *name)
4362     {
4363     - struct xdr_netobj dummy_netobj;
4364     - struct gssx_name_attr_array dummy_name_attr_array;
4365     - struct gssx_option_array dummy_option_array;
4366     + struct xdr_netobj dummy_netobj = { .len = 0 };
4367     + struct gssx_name_attr_array dummy_name_attr_array = { .count = 0 };
4368     + struct gssx_option_array dummy_option_array = { .count = 0 };
4369     int err;
4370    
4371     /* name->display_name */
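
The gss_rpc_xdr.c change initializes the throwaway decode targets because C does not zero automatic variables: a designated initializer such as { .len = 0 } also zero-initializes every member that is not named, so later length and count checks in the decode helpers see 0 instead of stack garbage. A short demonstration of that initializer rule on an invented struct:

    #include <stdio.h>

    struct netobj_like {
            unsigned int len;
            void *data;
    };

    int main(void)
    {
            /* Every member not named in a designated initializer is
             * zero-initialized, so 'data' is guaranteed to be NULL here. */
            struct netobj_like dummy = { .len = 0 };

            printf("len=%u data=%p\n", dummy.len, dummy.data);
            return 0;
    }
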
4372     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
4373     index b14b7e3..db8ead9 100644
4374     --- a/net/wireless/nl80211.c
4375     +++ b/net/wireless/nl80211.c
4376     @@ -6588,12 +6588,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
4377    
4378     void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
4379     {
4380     + struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
4381     void *hdr = ((void **)skb->cb)[1];
4382     struct nlattr *data = ((void **)skb->cb)[2];
4383    
4384     nla_nest_end(skb, data);
4385     genlmsg_end(skb, hdr);
4386     - genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
4387     + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
4388     + nl80211_testmode_mcgrp.id, gfp);
4389     }
4390     EXPORT_SYMBOL(cfg80211_testmode_event);
4391     #endif
4392     @@ -10028,7 +10030,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
4393    
4394     genlmsg_end(msg, hdr);
4395    
4396     - genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
4397     + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
4398     + nl80211_mlme_mcgrp.id, gfp);
4399     return;
4400    
4401     nla_put_failure:
4402     diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
4403     index 99db892..9896954 100644
4404     --- a/sound/core/compress_offload.c
4405     +++ b/sound/core/compress_offload.c
4406     @@ -743,7 +743,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
4407     mutex_lock(&stream->device->lock);
4408     switch (_IOC_NR(cmd)) {
4409     case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
4410     - put_user(SNDRV_COMPRESS_VERSION,
4411     + retval = put_user(SNDRV_COMPRESS_VERSION,
4412     (int __user *)arg) ? -EFAULT : 0;
4413     break;
4414     case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
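
In compress_offload.c the ioctl handler now keeps put_user()'s result, so a faulting user pointer is reported as -EFAULT instead of being discarded while retval keeps its previous value. The userspace sketch below shows the same "map copy failure to an error code" pattern; put_word() is a fake copy-out helper, not a kernel interface.

    #include <errno.h>
    #include <stdio.h>

    #define FAKE_VERSION 0x00010000

    /* Fake copy-out helper: returns non-zero when the destination is bad,
     * mimicking put_user()/copy_to_user() semantics. */
    static int put_word(int value, int *dst)
    {
            if (!dst)
                    return 1;
            *dst = value;
            return 0;
    }

    static int handle_version_ioctl(int *user_ptr)
    {
            /* Keep the helper's result: discarding it would make the call
             * "succeed" even when nothing was written. */
            return put_word(FAKE_VERSION, user_ptr) ? -EFAULT : 0;
    }

    int main(void)
    {
            int out = 0;

            printf("%d\n", handle_version_ioctl(&out));  /* 0, version stored */
            printf("%d\n", handle_version_ioctl(NULL));  /* -EFAULT */
            return 0;
    }
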
4415     diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
4416     index 7c11d46..48a9d00 100644
4417     --- a/sound/pci/hda/hda_auto_parser.c
4418     +++ b/sound/pci/hda/hda_auto_parser.c
4419     @@ -860,7 +860,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
4420     }
4421     }
4422     if (id < 0 && quirk) {
4423     - for (q = quirk; q->subvendor; q++) {
4424     + for (q = quirk; q->subvendor || q->subdevice; q++) {
4425     unsigned int vendorid =
4426     q->subdevice | (q->subvendor << 16);
4427     unsigned int mask = 0xffff0000 | q->subdevice_mask;
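
The hda_auto_parser.c loop walks a quirk table that ends with an all-zero entry; terminating on q->subvendor alone would also stop at legitimate entries whose subvendor happens to be 0x0000, such as the Mac Mini line added to patch_sigmatel.c below. Here is a compact sketch of scanning a sentinel-terminated table with the corrected condition; the quirk struct is a simplified stand-in, not the real snd_pci_quirk.

    #include <stdio.h>

    struct quirk {
            unsigned short subvendor;
            unsigned short subdevice;
            const char *name;
    };

    static const struct quirk table[] = {
            { 0x0000, 0x0100, "Mac Mini" },   /* valid entry with subvendor 0 */
            { 0x106b, 0x0800, "Mac"      },
            { 0x0000, 0x0000, NULL       },   /* all-zero terminator */
    };

    int main(void)
    {
            const struct quirk *q;

            /* Terminate only when BOTH ids are zero, so entries like the
             * 0x0000:0x0100 line above are still visited. */
            for (q = table; q->subvendor || q->subdevice; q++)
                    printf("%04x:%04x %s\n", q->subvendor, q->subdevice, q->name);

            return 0;
    }
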
4428     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
4429     index e849e1e..dc4833f 100644
4430     --- a/sound/pci/hda/patch_sigmatel.c
4431     +++ b/sound/pci/hda/patch_sigmatel.c
4432     @@ -2815,6 +2815,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
4433    
4434     /* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
4435     static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
4436     + SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
4437     SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
4438     SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
4439     SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),