Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.10/0102-3.10.3-all-fixes.patch



Revision 2253
Tue Aug 13 14:26:09 2013 UTC by niro
File size: 134013 bytes
3.10.6-magellan-r1
1 niro 2253 diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
2     index 1e6634f5..a370b204 100644
3     --- a/Documentation/i2c/busses/i2c-piix4
4     +++ b/Documentation/i2c/busses/i2c-piix4
5     @@ -13,7 +13,7 @@ Supported adapters:
6     * AMD SP5100 (SB700 derivative found on some server mainboards)
7     Datasheet: Publicly available at the AMD website
8     http://support.amd.com/us/Embedded_TechDocs/44413.pdf
9     - * AMD Hudson-2
10     + * AMD Hudson-2, CZ
11     Datasheet: Not publicly available
12     * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
13     Datasheet: Publicly available at the SMSC website http://www.smsc.com
14     diff --git a/Makefile b/Makefile
15     index 43367309..b5485529 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,6 +1,6 @@
19     VERSION = 3
20     PATCHLEVEL = 10
21     -SUBLEVEL = 2
22     +SUBLEVEL = 3
23     EXTRAVERSION =
24     NAME = Unicycling Gorilla
25    
26     diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
27     index 1426468b..f51d669c 100644
28     --- a/arch/arm64/mm/fault.c
29     +++ b/arch/arm64/mm/fault.c
30     @@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
31     #define ESR_CM (1 << 8)
32     #define ESR_LNX_EXEC (1 << 24)
33    
34     -/*
35     - * Check that the permissions on the VMA allow for the fault which occurred.
36     - * If we encountered a write fault, we must have write permission, otherwise
37     - * we allow any permission.
38     - */
39     -static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
40     -{
41     - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
42     -
43     - if (esr & ESR_WRITE)
44     - mask = VM_WRITE;
45     - if (esr & ESR_LNX_EXEC)
46     - mask = VM_EXEC;
47     -
48     - return vma->vm_flags & mask ? false : true;
49     -}
50     -
51     static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
52     - unsigned int esr, unsigned int flags,
53     + unsigned int mm_flags, unsigned long vm_flags,
54     struct task_struct *tsk)
55     {
56     struct vm_area_struct *vma;
57     @@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
58     * it.
59     */
60     good_area:
61     - if (access_error(esr, vma)) {
62     + /*
63     + * Check that the permissions on the VMA allow for the fault which
64     + * occurred. If we encountered a write or exec fault, we must have
65     + * appropriate permissions, otherwise we allow any permission.
66     + */
67     + if (!(vma->vm_flags & vm_flags)) {
68     fault = VM_FAULT_BADACCESS;
69     goto out;
70     }
71    
72     - return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
73     + return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
74    
75     check_stack:
76     if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
77     @@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
78     struct task_struct *tsk;
79     struct mm_struct *mm;
80     int fault, sig, code;
81     - bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
82     - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
83     - (write ? FAULT_FLAG_WRITE : 0);
84     + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
85     + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
86     +
87     + if (esr & ESR_LNX_EXEC) {
88     + vm_flags = VM_EXEC;
89     + } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
90     + vm_flags = VM_WRITE;
91     + mm_flags |= FAULT_FLAG_WRITE;
92     + }
93    
94     tsk = current;
95     mm = tsk->mm;
96     @@ -248,7 +242,7 @@ retry:
97     #endif
98     }
99    
100     - fault = __do_page_fault(mm, addr, esr, flags, tsk);
101     + fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
102    
103     /*
104     * If we need to retry but a fatal signal is pending, handle the
105     @@ -265,7 +259,7 @@ retry:
106     */
107    
108     perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
109     - if (flags & FAULT_FLAG_ALLOW_RETRY) {
110     + if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
111     if (fault & VM_FAULT_MAJOR) {
112     tsk->maj_flt++;
113     perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
114     @@ -280,7 +274,7 @@ retry:
115     * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
116     * starvation.
117     */
118     - flags &= ~FAULT_FLAG_ALLOW_RETRY;
119     + mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
120     goto retry;
121     }
122     }
123     diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
124     index 01b1b3f9..1e1e18c5 100644
125     --- a/arch/mips/cavium-octeon/setup.c
126     +++ b/arch/mips/cavium-octeon/setup.c
127     @@ -996,7 +996,7 @@ void __init plat_mem_setup(void)
128     cvmx_bootmem_unlock();
129     /* Add the memory region for the kernel. */
130     kernel_start = (unsigned long) _text;
131     - kernel_size = ALIGN(_end - _text, 0x100000);
132     + kernel_size = _end - _text;
133    
134     /* Adjust for physical offset. */
135     kernel_start &= ~0xffffffff80000000ULL;
136     diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
137     index 46793b58..07ca627e 100644
138     --- a/arch/powerpc/include/asm/exception-64s.h
139     +++ b/arch/powerpc/include/asm/exception-64s.h
140     @@ -358,12 +358,12 @@ label##_relon_pSeries: \
141     /* No guest interrupts come through here */ \
142     SET_SCRATCH0(r13); /* save r13 */ \
143     EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
144     - EXC_STD, KVMTEST_PR, vec)
145     + EXC_STD, NOTEST, vec)
146    
147     #define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
148     .globl label##_relon_pSeries; \
149     label##_relon_pSeries: \
150     - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
151     + EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
152     EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
153    
154     #define STD_RELON_EXCEPTION_HV(loc, vec, label) \
155     @@ -374,12 +374,12 @@ label##_relon_hv: \
156     /* No guest interrupts come through here */ \
157     SET_SCRATCH0(r13); /* save r13 */ \
158     EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
159     - EXC_HV, KVMTEST, vec)
160     + EXC_HV, NOTEST, vec)
161    
162     #define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
163     .globl label##_relon_hv; \
164     label##_relon_hv: \
165     - EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
166     + EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec); \
167     EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
168    
169     /* This associate vector numbers with bits in paca->irq_happened */
170     diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
171     index 4a9e4086..362142b6 100644
172     --- a/arch/powerpc/include/asm/reg.h
173     +++ b/arch/powerpc/include/asm/reg.h
174     @@ -626,6 +626,7 @@
175     #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
176     #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
177     #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
178     +#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
179     #define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
180     #define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
181     #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
182     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
183     index 40e4a17c..4e00d223 100644
184     --- a/arch/powerpc/kernel/exceptions-64s.S
185     +++ b/arch/powerpc/kernel/exceptions-64s.S
186     @@ -341,10 +341,17 @@ vsx_unavailable_pSeries_1:
187     EXCEPTION_PROLOG_0(PACA_EXGEN)
188     b vsx_unavailable_pSeries
189    
190     +facility_unavailable_trampoline:
191     . = 0xf60
192     SET_SCRATCH0(r13)
193     EXCEPTION_PROLOG_0(PACA_EXGEN)
194     - b tm_unavailable_pSeries
195     + b facility_unavailable_pSeries
196     +
197     +hv_facility_unavailable_trampoline:
198     + . = 0xf80
199     + SET_SCRATCH0(r13)
200     + EXCEPTION_PROLOG_0(PACA_EXGEN)
201     + b facility_unavailable_hv
202    
203     #ifdef CONFIG_CBE_RAS
204     STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
205     @@ -522,8 +529,10 @@ denorm_done:
206     KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
207     STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
208     KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
209     - STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
210     + STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
211     KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
212     + STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
213     + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
214    
215     /*
216     * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
217     @@ -793,14 +802,10 @@ system_call_relon_pSeries:
218     STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
219    
220     . = 0x4e00
221     - SET_SCRATCH0(r13)
222     - EXCEPTION_PROLOG_0(PACA_EXGEN)
223     - b h_data_storage_relon_hv
224     + b . /* Can't happen, see v2.07 Book III-S section 6.5 */
225    
226     . = 0x4e20
227     - SET_SCRATCH0(r13)
228     - EXCEPTION_PROLOG_0(PACA_EXGEN)
229     - b h_instr_storage_relon_hv
230     + b . /* Can't happen, see v2.07 Book III-S section 6.5 */
231    
232     . = 0x4e40
233     SET_SCRATCH0(r13)
234     @@ -808,9 +813,7 @@ system_call_relon_pSeries:
235     b emulation_assist_relon_hv
236    
237     . = 0x4e60
238     - SET_SCRATCH0(r13)
239     - EXCEPTION_PROLOG_0(PACA_EXGEN)
240     - b hmi_exception_relon_hv
241     + b . /* Can't happen, see v2.07 Book III-S section 6.5 */
242    
243     . = 0x4e80
244     SET_SCRATCH0(r13)
245     @@ -835,11 +838,17 @@ vsx_unavailable_relon_pSeries_1:
246     EXCEPTION_PROLOG_0(PACA_EXGEN)
247     b vsx_unavailable_relon_pSeries
248    
249     -tm_unavailable_relon_pSeries_1:
250     +facility_unavailable_relon_trampoline:
251     . = 0x4f60
252     SET_SCRATCH0(r13)
253     EXCEPTION_PROLOG_0(PACA_EXGEN)
254     - b tm_unavailable_relon_pSeries
255     + b facility_unavailable_relon_pSeries
256     +
257     +hv_facility_unavailable_relon_trampoline:
258     + . = 0x4f80
259     + SET_SCRATCH0(r13)
260     + EXCEPTION_PROLOG_0(PACA_EXGEN)
261     + b facility_unavailable_relon_hv
262    
263     STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
264     #ifdef CONFIG_PPC_DENORMALISATION
265     @@ -1165,36 +1174,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
266     bl .vsx_unavailable_exception
267     b .ret_from_except
268    
269     - .align 7
270     - .globl tm_unavailable_common
271     -tm_unavailable_common:
272     - EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
273     - bl .save_nvgprs
274     - DISABLE_INTS
275     - addi r3,r1,STACK_FRAME_OVERHEAD
276     - bl .tm_unavailable_exception
277     - b .ret_from_except
278     + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
279    
280     .align 7
281     .globl __end_handlers
282     __end_handlers:
283    
284     /* Equivalents to the above handlers for relocation-on interrupt vectors */
285     - STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
286     - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
287     - STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
288     - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
289     STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
290     - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
291     - STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
292     - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
293     MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
294     - KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
295    
296     STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
297     STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
298     STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
299     - STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
300     + STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
301     + STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
302    
303     #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
304     /*
305     diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
306     index a949bdfc..f0b47d1a 100644
307     --- a/arch/powerpc/kernel/hw_breakpoint.c
308     +++ b/arch/powerpc/kernel/hw_breakpoint.c
309     @@ -176,7 +176,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
310     length_max = 512 ; /* 64 doublewords */
311     /* DAWR region can't cross 512 boundary */
312     if ((bp->attr.bp_addr >> 10) !=
313     - ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
314     + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
315     return -EINVAL;
316     }
317     if (info->len >
318     @@ -250,6 +250,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
319     * we still need to single-step the instruction, but we don't
320     * generate an event.
321     */
322     + info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
323     if (!((bp->attr.bp_addr <= dar) &&
324     (dar - bp->attr.bp_addr < bp->attr.bp_len)))
325     info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
326     diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
327     index 98c2fc19..64f7bd5b 100644
328     --- a/arch/powerpc/kernel/ptrace.c
329     +++ b/arch/powerpc/kernel/ptrace.c
330     @@ -1449,7 +1449,9 @@ static long ppc_set_hwdebug(struct task_struct *child,
331     */
332     if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
333     len = bp_info->addr2 - bp_info->addr;
334     - } else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
335     + } else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
336     + len = 1;
337     + else {
338     ptrace_put_breakpoints(child);
339     return -EINVAL;
340     }
341     diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
342     index e379d3fd..389fb807 100644
343     --- a/arch/powerpc/kernel/setup_64.c
344     +++ b/arch/powerpc/kernel/setup_64.c
345     @@ -76,7 +76,7 @@
346     #endif
347    
348     int boot_cpuid = 0;
349     -int __initdata spinning_secondaries;
350     +int spinning_secondaries;
351     u64 ppc64_pft_size;
352    
353     /* Pick defaults since we might want to patch instructions
354     diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
355     index 201385c3..0f83122e 100644
356     --- a/arch/powerpc/kernel/signal_32.c
357     +++ b/arch/powerpc/kernel/signal_32.c
358     @@ -407,7 +407,8 @@ inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
359     * altivec/spe instructions at some point.
360     */
361     static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
362     - int sigret, int ctx_has_vsx_region)
363     + struct mcontext __user *tm_frame, int sigret,
364     + int ctx_has_vsx_region)
365     {
366     unsigned long msr = regs->msr;
367    
368     @@ -475,6 +476,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
369    
370     if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
371     return 1;
372     + /* We need to write 0 the MSR top 32 bits in the tm frame so that we
373     + * can check it on the restore to see if TM is active
374     + */
375     + if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
376     + return 1;
377     +
378     if (sigret) {
379     /* Set up the sigreturn trampoline: li r0,sigret; sc */
380     if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
381     @@ -747,7 +754,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
382     struct mcontext __user *tm_sr)
383     {
384     long err;
385     - unsigned long msr;
386     + unsigned long msr, msr_hi;
387     #ifdef CONFIG_VSX
388     int i;
389     #endif
390     @@ -852,8 +859,11 @@ static long restore_tm_user_regs(struct pt_regs *regs,
391     tm_enable();
392     /* This loads the checkpointed FP/VEC state, if used */
393     tm_recheckpoint(&current->thread, msr);
394     - /* The task has moved into TM state S, so ensure MSR reflects this */
395     - regs->msr = (regs->msr & ~MSR_TS_MASK) | MSR_TS_S;
396     + /* Get the top half of the MSR */
397     + if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
398     + return 1;
399     + /* Pull in MSR TM from user context */
400     + regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
401    
402     /* This loads the speculative FP/VEC state, if used */
403     if (msr & MSR_FP) {
404     @@ -952,6 +962,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
405     {
406     struct rt_sigframe __user *rt_sf;
407     struct mcontext __user *frame;
408     + struct mcontext __user *tm_frame = NULL;
409     void __user *addr;
410     unsigned long newsp = 0;
411     int sigret;
412     @@ -985,23 +996,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
413     }
414    
415     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
416     + tm_frame = &rt_sf->uc_transact.uc_mcontext;
417     if (MSR_TM_ACTIVE(regs->msr)) {
418     - if (save_tm_user_regs(regs, &rt_sf->uc.uc_mcontext,
419     - &rt_sf->uc_transact.uc_mcontext, sigret))
420     + if (save_tm_user_regs(regs, frame, tm_frame, sigret))
421     goto badframe;
422     }
423     else
424     #endif
425     - if (save_user_regs(regs, frame, sigret, 1))
426     + {
427     + if (save_user_regs(regs, frame, tm_frame, sigret, 1))
428     goto badframe;
429     + }
430     regs->link = tramp;
431    
432     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
433     if (MSR_TM_ACTIVE(regs->msr)) {
434     if (__put_user((unsigned long)&rt_sf->uc_transact,
435     &rt_sf->uc.uc_link)
436     - || __put_user(to_user_ptr(&rt_sf->uc_transact.uc_mcontext),
437     - &rt_sf->uc_transact.uc_regs))
438     + || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
439     goto badframe;
440     }
441     else
442     @@ -1170,7 +1182,7 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
443     mctx = (struct mcontext __user *)
444     ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
445     if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
446     - || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
447     + || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
448     || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
449     || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
450     return -EFAULT;
451     @@ -1233,7 +1245,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
452     if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
453     goto bad;
454    
455     - if (MSR_TM_SUSPENDED(msr_hi<<32)) {
456     + if (MSR_TM_ACTIVE(msr_hi<<32)) {
457     /* We only recheckpoint on return if we're
458     * transaction.
459     */
460     @@ -1392,6 +1404,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
461     {
462     struct sigcontext __user *sc;
463     struct sigframe __user *frame;
464     + struct mcontext __user *tm_mctx = NULL;
465     unsigned long newsp = 0;
466     int sigret;
467     unsigned long tramp;
468     @@ -1425,6 +1438,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
469     }
470    
471     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
472     + tm_mctx = &frame->mctx_transact;
473     if (MSR_TM_ACTIVE(regs->msr)) {
474     if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
475     sigret))
476     @@ -1432,8 +1446,10 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
477     }
478     else
479     #endif
480     - if (save_user_regs(regs, &frame->mctx, sigret, 1))
481     + {
482     + if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
483     goto badframe;
484     + }
485    
486     regs->link = tramp;
487    
488     @@ -1481,16 +1497,22 @@ badframe:
489     long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
490     struct pt_regs *regs)
491     {
492     + struct sigframe __user *sf;
493     struct sigcontext __user *sc;
494     struct sigcontext sigctx;
495     struct mcontext __user *sr;
496     void __user *addr;
497     sigset_t set;
498     +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
499     + struct mcontext __user *mcp, *tm_mcp;
500     + unsigned long msr_hi;
501     +#endif
502    
503     /* Always make any pending restarted system calls return -EINTR */
504     current_thread_info()->restart_block.fn = do_no_restart_syscall;
505    
506     - sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
507     + sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
508     + sc = &sf->sctx;
509     addr = sc;
510     if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
511     goto badframe;
512     @@ -1507,11 +1529,25 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
513     #endif
514     set_current_blocked(&set);
515    
516     - sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
517     - addr = sr;
518     - if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
519     - || restore_user_regs(regs, sr, 1))
520     +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
521     + mcp = (struct mcontext __user *)&sf->mctx;
522     + tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
523     + if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
524     goto badframe;
525     + if (MSR_TM_ACTIVE(msr_hi<<32)) {
526     + if (!cpu_has_feature(CPU_FTR_TM))
527     + goto badframe;
528     + if (restore_tm_user_regs(regs, mcp, tm_mcp))
529     + goto badframe;
530     + } else
531     +#endif
532     + {
533     + sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
534     + addr = sr;
535     + if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
536     + || restore_user_regs(regs, sr, 1))
537     + goto badframe;
538     + }
539    
540     set_thread_flag(TIF_RESTOREALL);
541     return 0;
542     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
543     index 34594736..887e99d8 100644
544     --- a/arch/powerpc/kernel/signal_64.c
545     +++ b/arch/powerpc/kernel/signal_64.c
546     @@ -410,6 +410,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
547    
548     /* get MSR separately, transfer the LE bit if doing signal return */
549     err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
550     + /* pull in MSR TM from user context */
551     + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
552     +
553     + /* pull in MSR LE from user context */
554     regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
555    
556     /* The following non-GPR non-FPR non-VR state is also checkpointed: */
557     @@ -505,8 +509,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
558     tm_enable();
559     /* This loads the checkpointed FP/VEC state, if used */
560     tm_recheckpoint(&current->thread, msr);
561     - /* The task has moved into TM state S, so ensure MSR reflects this: */
562     - regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);
563    
564     /* This loads the speculative FP/VEC state, if used */
565     if (msr & MSR_FP) {
566     @@ -654,7 +656,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
567     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
568     if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
569     goto badframe;
570     - if (MSR_TM_SUSPENDED(msr)) {
571     + if (MSR_TM_ACTIVE(msr)) {
572     /* We recheckpoint on return. */
573     struct ucontext __user *uc_transact;
574     if (__get_user(uc_transact, &uc->uc_link))
575     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
576     index c0e5caf8..e4f205a2 100644
577     --- a/arch/powerpc/kernel/traps.c
578     +++ b/arch/powerpc/kernel/traps.c
579     @@ -1282,25 +1282,50 @@ void vsx_unavailable_exception(struct pt_regs *regs)
580     die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
581     }
582    
583     -void tm_unavailable_exception(struct pt_regs *regs)
584     +void facility_unavailable_exception(struct pt_regs *regs)
585     {
586     + static char *facility_strings[] = {
587     + "FPU",
588     + "VMX/VSX",
589     + "DSCR",
590     + "PMU SPRs",
591     + "BHRB",
592     + "TM",
593     + "AT",
594     + "EBB",
595     + "TAR",
596     + };
597     + char *facility, *prefix;
598     + u64 value;
599     +
600     + if (regs->trap == 0xf60) {
601     + value = mfspr(SPRN_FSCR);
602     + prefix = "";
603     + } else {
604     + value = mfspr(SPRN_HFSCR);
605     + prefix = "Hypervisor ";
606     + }
607     +
608     + value = value >> 56;
609     +
610     /* We restore the interrupt state now */
611     if (!arch_irq_disabled_regs(regs))
612     local_irq_enable();
613    
614     - /* Currently we never expect a TMU exception. Catch
615     - * this and kill the process!
616     - */
617     - printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
618     - "(msr %lx)\n",
619     - regs->nip, regs->msr);
620     + if (value < ARRAY_SIZE(facility_strings))
621     + facility = facility_strings[value];
622     + else
623     + facility = "unknown";
624     +
625     + pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
626     + prefix, facility, regs->nip, regs->msr);
627    
628     if (user_mode(regs)) {
629     _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
630     return;
631     }
632    
633     - die("Unexpected TM unavailable exception", regs, SIGABRT);
634     + die("Unexpected facility unavailable exception", regs, SIGABRT);
635     }
636    
637     #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
638     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
639     index 88c0425d..2859a1f5 100644
640     --- a/arch/powerpc/mm/numa.c
641     +++ b/arch/powerpc/mm/numa.c
642     @@ -1433,11 +1433,9 @@ static int update_cpu_topology(void *data)
643     if (cpu != update->cpu)
644     continue;
645    
646     - unregister_cpu_under_node(update->cpu, update->old_nid);
647     unmap_cpu_from_node(update->cpu);
648     map_cpu_to_node(update->cpu, update->new_nid);
649     vdso_getcpu_init();
650     - register_cpu_under_node(update->cpu, update->new_nid);
651     }
652    
653     return 0;
654     @@ -1485,6 +1483,9 @@ int arch_update_cpu_topology(void)
655     stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
656    
657     for (ud = &updates[0]; ud; ud = ud->next) {
658     + unregister_cpu_under_node(ud->cpu, ud->old_nid);
659     + register_cpu_under_node(ud->cpu, ud->new_nid);
660     +
661     dev = get_cpu_device(ud->cpu);
662     if (dev)
663     kobject_uevent(&dev->kobj, KOBJ_CHANGE);
664     diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
665     index 29c64828..d3ee2e50 100644
666     --- a/arch/powerpc/perf/core-book3s.c
667     +++ b/arch/powerpc/perf/core-book3s.c
668     @@ -75,6 +75,8 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
669    
670     #define MMCR0_FCHV 0
671     #define MMCR0_PMCjCE MMCR0_PMCnCE
672     +#define MMCR0_FC56 0
673     +#define MMCR0_PMAO 0
674    
675     #define SPRN_MMCRA SPRN_MMCR2
676     #define MMCRA_SAMPLE_ENABLE 0
677     @@ -852,7 +854,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
678     static void power_pmu_disable(struct pmu *pmu)
679     {
680     struct cpu_hw_events *cpuhw;
681     - unsigned long flags;
682     + unsigned long flags, val;
683    
684     if (!ppmu)
685     return;
686     @@ -860,9 +862,6 @@ static void power_pmu_disable(struct pmu *pmu)
687     cpuhw = &__get_cpu_var(cpu_hw_events);
688    
689     if (!cpuhw->disabled) {
690     - cpuhw->disabled = 1;
691     - cpuhw->n_added = 0;
692     -
693     /*
694     * Check if we ever enabled the PMU on this cpu.
695     */
696     @@ -872,6 +871,21 @@ static void power_pmu_disable(struct pmu *pmu)
697     }
698    
699     /*
700     + * Set the 'freeze counters' bit, clear PMAO/FC56.
701     + */
702     + val = mfspr(SPRN_MMCR0);
703     + val |= MMCR0_FC;
704     + val &= ~(MMCR0_PMAO | MMCR0_FC56);
705     +
706     + /*
707     + * The barrier is to make sure the mtspr has been
708     + * executed and the PMU has frozen the events etc.
709     + * before we return.
710     + */
711     + write_mmcr0(cpuhw, val);
712     + mb();
713     +
714     + /*
715     * Disable instruction sampling if it was enabled
716     */
717     if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
718     @@ -880,14 +894,8 @@ static void power_pmu_disable(struct pmu *pmu)
719     mb();
720     }
721    
722     - /*
723     - * Set the 'freeze counters' bit.
724     - * The barrier is to make sure the mtspr has been
725     - * executed and the PMU has frozen the events
726     - * before we return.
727     - */
728     - write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
729     - mb();
730     + cpuhw->disabled = 1;
731     + cpuhw->n_added = 0;
732     }
733     local_irq_restore(flags);
734     }
735     @@ -911,12 +919,18 @@ static void power_pmu_enable(struct pmu *pmu)
736    
737     if (!ppmu)
738     return;
739     +
740     local_irq_save(flags);
741     +
742     cpuhw = &__get_cpu_var(cpu_hw_events);
743     - if (!cpuhw->disabled) {
744     - local_irq_restore(flags);
745     - return;
746     + if (!cpuhw->disabled)
747     + goto out;
748     +
749     + if (cpuhw->n_events == 0) {
750     + ppc_set_pmu_inuse(0);
751     + goto out;
752     }
753     +
754     cpuhw->disabled = 0;
755    
756     /*
757     @@ -928,8 +942,6 @@ static void power_pmu_enable(struct pmu *pmu)
758     if (!cpuhw->n_added) {
759     mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
760     mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
761     - if (cpuhw->n_events == 0)
762     - ppc_set_pmu_inuse(0);
763     goto out_enable;
764     }
765    
766     diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
767     index f7d1c4ff..d59f5b2d 100644
768     --- a/arch/powerpc/perf/power8-pmu.c
769     +++ b/arch/powerpc/perf/power8-pmu.c
770     @@ -109,6 +109,16 @@
771     #define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
772     #define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
773    
774     +#define EVENT_VALID_MASK \
775     + ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
776     + (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
777     + (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
778     + (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
779     + (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
780     + (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
781     + (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
782     + EVENT_PSEL_MASK)
783     +
784     /* MMCRA IFM bits - POWER8 */
785     #define POWER8_MMCRA_IFM1 0x0000000040000000UL
786     #define POWER8_MMCRA_IFM2 0x0000000080000000UL
787     @@ -212,6 +222,9 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
788    
789     mask = value = 0;
790    
791     + if (event & ~EVENT_VALID_MASK)
792     + return -1;
793     +
794     pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
795     unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
796     cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
797     @@ -378,6 +391,10 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
798     if (pmc_inuse & 0x7c)
799     mmcr[0] |= MMCR0_PMCjCE;
800    
801     + /* If we're not using PMC 5 or 6, freeze them */
802     + if (!(pmc_inuse & 0x60))
803     + mmcr[0] |= MMCR0_FC56;
804     +
805     mmcr[1] = mmcr1;
806     mmcr[2] = mmcra;
807    
808     diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
809     index 9c9d15e4..7816beff 100644
810     --- a/arch/powerpc/platforms/powernv/pci-ioda.c
811     +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
812     @@ -441,6 +441,17 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
813     set_iommu_table_base(&pdev->dev, &pe->tce32_table);
814     }
815    
816     +static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
817     +{
818     + struct pci_dev *dev;
819     +
820     + list_for_each_entry(dev, &bus->devices, bus_list) {
821     + set_iommu_table_base(&dev->dev, &pe->tce32_table);
822     + if (dev->subordinate)
823     + pnv_ioda_setup_bus_dma(pe, dev->subordinate);
824     + }
825     +}
826     +
827     static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
828     u64 *startp, u64 *endp)
829     {
830     @@ -596,6 +607,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
831     }
832     iommu_init_table(tbl, phb->hose->node);
833    
834     + if (pe->pdev)
835     + set_iommu_table_base(&pe->pdev->dev, tbl);
836     + else
837     + pnv_ioda_setup_bus_dma(pe, pe->pbus);
838     +
839     return;
840     fail:
841     /* XXX Failure: Try to fallback to 64-bit only ? */
842     @@ -667,6 +683,11 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
843     }
844     iommu_init_table(tbl, phb->hose->node);
845    
846     + if (pe->pdev)
847     + set_iommu_table_base(&pe->pdev->dev, tbl);
848     + else
849     + pnv_ioda_setup_bus_dma(pe, pe->pbus);
850     +
851     return;
852     fail:
853     if (pe->tce32_seg >= 0)
854     diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
855     index ef12c0e6..7d740ebb 100644
856     --- a/arch/xtensa/kernel/head.S
857     +++ b/arch/xtensa/kernel/head.S
858     @@ -68,6 +68,15 @@ _SetupMMU:
859    
860     #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
861     initialize_mmu
862     +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
863     + rsr a2, excsave1
864     + movi a3, 0x08000000
865     + bgeu a2, a3, 1f
866     + movi a3, 0xd0000000
867     + add a2, a2, a3
868     + wsr a2, excsave1
869     +1:
870     +#endif
871     #endif
872     .end no-absolute-literals
873    
874     diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
875     index 6dd25ecd..14c6c3a6 100644
876     --- a/arch/xtensa/kernel/setup.c
877     +++ b/arch/xtensa/kernel/setup.c
878     @@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
879     {
880     meminfo_t* mi;
881     mi = (meminfo_t*)(tag->data);
882     - initrd_start = (void*)(mi->start);
883     - initrd_end = (void*)(mi->end);
884     + initrd_start = __va(mi->start);
885     + initrd_end = __va(mi->end);
886    
887     return 0;
888     }
889     @@ -164,7 +164,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
890    
891     static int __init parse_tag_fdt(const bp_tag_t *tag)
892     {
893     - dtb_start = (void *)(tag->data[0]);
894     + dtb_start = __va(tag->data[0]);
895     return 0;
896     }
897    
898     diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
899     index 9a8a674e..8eae6590 100644
900     --- a/drivers/ata/ata_piix.c
901     +++ b/drivers/ata/ata_piix.c
902     @@ -338,6 +338,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
903     /* SATA Controller IDE (BayTrail) */
904     { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
905     { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
906     + /* SATA Controller IDE (Coleto Creek) */
907     + { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
908    
909     { } /* terminate list */
910     };
911     diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
912     index 61c59ee4..1c41722b 100644
913     --- a/drivers/ata/libata-pmp.c
914     +++ b/drivers/ata/libata-pmp.c
915     @@ -389,9 +389,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
916     /* link reports offline after LPM */
917     link->flags |= ATA_LFLAG_NO_LPM;
918    
919     - /* Class code report is unreliable. */
920     + /*
921     + * Class code report is unreliable and SRST times
922     + * out under certain configurations.
923     + */
924     if (link->pmp < 5)
925     - link->flags |= ATA_LFLAG_ASSUME_ATA;
926     + link->flags |= ATA_LFLAG_NO_SRST |
927     + ATA_LFLAG_ASSUME_ATA;
928    
929     /* port 5 is for SEMB device and it doesn't like SRST */
930     if (link->pmp == 5)
931     @@ -399,20 +403,17 @@ static void sata_pmp_quirks(struct ata_port *ap)
932     ATA_LFLAG_ASSUME_SEMB;
933     }
934     } else if (vendor == 0x1095 && devid == 0x4723) {
935     - /* sil4723 quirks */
936     - ata_for_each_link(link, ap, EDGE) {
937     - /* link reports offline after LPM */
938     - link->flags |= ATA_LFLAG_NO_LPM;
939     -
940     - /* class code report is unreliable */
941     - if (link->pmp < 2)
942     - link->flags |= ATA_LFLAG_ASSUME_ATA;
943     -
944     - /* the config device at port 2 locks up on SRST */
945     - if (link->pmp == 2)
946     - link->flags |= ATA_LFLAG_NO_SRST |
947     - ATA_LFLAG_ASSUME_ATA;
948     - }
949     + /*
950     + * sil4723 quirks
951     + *
952     + * Link reports offline after LPM. Class code report is
953     + * unreliable. SIMG PMPs never got SRST reliable and the
954     + * config device at port 2 locks up on SRST.
955     + */
956     + ata_for_each_link(link, ap, EDGE)
957     + link->flags |= ATA_LFLAG_NO_LPM |
958     + ATA_LFLAG_NO_SRST |
959     + ATA_LFLAG_ASSUME_ATA;
960     } else if (vendor == 0x1095 && devid == 0x4726) {
961     /* sil4726 quirks */
962     ata_for_each_link(link, ap, EDGE) {
963     diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
964     index 90b159b7..cd8daf47 100644
965     --- a/drivers/ata/libata-zpodd.c
966     +++ b/drivers/ata/libata-zpodd.c
967     @@ -32,13 +32,14 @@ struct zpodd {
968    
969     static int eject_tray(struct ata_device *dev)
970     {
971     - struct ata_taskfile tf = {};
972     + struct ata_taskfile tf;
973     const char cdb[] = { GPCMD_START_STOP_UNIT,
974     0, 0, 0,
975     0x02, /* LoEj */
976     0, 0, 0, 0, 0, 0, 0,
977     };
978    
979     + ata_tf_init(dev, &tf);
980     tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
981     tf.command = ATA_CMD_PACKET;
982     tf.protocol = ATAPI_PROT_NODATA;
983     @@ -52,8 +53,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
984     char buf[16];
985     unsigned int ret;
986     struct rm_feature_desc *desc = (void *)(buf + 8);
987     - struct ata_taskfile tf = {};
988     -
989     + struct ata_taskfile tf;
990     char cdb[] = { GPCMD_GET_CONFIGURATION,
991     2, /* only 1 feature descriptor requested */
992     0, 3, /* 3, removable medium feature */
993     @@ -62,6 +62,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
994     0, 0, 0,
995     };
996    
997     + ata_tf_init(dev, &tf);
998     tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
999     tf.command = ATA_CMD_PACKET;
1000     tf.protocol = ATAPI_PROT_PIO;
1001     diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
1002     index b20aa96b..c846fd3c 100644
1003     --- a/drivers/ata/sata_highbank.c
1004     +++ b/drivers/ata/sata_highbank.c
1005     @@ -196,10 +196,26 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
1006     return 0;
1007     }
1008    
1009     +/*
1010     + * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
1011     + * Retrying the phy hard reset can work around the issue, but the drive
1012     + * may fail again. In less than 150 out of 15000 test runs, it took more
1013     + * than 10 tries for the link to be established (but never more than 35).
1014     + * Triple the maximum observed retry count to provide plenty of margin for
1015     + * rare events and to guarantee that the link is established.
1016     + *
1017     + * Also, the default 2 second time-out on a failed drive is too long in
1018     + * this situation. The uboot implementation of the same driver function
1019     + * uses a much shorter time-out period and never experiences a time out
1020     + * issue. Reducing the time-out to 500ms improves the responsiveness.
1021     + * The other timing constants were kept the same as the stock AHCI driver.
1022     + * This change was also tested 15000 times on 24 drives and none of them
1023     + * experienced a time out.
1024     + */
1025     static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
1026     unsigned long deadline)
1027     {
1028     - const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1029     + static const unsigned long timing[] = { 5, 100, 500};
1030     struct ata_port *ap = link->ap;
1031     struct ahci_port_priv *pp = ap->private_data;
1032     u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1033     @@ -207,7 +223,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
1034     bool online;
1035     u32 sstatus;
1036     int rc;
1037     - int retry = 10;
1038     + int retry = 100;
1039    
1040     ahci_stop_engine(ap);
1041    
1042     diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
1043     index ab09ed37..6b02eddc 100644
1044     --- a/drivers/clocksource/dw_apb_timer_of.c
1045     +++ b/drivers/clocksource/dw_apb_timer_of.c
1046     @@ -44,7 +44,7 @@ static void add_clockevent(struct device_node *event_timer)
1047     u32 irq, rate;
1048    
1049     irq = irq_of_parse_and_map(event_timer, 0);
1050     - if (irq == NO_IRQ)
1051     + if (irq == 0)
1052     panic("No IRQ for clock event timer");
1053    
1054     timer_get_base_and_rate(event_timer, &iobase, &rate);
1055     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1056     index 2d53f47d..178fe7a6 100644
1057     --- a/drivers/cpufreq/cpufreq.c
1058     +++ b/drivers/cpufreq/cpufreq.c
1059     @@ -1837,13 +1837,15 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1060     if (dev) {
1061     switch (action) {
1062     case CPU_ONLINE:
1063     + case CPU_ONLINE_FROZEN:
1064     cpufreq_add_dev(dev, NULL);
1065     break;
1066     case CPU_DOWN_PREPARE:
1067     - case CPU_UP_CANCELED_FROZEN:
1068     + case CPU_DOWN_PREPARE_FROZEN:
1069     __cpufreq_remove_dev(dev, NULL);
1070     break;
1071     case CPU_DOWN_FAILED:
1072     + case CPU_DOWN_FAILED_FROZEN:
1073     cpufreq_add_dev(dev, NULL);
1074     break;
1075     }
1076     diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1077     index dc9b72e2..5af40ad8 100644
1078     --- a/drivers/cpufreq/cpufreq_governor.c
1079     +++ b/drivers/cpufreq/cpufreq_governor.c
1080     @@ -26,7 +26,6 @@
1081     #include <linux/tick.h>
1082     #include <linux/types.h>
1083     #include <linux/workqueue.h>
1084     -#include <linux/cpu.h>
1085    
1086     #include "cpufreq_governor.h"
1087    
1088     @@ -181,10 +180,8 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
1089     if (!all_cpus) {
1090     __gov_queue_work(smp_processor_id(), dbs_data, delay);
1091     } else {
1092     - get_online_cpus();
1093     for_each_cpu(i, policy->cpus)
1094     __gov_queue_work(i, dbs_data, delay);
1095     - put_online_cpus();
1096     }
1097     }
1098     EXPORT_SYMBOL_GPL(gov_queue_work);
1099     diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
1100     index 591b6fb6..bfd6273f 100644
1101     --- a/drivers/cpufreq/cpufreq_stats.c
1102     +++ b/drivers/cpufreq/cpufreq_stats.c
1103     @@ -353,13 +353,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
1104     cpufreq_update_policy(cpu);
1105     break;
1106     case CPU_DOWN_PREPARE:
1107     + case CPU_DOWN_PREPARE_FROZEN:
1108     cpufreq_stats_free_sysfs(cpu);
1109     break;
1110     case CPU_DEAD:
1111     - cpufreq_stats_free_table(cpu);
1112     - break;
1113     - case CPU_UP_CANCELED_FROZEN:
1114     - cpufreq_stats_free_sysfs(cpu);
1115     + case CPU_DEAD_FROZEN:
1116     cpufreq_stats_free_table(cpu);
1117     break;
1118     }
1119     diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
1120     index cf919e36..239ef30f 100644
1121     --- a/drivers/gpu/drm/drm_gem.c
1122     +++ b/drivers/gpu/drm/drm_gem.c
1123     @@ -453,25 +453,21 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
1124     spin_lock(&dev->object_name_lock);
1125     if (!obj->name) {
1126     ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
1127     - obj->name = ret;
1128     - args->name = (uint64_t) obj->name;
1129     - spin_unlock(&dev->object_name_lock);
1130     - idr_preload_end();
1131     -
1132     if (ret < 0)
1133     goto err;
1134     - ret = 0;
1135     +
1136     + obj->name = ret;
1137    
1138     /* Allocate a reference for the name table. */
1139     drm_gem_object_reference(obj);
1140     - } else {
1141     - args->name = (uint64_t) obj->name;
1142     - spin_unlock(&dev->object_name_lock);
1143     - idr_preload_end();
1144     - ret = 0;
1145     }
1146    
1147     + args->name = (uint64_t) obj->name;
1148     + ret = 0;
1149     +
1150     err:
1151     + spin_unlock(&dev->object_name_lock);
1152     + idr_preload_end();
1153     drm_gem_object_unreference_unlocked(obj);
1154     return ret;
1155     }
1156     diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1157     index 9e35dafc..34118b0c 100644
1158     --- a/drivers/gpu/drm/i915/i915_gem.c
1159     +++ b/drivers/gpu/drm/i915/i915_gem.c
1160     @@ -1160,7 +1160,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1161     /* Manually manage the write flush as we may have not yet
1162     * retired the buffer.
1163     */
1164     - if (obj->last_write_seqno &&
1165     + if (ret == 0 &&
1166     + obj->last_write_seqno &&
1167     i915_seqno_passed(seqno, obj->last_write_seqno)) {
1168     obj->last_write_seqno = 0;
1169     obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1170     diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
1171     index a1e8ecb6..3bc8a58a 100644
1172     --- a/drivers/gpu/drm/i915/i915_gem_context.c
1173     +++ b/drivers/gpu/drm/i915/i915_gem_context.c
1174     @@ -113,7 +113,7 @@ static int get_context_size(struct drm_device *dev)
1175     case 7:
1176     reg = I915_READ(GEN7_CXT_SIZE);
1177     if (IS_HASWELL(dev))
1178     - ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
1179     + ret = HSW_CXT_TOTAL_SIZE;
1180     else
1181     ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
1182     break;
1183     diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1184     index 0aa2ef0d..e5e32869 100644
1185     --- a/drivers/gpu/drm/i915/i915_irq.c
1186     +++ b/drivers/gpu/drm/i915/i915_irq.c
1187     @@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
1188     [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
1189     };
1190    
1191     -static const u32 hpd_status_i965[] = {
1192     - [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
1193     - [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
1194     - [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
1195     - [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
1196     - [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
1197     - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
1198     -};
1199     -
1200     static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
1201     [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
1202     [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
1203     @@ -2952,13 +2943,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
1204     u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1205     u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
1206     HOTPLUG_INT_STATUS_G4X :
1207     - HOTPLUG_INT_STATUS_I965);
1208     + HOTPLUG_INT_STATUS_I915);
1209    
1210     DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1211     hotplug_status);
1212     if (hotplug_trigger) {
1213     if (hotplug_irq_storm_detect(dev, hotplug_trigger,
1214     - IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
1215     + IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
1216     i915_hpd_irq_setup(dev);
1217     queue_work(dev_priv->wq,
1218     &dev_priv->hotplug_work);
1219     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1220     index 2d6b62e4..80b0a662 100644
1221     --- a/drivers/gpu/drm/i915/i915_reg.h
1222     +++ b/drivers/gpu/drm/i915/i915_reg.h
1223     @@ -1535,14 +1535,13 @@
1224     GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1225     GEN7_CXT_GT1_SIZE(ctx_reg) + \
1226     GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1227     -#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
1228     -#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
1229     -#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
1230     -#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
1231     - HSW_CXT_RING_SIZE(ctx_reg) + \
1232     - HSW_CXT_RENDER_SIZE(ctx_reg) + \
1233     - GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1234     -
1235     +/* Haswell does have the CXT_SIZE register however it does not appear to be
1236     + * valid. Now, docs explain in dwords what is in the context object. The full
1237     + * size is 70720 bytes, however, the power context and execlist context will
1238     + * never be saved (power context is stored elsewhere, and execlists don't work
1239     + * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
1240     + */
1241     +#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1242    
1243     /*
1244     * Overlay regs
1245     @@ -1691,6 +1690,12 @@
1246     /* SDVO is different across gen3/4 */
1247     #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
1248     #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
1249     +/*
1250     + * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm,
1251     + * since reality corrobates that they're the same as on gen3. But keep these
1252     + * bits here (and the comment!) to help any other lost wanderers back onto the
1253     + * right tracks.
1254     + */
1255     #define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
1256     #define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
1257     #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
1258     @@ -1702,13 +1707,6 @@
1259     PORTC_HOTPLUG_INT_STATUS | \
1260     PORTD_HOTPLUG_INT_STATUS)
1261    
1262     -#define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \
1263     - SDVOB_HOTPLUG_INT_STATUS_I965 | \
1264     - SDVOC_HOTPLUG_INT_STATUS_I965 | \
1265     - PORTB_HOTPLUG_INT_STATUS | \
1266     - PORTC_HOTPLUG_INT_STATUS | \
1267     - PORTD_HOTPLUG_INT_STATUS)
1268     -
1269     #define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
1270     SDVOB_HOTPLUG_INT_STATUS_I915 | \
1271     SDVOC_HOTPLUG_INT_STATUS_I915 | \
1272     diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
1273     index bf29b2f4..988911af 100644
1274     --- a/drivers/gpu/drm/mgag200/mgag200_drv.h
1275     +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
1276     @@ -198,7 +198,8 @@ struct mga_device {
1277     struct ttm_bo_device bdev;
1278     } ttm;
1279    
1280     - u32 reg_1e24; /* SE model number */
1281     + /* SE model number stored in reg 0x1e24 */
1282     + u32 unique_rev_id;
1283     };
1284    
1285    
1286     diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
1287     index 99059237..dafe049f 100644
1288     --- a/drivers/gpu/drm/mgag200/mgag200_main.c
1289     +++ b/drivers/gpu/drm/mgag200/mgag200_main.c
1290     @@ -176,7 +176,7 @@ static int mgag200_device_init(struct drm_device *dev,
1291    
1292     /* stash G200 SE model number for later use */
1293     if (IS_G200_SE(mdev))
1294     - mdev->reg_1e24 = RREG32(0x1e24);
1295     + mdev->unique_rev_id = RREG32(0x1e24);
1296    
1297     ret = mga_vram_init(mdev);
1298     if (ret)
1299     diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1300     index ee66badc..99e07b68 100644
1301     --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1302     +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1303     @@ -1008,7 +1008,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1304    
1305    
1306     if (IS_G200_SE(mdev)) {
1307     - if (mdev->reg_1e24 >= 0x02) {
1308     + if (mdev->unique_rev_id >= 0x02) {
1309     u8 hi_pri_lvl;
1310     u32 bpp;
1311     u32 mb;
1312     @@ -1038,7 +1038,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
1313     WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
1314     } else {
1315     WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
1316     - if (mdev->reg_1e24 >= 0x01)
1317     + if (mdev->unique_rev_id >= 0x01)
1318     WREG8(MGAREG_CRTCEXT_DATA, 0x03);
1319     else
1320     WREG8(MGAREG_CRTCEXT_DATA, 0x04);
1321     @@ -1410,6 +1410,32 @@ static int mga_vga_get_modes(struct drm_connector *connector)
1322     return ret;
1323     }
1324    
1325     +static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
1326     + int bits_per_pixel)
1327     +{
1328     + uint32_t total_area, divisor;
1329     + int64_t active_area, pixels_per_second, bandwidth;
1330     + uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
1331     +
1332     + divisor = 1024;
1333     +
1334     + if (!mode->htotal || !mode->vtotal || !mode->clock)
1335     + return 0;
1336     +
1337     + active_area = mode->hdisplay * mode->vdisplay;
1338     + total_area = mode->htotal * mode->vtotal;
1339     +
1340     + pixels_per_second = active_area * mode->clock * 1000;
1341     + do_div(pixels_per_second, total_area);
1342     +
1343     + bandwidth = pixels_per_second * bytes_per_pixel * 100;
1344     + do_div(bandwidth, divisor);
1345     +
1346     + return (uint32_t)(bandwidth);
1347     +}
1348     +
1349     +#define MODE_BANDWIDTH MODE_BAD
1350     +
1351     static int mga_vga_mode_valid(struct drm_connector *connector,
1352     struct drm_display_mode *mode)
1353     {
1354     @@ -1421,7 +1447,45 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1355     int bpp = 32;
1356     int i = 0;
1357    
1358     - /* FIXME: Add bandwidth and g200se limitations */
1359     + if (IS_G200_SE(mdev)) {
1360     + if (mdev->unique_rev_id == 0x01) {
1361     + if (mode->hdisplay > 1600)
1362     + return MODE_VIRTUAL_X;
1363     + if (mode->vdisplay > 1200)
1364     + return MODE_VIRTUAL_Y;
1365     + if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1366     + > (24400 * 1024))
1367     + return MODE_BANDWIDTH;
1368     + } else if (mdev->unique_rev_id >= 0x02) {
1369     + if (mode->hdisplay > 1920)
1370     + return MODE_VIRTUAL_X;
1371     + if (mode->vdisplay > 1200)
1372     + return MODE_VIRTUAL_Y;
1373     + if (mga_vga_calculate_mode_bandwidth(mode, bpp)
1374     + > (30100 * 1024))
1375     + return MODE_BANDWIDTH;
1376     + }
1377     + } else if (mdev->type == G200_WB) {
1378     + if (mode->hdisplay > 1280)
1379     + return MODE_VIRTUAL_X;
1380     + if (mode->vdisplay > 1024)
1381     + return MODE_VIRTUAL_Y;
1382     + if (mga_vga_calculate_mode_bandwidth(mode,
1383     + bpp > (31877 * 1024)))
1384     + return MODE_BANDWIDTH;
1385     + } else if (mdev->type == G200_EV &&
1386     + (mga_vga_calculate_mode_bandwidth(mode, bpp)
1387     + > (32700 * 1024))) {
1388     + return MODE_BANDWIDTH;
1389     + } else if (mode->type == G200_EH &&
1390     + (mga_vga_calculate_mode_bandwidth(mode, bpp)
1391     + > (37500 * 1024))) {
1392     + return MODE_BANDWIDTH;
1393     + } else if (mode->type == G200_ER &&
1394     + (mga_vga_calculate_mode_bandwidth(mode,
1395     + bpp) > (55000 * 1024))) {
1396     + return MODE_BANDWIDTH;
1397     + }
1398    
1399     if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
1400     mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
1401     diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
1402     index f065fc24..db8c6fd4 100644
1403     --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
1404     +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
1405     @@ -55,6 +55,10 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
1406     nv_wr32(priv, 0x61c510 + soff, 0x00000000);
1407     nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
1408    
1409     + nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
1410     + nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
1411     + nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
1412     +
1413     /* ??? */
1414     nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
1415     nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
1416     diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1417     index 6a38402f..5680d3eb 100644
1418     --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1419     +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1420     @@ -1107,6 +1107,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1421     u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1422     u32 hval, hreg = 0x614200 + (head * 0x800);
1423     u32 oval, oreg;
1424     + u32 mask;
1425     u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
1426     if (conf != ~0) {
1427     if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
1428     @@ -1133,6 +1134,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1429     oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
1430     oval = 0x00000000;
1431     hval = 0x00000000;
1432     + mask = 0xffffffff;
1433     } else
1434     if (!outp.location) {
1435     if (outp.type == DCB_OUTPUT_DP)
1436     @@ -1140,14 +1142,16 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1437     oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
1438     oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1439     hval = 0x00000000;
1440     + mask = 0x00000707;
1441     } else {
1442     oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
1443     oval = 0x00000001;
1444     hval = 0x00000001;
1445     + mask = 0x00000707;
1446     }
1447    
1448     nv_mask(priv, hreg, 0x0000000f, hval);
1449     - nv_mask(priv, oreg, 0x00000707, oval);
1450     + nv_mask(priv, oreg, mask, oval);
1451     }
1452     }
1453    
1454     diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
1455     index 77c67fc9..e66fb771 100644
1456     --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
1457     +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
1458     @@ -362,7 +362,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
1459     vm->fpde = offset >> (vmm->pgt_bits + 12);
1460     vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
1461    
1462     - vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
1463     + vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
1464     if (!vm->pgt) {
1465     kfree(vm);
1466     return -ENOMEM;
1467     @@ -371,7 +371,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
1468     ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
1469     block >> 12);
1470     if (ret) {
1471     - kfree(vm->pgt);
1472     + vfree(vm->pgt);
1473     kfree(vm);
1474     return ret;
1475     }
1476     @@ -446,7 +446,7 @@ nouveau_vm_del(struct nouveau_vm *vm)
1477     }
1478    
1479     nouveau_mm_fini(&vm->mm);
1480     - kfree(vm->pgt);
1481     + vfree(vm->pgt);
1482     kfree(vm);
1483     }
1484    
1485     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1486     index 8406c825..4120d355 100644
1487     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1488     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1489     @@ -186,6 +186,13 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
1490     u8 backlight_level;
1491     char bl_name[16];
1492    
1493     + /* Mac laptops with multiple GPUs use the gmux driver for backlight
1494     + * so don't register a backlight device
1495     + */
1496     + if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
1497     + (rdev->pdev->device == 0x6741))
1498     + return;
1499     +
1500     if (!radeon_encoder->enc_priv)
1501     return;
1502    
1503     diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1504     index ed7c8a76..b9c6f767 100644
1505     --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
1506     +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1507     @@ -128,14 +128,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
1508     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1509     uint32_t offset = dig->afmt->offset;
1510     uint8_t *frame = buffer + 3;
1511     -
1512     - /* Our header values (type, version, length) should be alright, Intel
1513     - * is using the same. Checksum function also seems to be OK, it works
1514     - * fine for audio infoframe. However calculated value is always lower
1515     - * by 2 in comparison to fglrx. It breaks displaying anything in case
1516     - * of TVs that strictly check the checksum. Hack it manually here to
1517     - * workaround this issue. */
1518     - frame[0x0] += 2;
1519     + uint8_t *header = buffer;
1520    
1521     WREG32(AFMT_AVI_INFO0 + offset,
1522     frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1523     @@ -144,7 +137,7 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
1524     WREG32(AFMT_AVI_INFO2 + offset,
1525     frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1526     WREG32(AFMT_AVI_INFO3 + offset,
1527     - frame[0xC] | (frame[0xD] << 8));
1528     + frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1529     }
1530    
1531     static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1532     diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1533     index 456750a0..e73b2a73 100644
1534     --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1535     +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1536     @@ -133,14 +133,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
1537     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1538     uint32_t offset = dig->afmt->offset;
1539     uint8_t *frame = buffer + 3;
1540     -
1541     - /* Our header values (type, version, length) should be alright, Intel
1542     - * is using the same. Checksum function also seems to be OK, it works
1543     - * fine for audio infoframe. However calculated value is always lower
1544     - * by 2 in comparison to fglrx. It breaks displaying anything in case
1545     - * of TVs that strictly check the checksum. Hack it manually here to
1546     - * workaround this issue. */
1547     - frame[0x0] += 2;
1548     + uint8_t *header = buffer;
1549    
1550     WREG32(HDMI0_AVI_INFO0 + offset,
1551     frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1552     @@ -149,7 +142,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
1553     WREG32(HDMI0_AVI_INFO2 + offset,
1554     frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1555     WREG32(HDMI0_AVI_INFO3 + offset,
1556     - frame[0xC] | (frame[0xD] << 8));
1557     + frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1558     }
1559    
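The two AVI-infoframe hunks above drop the manual "+2" checksum fudge and instead program the header's version byte (header[1]) into the top byte of the last AVI_INFO register. The arithmetic behind the old fudge follows from the CEA-861 checksum rule, under which the header, checksum and payload bytes must sum to zero modulo 256: leaving the 0x02 version byte out of what actually goes over the wire shifts the required checksum by exactly 2. A small standalone illustration; the payload bytes are invented for the example:

/*
 * CEA-861 infoframe checksum demonstration. The payload contents are
 * arbitrary example data, not taken from the driver.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(const uint8_t *bytes, int len)
{
	unsigned int sum = 0;

	for (int i = 0; i < len; i++)
		sum += bytes[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	/* AVI infoframe header: type 0x82, version 0x02, length 13,
	 * followed by the checksum slot and 13 payload bytes. */
	uint8_t buf[3 + 1 + 13] = { 0x82, 0x02, 0x0d, 0x00,
				    /* arbitrary example payload */
				    0x10, 0x28, 0x00, 0x02, 0x00, 0x00, 0x00,
				    0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	uint8_t full = infoframe_checksum(buf, sizeof(buf));

	buf[1] = 0x00;	/* pretend the version byte never gets transmitted */
	uint8_t nover = infoframe_checksum(buf, sizeof(buf));

	/* nover == full + 2 (mod 256): exactly the removed fudge factor */
	printf("checksum with version: 0x%02x, without: 0x%02x\n", full, nover);
	return 0;
}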
1560     /*
1561     diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1562     index 04638aee..99cec182 100644
1563     --- a/drivers/hwmon/nct6775.c
1564     +++ b/drivers/hwmon/nct6775.c
1565     @@ -199,7 +199,7 @@ static const s8 NCT6775_ALARM_BITS[] = {
1566     0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
1567     17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
1568     -1, /* unused */
1569     - 6, 7, 11, 10, 23, /* fan1..fan5 */
1570     + 6, 7, 11, -1, -1, /* fan1..fan5 */
1571     -1, -1, -1, /* unused */
1572     4, 5, 13, -1, -1, -1, /* temp1..temp6 */
1573     12, -1 }; /* intrusion0, intrusion1 */
1574     @@ -625,6 +625,7 @@ struct nct6775_data {
1575     u8 has_fan_min; /* some fans don't have min register */
1576     bool has_fan_div;
1577    
1578     + u8 num_temp_alarms; /* 2 or 3 */
1579     u8 temp_fixed_num; /* 3 or 6 */
1580     u8 temp_type[NUM_TEMP_FIXED];
1581     s8 temp_offset[NUM_TEMP_FIXED];
1582     @@ -1193,6 +1194,42 @@ show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
1583     (unsigned int)((data->alarms >> nr) & 0x01));
1584     }
1585    
1586     +static int find_temp_source(struct nct6775_data *data, int index, int count)
1587     +{
1588     + int source = data->temp_src[index];
1589     + int nr;
1590     +
1591     + for (nr = 0; nr < count; nr++) {
1592     + int src;
1593     +
1594     + src = nct6775_read_value(data,
1595     + data->REG_TEMP_SOURCE[nr]) & 0x1f;
1596     + if (src == source)
1597     + return nr;
1598     + }
1599     + return -1;
1600     +}
1601     +
1602     +static ssize_t
1603     +show_temp_alarm(struct device *dev, struct device_attribute *attr, char *buf)
1604     +{
1605     + struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
1606     + struct nct6775_data *data = nct6775_update_device(dev);
1607     + unsigned int alarm = 0;
1608     + int nr;
1609     +
1610     + /*
1611     + * For temperatures, there is no fixed mapping from registers to alarm
1612     + * bits. Alarm bits are determined by the temperature source mapping.
1613     + */
1614     + nr = find_temp_source(data, sattr->index, data->num_temp_alarms);
1615     + if (nr >= 0) {
1616     + int bit = data->ALARM_BITS[nr + TEMP_ALARM_BASE];
1617     + alarm = (data->alarms >> bit) & 0x01;
1618     + }
1619     + return sprintf(buf, "%u\n", alarm);
1620     +}
1621     +
1622     static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in_reg, NULL, 0, 0);
1623     static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in_reg, NULL, 1, 0);
1624     static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in_reg, NULL, 2, 0);
1625     @@ -1874,22 +1911,18 @@ static struct sensor_device_attribute sda_temp_type[] = {
1626     };
1627    
1628     static struct sensor_device_attribute sda_temp_alarm[] = {
1629     - SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
1630     - TEMP_ALARM_BASE),
1631     - SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
1632     - TEMP_ALARM_BASE + 1),
1633     - SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
1634     - TEMP_ALARM_BASE + 2),
1635     - SENSOR_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL,
1636     - TEMP_ALARM_BASE + 3),
1637     - SENSOR_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL,
1638     - TEMP_ALARM_BASE + 4),
1639     - SENSOR_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL,
1640     - TEMP_ALARM_BASE + 5),
1641     + SENSOR_ATTR(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0),
1642     + SENSOR_ATTR(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 1),
1643     + SENSOR_ATTR(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 2),
1644     + SENSOR_ATTR(temp4_alarm, S_IRUGO, show_temp_alarm, NULL, 3),
1645     + SENSOR_ATTR(temp5_alarm, S_IRUGO, show_temp_alarm, NULL, 4),
1646     + SENSOR_ATTR(temp6_alarm, S_IRUGO, show_temp_alarm, NULL, 5),
1647     + SENSOR_ATTR(temp7_alarm, S_IRUGO, show_temp_alarm, NULL, 6),
1648     + SENSOR_ATTR(temp8_alarm, S_IRUGO, show_temp_alarm, NULL, 7),
1649     + SENSOR_ATTR(temp9_alarm, S_IRUGO, show_temp_alarm, NULL, 8),
1650     + SENSOR_ATTR(temp10_alarm, S_IRUGO, show_temp_alarm, NULL, 9),
1651     };
1652    
1653     -#define NUM_TEMP_ALARM ARRAY_SIZE(sda_temp_alarm)
1654     -
1655     static ssize_t
1656     show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
1657     {
1658     @@ -3215,13 +3248,11 @@ static void nct6775_device_remove_files(struct device *dev)
1659     device_remove_file(dev, &sda_temp_max[i].dev_attr);
1660     device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
1661     device_remove_file(dev, &sda_temp_crit[i].dev_attr);
1662     + device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
1663     if (!(data->have_temp_fixed & (1 << i)))
1664     continue;
1665     device_remove_file(dev, &sda_temp_type[i].dev_attr);
1666     device_remove_file(dev, &sda_temp_offset[i].dev_attr);
1667     - if (i >= NUM_TEMP_ALARM)
1668     - continue;
1669     - device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
1670     }
1671    
1672     device_remove_file(dev, &sda_caseopen[0].dev_attr);
1673     @@ -3419,6 +3450,7 @@ static int nct6775_probe(struct platform_device *pdev)
1674     data->auto_pwm_num = 6;
1675     data->has_fan_div = true;
1676     data->temp_fixed_num = 3;
1677     + data->num_temp_alarms = 3;
1678    
1679     data->ALARM_BITS = NCT6775_ALARM_BITS;
1680    
1681     @@ -3483,6 +3515,7 @@ static int nct6775_probe(struct platform_device *pdev)
1682     data->auto_pwm_num = 4;
1683     data->has_fan_div = false;
1684     data->temp_fixed_num = 3;
1685     + data->num_temp_alarms = 3;
1686    
1687     data->ALARM_BITS = NCT6776_ALARM_BITS;
1688    
1689     @@ -3547,6 +3580,7 @@ static int nct6775_probe(struct platform_device *pdev)
1690     data->auto_pwm_num = 4;
1691     data->has_fan_div = false;
1692     data->temp_fixed_num = 6;
1693     + data->num_temp_alarms = 2;
1694    
1695     data->ALARM_BITS = NCT6779_ALARM_BITS;
1696    
1697     @@ -3843,10 +3877,12 @@ static int nct6775_probe(struct platform_device *pdev)
1698     &sda_fan_input[i].dev_attr);
1699     if (err)
1700     goto exit_remove;
1701     - err = device_create_file(dev,
1702     - &sda_fan_alarm[i].dev_attr);
1703     - if (err)
1704     - goto exit_remove;
1705     + if (data->ALARM_BITS[FAN_ALARM_BASE + i] >= 0) {
1706     + err = device_create_file(dev,
1707     + &sda_fan_alarm[i].dev_attr);
1708     + if (err)
1709     + goto exit_remove;
1710     + }
1711     if (data->kind != nct6776 &&
1712     data->kind != nct6779) {
1713     err = device_create_file(dev,
1714     @@ -3897,6 +3933,12 @@ static int nct6775_probe(struct platform_device *pdev)
1715     if (err)
1716     goto exit_remove;
1717     }
1718     + if (find_temp_source(data, i, data->num_temp_alarms) >= 0) {
1719     + err = device_create_file(dev,
1720     + &sda_temp_alarm[i].dev_attr);
1721     + if (err)
1722     + goto exit_remove;
1723     + }
1724     if (!(data->have_temp_fixed & (1 << i)))
1725     continue;
1726     err = device_create_file(dev, &sda_temp_type[i].dev_attr);
1727     @@ -3905,12 +3947,6 @@ static int nct6775_probe(struct platform_device *pdev)
1728     err = device_create_file(dev, &sda_temp_offset[i].dev_attr);
1729     if (err)
1730     goto exit_remove;
1731     - if (i >= NUM_TEMP_ALARM ||
1732     - data->ALARM_BITS[TEMP_ALARM_BASE + i] < 0)
1733     - continue;
1734     - err = device_create_file(dev, &sda_temp_alarm[i].dev_attr);
1735     - if (err)
1736     - goto exit_remove;
1737     }
1738    
1739     for (i = 0; i < ARRAY_SIZE(sda_caseopen); i++) {
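Because the temperature alarm bits follow whichever source each monitoring slot is configured for, find_temp_source() above has to scan the REG_TEMP_SOURCE registers instead of relying on a fixed per-channel bit. A userspace sketch of that lookup against a mocked register file; the register contents are invented for the example:

/*
 * Sketch of the temperature-source lookup added above. The "registers"
 * are a plain array standing in for REG_TEMP_SOURCE reads.
 */
#include <stdio.h>

#define NUM_ALARM_SLOTS 3	/* num_temp_alarms: 2 or 3 depending on chip */

/* mocked REG_TEMP_SOURCE contents: which source each alarm slot monitors */
static const int temp_source_reg[NUM_ALARM_SLOTS] = { 0x01, 0x02, 0x05 };

static int find_temp_source(int source, int count)
{
	for (int nr = 0; nr < count; nr++)
		if ((temp_source_reg[nr] & 0x1f) == source)
			return nr;
	return -1;	/* this temperature channel has no alarm bit */
}

int main(void)
{
	/* a channel whose source is 0x05 maps to alarm slot 2 */
	printf("source 0x05 -> slot %d\n", find_temp_source(0x05, NUM_ALARM_SLOTS));
	/* source 0x07 is not monitored: no tempX_alarm attribute is created */
	printf("source 0x07 -> slot %d\n", find_temp_source(0x07, NUM_ALARM_SLOTS));
	return 0;
}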
1740     diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
1741     index 631736e2..4faf02b3 100644
1742     --- a/drivers/i2c/busses/Kconfig
1743     +++ b/drivers/i2c/busses/Kconfig
1744     @@ -150,6 +150,7 @@ config I2C_PIIX4
1745     ATI SB700/SP5100
1746     ATI SB800
1747     AMD Hudson-2
1748     + AMD CZ
1749     Serverworks OSB4
1750     Serverworks CSB5
1751     Serverworks CSB6
1752     diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
1753     index 39ab78c1..d05ad590 100644
1754     --- a/drivers/i2c/busses/i2c-piix4.c
1755     +++ b/drivers/i2c/busses/i2c-piix4.c
1756     @@ -22,7 +22,7 @@
1757     Intel PIIX4, 440MX
1758     Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
1759     ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
1760     - AMD Hudson-2
1761     + AMD Hudson-2, CZ
1762     SMSC Victory66
1763    
1764     Note: we assume there can only be one device, with one or more
1765     @@ -522,6 +522,7 @@ static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
1766     { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
1767     { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
1768     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
1769     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
1770     { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
1771     PCI_DEVICE_ID_SERVERWORKS_OSB4) },
1772     { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
1773     diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
1774     index 98ddc323..0cf5f8e0 100644
1775     --- a/drivers/iio/inkern.c
1776     +++ b/drivers/iio/inkern.c
1777     @@ -451,7 +451,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
1778     int ret;
1779    
1780     ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
1781     - if (ret == 0)
1782     + if (ret >= 0)
1783     raw64 += offset;
1784    
1785     scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
1786     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1787     index 21d02b0d..a3c33894 100644
1788     --- a/drivers/iommu/amd_iommu.c
1789     +++ b/drivers/iommu/amd_iommu.c
1790     @@ -1484,6 +1484,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
1791    
1792     /* Large PTE found which maps this address */
1793     unmap_size = PTE_PAGE_SIZE(*pte);
1794     +
1795     + /* Only unmap from the first pte in the page */
1796     + if ((unmap_size - 1) & bus_addr)
1797     + break;
1798     count = PAGE_SIZE_PTE_COUNT(unmap_size);
1799     for (i = 0; i < count; i++)
1800     pte[i] = 0ULL;
1801     @@ -1493,7 +1497,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
1802     unmapped += unmap_size;
1803     }
1804    
1805     - BUG_ON(!is_power_of_2(unmapped));
1806     + BUG_ON(unmapped && !is_power_of_2(unmapped));
1807    
1808     return unmapped;
1809     }
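The new (unmap_size - 1) & bus_addr test relies on large-PTE sizes being powers of two: the low bits covered by the mapping are all zero only at the mapping's first PTE, so an unmap request that lands in the middle of a large page is skipped rather than tearing down the whole mapping. Standalone illustration with example addresses:

/*
 * For a power-of-two mapping size, (size - 1) & addr is non-zero exactly
 * when addr points into the middle of the mapping. Addresses below are
 * made up for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long unmap_size = 0x200000UL;		/* 2 MiB large page */
	unsigned long aligned    = 0x40200000UL;	/* first PTE of the page */
	unsigned long misaligned = 0x40210000UL;	/* somewhere inside it */

	printf("aligned:    skip=%d\n", ((unmap_size - 1) & aligned) != 0);
	printf("misaligned: skip=%d\n", ((unmap_size - 1) & misaligned) != 0);
	return 0;
}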
1810     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1811     index 6ddae250..d61eb7ea 100644
1812     --- a/drivers/md/raid10.c
1813     +++ b/drivers/md/raid10.c
1814     @@ -2075,11 +2075,17 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1815     * both 'first' and 'i', so we just compare them.
1816     * All vec entries are PAGE_SIZE;
1817     */
1818     - for (j = 0; j < vcnt; j++)
1819     + int sectors = r10_bio->sectors;
1820     + for (j = 0; j < vcnt; j++) {
1821     + int len = PAGE_SIZE;
1822     + if (sectors < (len / 512))
1823     + len = sectors * 512;
1824     if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1825     page_address(tbio->bi_io_vec[j].bv_page),
1826     - fbio->bi_io_vec[j].bv_len))
1827     + len))
1828     break;
1829     + sectors -= len/512;
1830     + }
1831     if (j == vcnt)
1832     continue;
1833     atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
1834     @@ -2909,14 +2915,13 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
1835     */
1836     if (mddev->bitmap == NULL &&
1837     mddev->recovery_cp == MaxSector &&
1838     + mddev->reshape_position == MaxSector &&
1839     + !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
1840     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1841     + !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1842     conf->fullsync == 0) {
1843     *skipped = 1;
1844     - max_sector = mddev->dev_sectors;
1845     - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
1846     - test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1847     - max_sector = mddev->resync_max_sectors;
1848     - return max_sector - sector_nr;
1849     + return mddev->dev_sectors - sector_nr;
1850     }
1851    
1852     skipped:
1853     @@ -3386,6 +3391,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
1854    
1855     if (bio->bi_end_io == end_sync_read) {
1856     md_sync_acct(bio->bi_bdev, nr_sectors);
1857     + set_bit(BIO_UPTODATE, &bio->bi_flags);
1858     generic_make_request(bio);
1859     }
1860     }
1861     @@ -3532,7 +3538,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
1862    
1863     /* FIXME calc properly */
1864     conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
1865     - max(0,mddev->delta_disks)),
1866     + max(0,-mddev->delta_disks)),
1867     GFP_KERNEL);
1868     if (!conf->mirrors)
1869     goto out;
1870     @@ -3691,7 +3697,7 @@ static int run(struct mddev *mddev)
1871     conf->geo.far_offset == 0)
1872     goto out_free_conf;
1873     if (conf->prev.far_copies != 1 &&
1874     - conf->geo.far_offset == 0)
1875     + conf->prev.far_offset == 0)
1876     goto out_free_conf;
1877     }
1878    
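The reworked compare loop in sync_request_write() above clamps the final page to the sectors actually covered by r10_bio, so bytes beyond the request can no longer register as a resync mismatch. A userspace sketch of the clamping; the buffer contents are invented for the example:

/*
 * Only the sectors covered by the request are compared, so data past the
 * end of a short request cannot produce a false mismatch.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned char a[2 * PAGE_SIZE], b[2 * PAGE_SIZE];
	int sectors = 9;			/* 9 * 512 = 4608 valid bytes */
	int vcnt = 2, mismatch = 0;

	memset(a, 0xaa, sizeof(a));
	memset(b, 0xaa, sizeof(b));
	b[2 * PAGE_SIZE - 1] = 0x55;		/* differs only past byte 4608 */

	for (int j = 0; j < vcnt; j++) {
		int len = PAGE_SIZE;

		if (sectors < len / 512)
			len = sectors * 512;	/* clamp the last page */
		if (memcmp(a + j * PAGE_SIZE, b + j * PAGE_SIZE, len)) {
			mismatch = 1;
			break;
		}
		sectors -= len / 512;
	}
	printf("mismatch within request: %d\n", mismatch);	/* prints 0 */
	return 0;
}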
1879     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
1880     index e6b92ff2..25b8bbbe 100644
1881     --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
1882     +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
1883     @@ -3563,14 +3563,18 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
1884     {
1885     struct ath9k_hw_capabilities *pCap = &ah->caps;
1886     int chain;
1887     - u32 regval;
1888     + u32 regval, value;
1889     static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
1890     AR_PHY_SWITCH_CHAIN_0,
1891     AR_PHY_SWITCH_CHAIN_1,
1892     AR_PHY_SWITCH_CHAIN_2,
1893     };
1894    
1895     - u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
1896     + if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
1897     + ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
1898     + AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
1899     +
1900     + value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
1901    
1902     if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
1903     REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
1904     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
1905     index e7177419..5013c731 100644
1906     --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
1907     +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
1908     @@ -351,6 +351,8 @@
1909    
1910     #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
1911    
1912     +#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
1913     +
1914     /*
1915     * AGC Field Definitions
1916     */
1917     diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
1918     index 7304e758..5e8219a9 100644
1919     --- a/drivers/net/wireless/ath/ath9k/calib.c
1920     +++ b/drivers/net/wireless/ath/ath9k/calib.c
1921     @@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
1922    
1923     if (!caldata) {
1924     chan->noisefloor = nf;
1925     - ah->noise = ath9k_hw_getchan_noise(ah, chan);
1926     return false;
1927     }
1928    
1929     diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1930     index 15dfefcf..b1d5037b 100644
1931     --- a/drivers/net/wireless/ath/ath9k/hw.c
1932     +++ b/drivers/net/wireless/ath/ath9k/hw.c
1933     @@ -1872,7 +1872,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1934    
1935     ah->caldata = caldata;
1936     if (caldata && (chan->channel != caldata->channel ||
1937     - chan->channelFlags != caldata->channelFlags)) {
1938     + chan->channelFlags != caldata->channelFlags ||
1939     + chan->chanmode != caldata->chanmode)) {
1940     /* Operating channel changed, reset channel calibration data */
1941     memset(caldata, 0, sizeof(*caldata));
1942     ath9k_init_nfcal_hist_buffer(ah, chan);
1943     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1944     index 5092ecae..35ced100 100644
1945     --- a/drivers/net/wireless/ath/ath9k/main.c
1946     +++ b/drivers/net/wireless/ath/ath9k/main.c
1947     @@ -1211,13 +1211,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1948     ath_update_survey_stats(sc);
1949     spin_unlock_irqrestore(&common->cc_lock, flags);
1950    
1951     - /*
1952     - * Preserve the current channel values, before updating
1953     - * the same channel
1954     - */
1955     - if (ah->curchan && (old_pos == pos))
1956     - ath9k_hw_getnf(ah, ah->curchan);
1957     -
1958     ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1959     curchan, channel_type);
1960    
1961     diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
1962     index 078e6f34..13f91ac9 100644
1963     --- a/drivers/net/wireless/b43/Kconfig
1964     +++ b/drivers/net/wireless/b43/Kconfig
1965     @@ -28,7 +28,7 @@ config B43
1966    
1967     config B43_BCMA
1968     bool "Support for BCMA bus"
1969     - depends on B43 && BCMA
1970     + depends on B43 && (BCMA = y || BCMA = B43)
1971     default y
1972    
1973     config B43_BCMA_EXTRA
1974     @@ -39,7 +39,7 @@ config B43_BCMA_EXTRA
1975    
1976     config B43_SSB
1977     bool
1978     - depends on B43 && SSB
1979     + depends on B43 && (SSB = y || SSB = B43)
1980     default y
1981    
1982     # Auto-select SSB PCI-HOST support, if possible
1983     diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
1984     index 72f32e5c..705aa338 100644
1985     --- a/drivers/net/wireless/rt2x00/rt2800lib.c
1986     +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
1987     @@ -2392,7 +2392,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
1988     rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1989    
1990     rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
1991     - if (info->default_power1 > power_bound)
1992     + if (info->default_power2 > power_bound)
1993     rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
1994     else
1995     rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
1996     @@ -6056,8 +6056,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1997     default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
1998    
1999     for (i = 14; i < spec->num_channels; i++) {
2000     - info[i].default_power1 = default_power1[i];
2001     - info[i].default_power2 = default_power2[i];
2002     + info[i].default_power1 = default_power1[i - 14];
2003     + info[i].default_power2 = default_power2[i - 14];
2004     }
2005     }
2006    
2007     diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
2008     index 0dc8180e..883a54c8 100644
2009     --- a/drivers/net/wireless/rt2x00/rt61pci.c
2010     +++ b/drivers/net/wireless/rt2x00/rt61pci.c
2011     @@ -2825,7 +2825,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2012     tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2013     for (i = 14; i < spec->num_channels; i++) {
2014     info[i].max_power = MAX_TXPOWER;
2015     - info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2016     + info[i].default_power1 =
2017     + TXPOWER_FROM_DEV(tx_power[i - 14]);
2018     }
2019     }
2020    
2021     diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
2022     index 377e09bb..2bbca183 100644
2023     --- a/drivers/net/wireless/rt2x00/rt73usb.c
2024     +++ b/drivers/net/wireless/rt2x00/rt73usb.c
2025     @@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2026     tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2027     for (i = 14; i < spec->num_channels; i++) {
2028     info[i].max_power = MAX_TXPOWER;
2029     - info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2030     + info[i].default_power1 =
2031     + TXPOWER_FROM_DEV(tx_power[i - 14]);
2032     }
2033     }
2034    
2035     diff --git a/drivers/of/address.c b/drivers/of/address.c
2036     index 04da786c..7c8221d3 100644
2037     --- a/drivers/of/address.c
2038     +++ b/drivers/of/address.c
2039     @@ -106,8 +106,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
2040    
2041     static int of_bus_pci_match(struct device_node *np)
2042     {
2043     - /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
2044     - return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
2045     + /*
2046     + * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
2047     + * "ht" is hypertransport
2048     + */
2049     + return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
2050     + !strcmp(np->type, "ht");
2051     }
2052    
2053     static void of_bus_pci_count_cells(struct device_node *np,
2054     diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
2055     index f6adde44..3743ac93 100644
2056     --- a/drivers/s390/scsi/zfcp_aux.c
2057     +++ b/drivers/s390/scsi/zfcp_aux.c
2058     @@ -3,7 +3,7 @@
2059     *
2060     * Module interface and handling of zfcp data structures.
2061     *
2062     - * Copyright IBM Corp. 2002, 2010
2063     + * Copyright IBM Corp. 2002, 2013
2064     */
2065    
2066     /*
2067     @@ -23,6 +23,7 @@
2068     * Christof Schmitt
2069     * Martin Petermann
2070     * Sven Schuetz
2071     + * Steffen Maier
2072     */
2073    
2074     #define KMSG_COMPONENT "zfcp"
2075     @@ -415,6 +416,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
2076     adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
2077     adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
2078    
2079     + adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
2080     +
2081     if (!zfcp_scsi_adapter_register(adapter))
2082     return adapter;
2083    
2084     diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
2085     index c7e148f3..9152999a 100644
2086     --- a/drivers/s390/scsi/zfcp_fsf.c
2087     +++ b/drivers/s390/scsi/zfcp_fsf.c
2088     @@ -3,7 +3,7 @@
2089     *
2090     * Implementation of FSF commands.
2091     *
2092     - * Copyright IBM Corp. 2002, 2010
2093     + * Copyright IBM Corp. 2002, 2013
2094     */
2095    
2096     #define KMSG_COMPONENT "zfcp"
2097     @@ -483,12 +483,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
2098    
2099     fc_host_port_name(shost) = nsp->fl_wwpn;
2100     fc_host_node_name(shost) = nsp->fl_wwnn;
2101     - fc_host_port_id(shost) = ntoh24(bottom->s_id);
2102     - fc_host_speed(shost) =
2103     - zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
2104     fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
2105    
2106     - adapter->hydra_version = bottom->adapter_type;
2107     adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
2108     adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
2109     (u16)FSF_STATUS_READS_RECOM);
2110     @@ -496,6 +492,19 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
2111     if (fc_host_permanent_port_name(shost) == -1)
2112     fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2113    
2114     + zfcp_scsi_set_prot(adapter);
2115     +
2116     + /* no error return above here, otherwise must fix call chains */
2117     + /* do not evaluate invalid fields */
2118     + if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
2119     + return 0;
2120     +
2121     + fc_host_port_id(shost) = ntoh24(bottom->s_id);
2122     + fc_host_speed(shost) =
2123     + zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
2124     +
2125     + adapter->hydra_version = bottom->adapter_type;
2126     +
2127     switch (bottom->fc_topology) {
2128     case FSF_TOPO_P2P:
2129     adapter->peer_d_id = ntoh24(bottom->peer_d_id);
2130     @@ -517,8 +526,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
2131     return -EIO;
2132     }
2133    
2134     - zfcp_scsi_set_prot(adapter);
2135     -
2136     return 0;
2137     }
2138    
2139     @@ -563,8 +570,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
2140     fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
2141     adapter->hydra_version = 0;
2142    
2143     + /* avoids adapter shutdown to be able to recognize
2144     + * events such as LINK UP */
2145     + atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
2146     + &adapter->status);
2147     zfcp_fsf_link_down_info_eval(req,
2148     &qtcb->header.fsf_status_qual.link_down_info);
2149     + if (zfcp_fsf_exchange_config_evaluate(req))
2150     + return;
2151     break;
2152     default:
2153     zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
2154     diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2155     index 7b31e3f4..7b353647 100644
2156     --- a/drivers/s390/scsi/zfcp_scsi.c
2157     +++ b/drivers/s390/scsi/zfcp_scsi.c
2158     @@ -3,7 +3,7 @@
2159     *
2160     * Interface to Linux SCSI midlayer.
2161     *
2162     - * Copyright IBM Corp. 2002, 2010
2163     + * Copyright IBM Corp. 2002, 2013
2164     */
2165    
2166     #define KMSG_COMPONENT "zfcp"
2167     @@ -311,8 +311,12 @@ static struct scsi_host_template zfcp_scsi_host_template = {
2168     .proc_name = "zfcp",
2169     .can_queue = 4096,
2170     .this_id = -1,
2171     - .sg_tablesize = 1, /* adjusted later */
2172     - .max_sectors = 8, /* adjusted later */
2173     + .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
2174     + * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
2175     + /* GCD, adjusted later */
2176     + .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
2177     + * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
2178     + /* GCD, adjusted later */
2179     .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
2180     .cmd_per_lun = 1,
2181     .use_clustering = 1,
2182     diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
2183     index 0f56d8d7..7e171076 100644
2184     --- a/drivers/scsi/aacraid/src.c
2185     +++ b/drivers/scsi/aacraid/src.c
2186     @@ -93,6 +93,9 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
2187     int send_it = 0;
2188     extern int aac_sync_mode;
2189    
2190     + src_writel(dev, MUnit.ODR_C, bellbits);
2191     + src_readl(dev, MUnit.ODR_C);
2192     +
2193     if (!aac_sync_mode) {
2194     src_writel(dev, MUnit.ODR_C, bellbits);
2195     src_readl(dev, MUnit.ODR_C);
2196     diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2197     index bcb23d28..c76b18bb 100644
2198     --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2199     +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2200     @@ -80,10 +80,6 @@ static int msix_disable = -1;
2201     module_param(msix_disable, int, 0);
2202     MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
2203    
2204     -static int missing_delay[2] = {-1, -1};
2205     -module_param_array(missing_delay, int, NULL, 0);
2206     -MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
2207     -
2208     static int mpt2sas_fwfault_debug;
2209     MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
2210     "and halt firmware - (default=0)");
2211     @@ -2199,7 +2195,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2212     }
2213    
2214     /**
2215     - * _base_update_missing_delay - change the missing delay timers
2216     + * mpt2sas_base_update_missing_delay - change the missing delay timers
2217     * @ioc: per adapter object
2218     * @device_missing_delay: amount of time till device is reported missing
2219     * @io_missing_delay: interval IO is returned when there is a missing device
2220     @@ -2210,8 +2206,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2221     * delay, as well as the io missing delay. This should be called at driver
2222     * load time.
2223     */
2224     -static void
2225     -_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2226     +void
2227     +mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2228     u16 device_missing_delay, u8 io_missing_delay)
2229     {
2230     u16 dmd, dmd_new, dmd_orignal;
2231     @@ -4407,9 +4403,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
2232     if (r)
2233     goto out_free_resources;
2234    
2235     - if (missing_delay[0] != -1 && missing_delay[1] != -1)
2236     - _base_update_missing_delay(ioc, missing_delay[0],
2237     - missing_delay[1]);
2238     ioc->non_operational_loop = 0;
2239    
2240     return 0;
2241     diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
2242     index 4caaac13..11301974 100644
2243     --- a/drivers/scsi/mpt2sas/mpt2sas_base.h
2244     +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
2245     @@ -1055,6 +1055,9 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
2246    
2247     void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
2248    
2249     +void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
2250     + u16 device_missing_delay, u8 io_missing_delay);
2251     +
2252     int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
2253    
2254     /* scsih shared API */
2255     diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2256     index c6bdc926..8dbe500c 100644
2257     --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2258     +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2259     @@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF;
2260     module_param(max_sectors, ushort, 0);
2261     MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
2262    
2263     +static int missing_delay[2] = {-1, -1};
2264     +module_param_array(missing_delay, int, NULL, 0);
2265     +MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
2266     +
2267     /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
2268     #define MPT2SAS_MAX_LUN (16895)
2269     static int max_lun = MPT2SAS_MAX_LUN;
2270     @@ -3994,11 +3998,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2271     else
2272     mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2273     } else
2274     -/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
2275     -/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
2276     - */
2277     - mpi_control |= (0x500);
2278     -
2279     + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2280     } else
2281     mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2282     /* Make sure Device is not raid volume.
2283     @@ -7303,7 +7303,9 @@ _firmware_event_work(struct work_struct *work)
2284     case MPT2SAS_PORT_ENABLE_COMPLETE:
2285     ioc->start_scan = 0;
2286    
2287     -
2288     + if (missing_delay[0] != -1 && missing_delay[1] != -1)
2289     + mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
2290     + missing_delay[1]);
2291    
2292     dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
2293     "from worker thread\n", ioc->name));
2294     diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
2295     index 2c0d0ec8..3b1ea34e 100644
2296     --- a/drivers/scsi/scsi.c
2297     +++ b/drivers/scsi/scsi.c
2298     @@ -1070,8 +1070,8 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
2299     * @opcode: opcode for command to look up
2300     *
2301     * Uses the REPORT SUPPORTED OPERATION CODES to look up the given
2302     - * opcode. Returns 0 if RSOC fails or if the command opcode is
2303     - * unsupported. Returns 1 if the device claims to support the command.
2304     + * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
2305     + * unsupported and 1 if the device claims to support the command.
2306     */
2307     int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
2308     unsigned int len, unsigned char opcode)
2309     @@ -1081,7 +1081,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
2310     int result;
2311    
2312     if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
2313     - return 0;
2314     + return -EINVAL;
2315    
2316     memset(cmd, 0, 16);
2317     cmd[0] = MAINTENANCE_IN;
2318     @@ -1097,7 +1097,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
2319     if (result && scsi_sense_valid(&sshdr) &&
2320     sshdr.sense_key == ILLEGAL_REQUEST &&
2321     (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
2322     - return 0;
2323     + return -EINVAL;
2324    
2325     if ((buffer[1] & 3) == 3) /* Command supported */
2326     return 1;
2327     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2328     index 6f6a1b48..1b1125e6 100644
2329     --- a/drivers/scsi/sd.c
2330     +++ b/drivers/scsi/sd.c
2331     @@ -442,8 +442,10 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
2332    
2333     if (max == 0)
2334     sdp->no_write_same = 1;
2335     - else if (max <= SD_MAX_WS16_BLOCKS)
2336     + else if (max <= SD_MAX_WS16_BLOCKS) {
2337     + sdp->no_write_same = 0;
2338     sdkp->max_ws_blocks = max;
2339     + }
2340    
2341     sd_config_write_same(sdkp);
2342    
2343     @@ -740,7 +742,6 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
2344     {
2345     struct request_queue *q = sdkp->disk->queue;
2346     unsigned int logical_block_size = sdkp->device->sector_size;
2347     - unsigned int blocks = 0;
2348    
2349     if (sdkp->device->no_write_same) {
2350     sdkp->max_ws_blocks = 0;
2351     @@ -752,18 +753,20 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
2352     * blocks per I/O unless the device explicitly advertises a
2353     * bigger limit.
2354     */
2355     - if (sdkp->max_ws_blocks == 0)
2356     - sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
2357     -
2358     - if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
2359     - blocks = min_not_zero(sdkp->max_ws_blocks,
2360     - (u32)SD_MAX_WS16_BLOCKS);
2361     - else
2362     - blocks = min_not_zero(sdkp->max_ws_blocks,
2363     - (u32)SD_MAX_WS10_BLOCKS);
2364     + if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
2365     + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
2366     + (u32)SD_MAX_WS16_BLOCKS);
2367     + else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
2368     + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
2369     + (u32)SD_MAX_WS10_BLOCKS);
2370     + else {
2371     + sdkp->device->no_write_same = 1;
2372     + sdkp->max_ws_blocks = 0;
2373     + }
2374    
2375     out:
2376     - blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
2377     + blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
2378     + (logical_block_size >> 9));
2379     }
2380    
2381     /**
2382     @@ -2635,9 +2638,24 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp)
2383    
2384     static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2385     {
2386     - if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
2387     - WRITE_SAME_16))
2388     + struct scsi_device *sdev = sdkp->device;
2389     +
2390     + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
2391     + sdev->no_report_opcodes = 1;
2392     +
2393     + /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
2394     + * CODES is unsupported and the device has an ATA
2395     + * Information VPD page (SAT).
2396     + */
2397     + if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
2398     + sdev->no_write_same = 1;
2399     + }
2400     +
2401     + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
2402     sdkp->ws16 = 1;
2403     +
2404     + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
2405     + sdkp->ws10 = 1;
2406     }
2407    
2408     static int sd_try_extended_inquiry(struct scsi_device *sdp)
2409     diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
2410     index 2386aeb4..7a049de2 100644
2411     --- a/drivers/scsi/sd.h
2412     +++ b/drivers/scsi/sd.h
2413     @@ -84,6 +84,7 @@ struct scsi_disk {
2414     unsigned lbpws : 1;
2415     unsigned lbpws10 : 1;
2416     unsigned lbpvpd : 1;
2417     + unsigned ws10 : 1;
2418     unsigned ws16 : 1;
2419     };
2420     #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
2421     diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
2422     index 02f77d74..a7856bad 100644
2423     --- a/drivers/staging/line6/pcm.c
2424     +++ b/drivers/staging/line6/pcm.c
2425     @@ -385,8 +385,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
2426     */
2427     static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
2428     {
2429     - if (substream->runtime && snd_pcm_running(substream))
2430     + if (substream->runtime && snd_pcm_running(substream)) {
2431     + snd_pcm_stream_lock_irq(substream);
2432     snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
2433     + snd_pcm_stream_unlock_irq(substream);
2434     + }
2435     }
2436    
2437     /*
2438     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2439     index bd3ae324..71af7b5a 100644
2440     --- a/drivers/virtio/virtio_balloon.c
2441     +++ b/drivers/virtio/virtio_balloon.c
2442     @@ -191,7 +191,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
2443     * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
2444     * is true, we *have* to do it in this order
2445     */
2446     - tell_host(vb, vb->deflate_vq);
2447     + if (vb->num_pfns != 0)
2448     + tell_host(vb, vb->deflate_vq);
2449     mutex_unlock(&vb->balloon_lock);
2450     release_pages_by_pfn(vb->pfns, vb->num_pfns);
2451     }
2452     diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
2453     index 282e2702..a5d52eea 100644
2454     --- a/include/linux/cpu_cooling.h
2455     +++ b/include/linux/cpu_cooling.h
2456     @@ -41,7 +41,7 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus);
2457     */
2458     void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
2459    
2460     -unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int);
2461     +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
2462     #else /* !CONFIG_CPU_THERMAL */
2463     static inline struct thermal_cooling_device *
2464     cpufreq_cooling_register(const struct cpumask *clip_cpus)
2465     @@ -54,7 +54,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
2466     return;
2467     }
2468     static inline
2469     -unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int)
2470     +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
2471     {
2472     return THERMAL_CSTATE_INVALID;
2473     }
2474     diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
2475     index 8d171f42..3d35b702 100644
2476     --- a/include/linux/iio/iio.h
2477     +++ b/include/linux/iio/iio.h
2478     @@ -211,8 +211,8 @@ struct iio_chan_spec {
2479     static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
2480     enum iio_chan_info_enum type)
2481     {
2482     - return (chan->info_mask_separate & type) |
2483     - (chan->info_mask_shared_by_type & type);
2484     + return (chan->info_mask_separate & BIT(type)) |
2485     + (chan->info_mask_shared_by_type & BIT(type));
2486     }
2487    
2488     #define IIO_ST(si, rb, sb, sh) \
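The info masks tested above are bitmaps indexed by the iio_chan_info_enum values, so the enum value has to be turned into a single-bit mask with BIT() before the AND; masking with the raw enum value tests the wrong bit and always fails for the enum value 0. A minimal demonstration; the enum names are modelled on the IIO ones but the values are chosen for the demo:

/*
 * Why the BIT() conversion matters: the masks are bitmaps indexed by the
 * enum value, so the test must use 1 << type, not the raw enum value.
 */
#include <stdio.h>

#define BIT(n) (1UL << (n))

enum chan_info { INFO_RAW = 0, INFO_OFFSET = 1, INFO_SCALE = 2 };

int main(void)
{
	unsigned long mask = BIT(INFO_RAW) | BIT(INFO_SCALE);

	/* broken form: "& type" uses the enum value itself as a mask */
	printf("raw test, INFO_RAW:   %lu\n", mask & INFO_RAW);   /* 0, always */
	printf("raw test, INFO_SCALE: %lu\n", mask & INFO_SCALE); /* 0, wrong bit */

	/* fixed form, as in the hunk above */
	printf("BIT() test, INFO_RAW:   %d\n", (mask & BIT(INFO_RAW)) != 0);
	printf("BIT() test, INFO_SCALE: %d\n", (mask & BIT(INFO_SCALE)) != 0);
	return 0;
}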
2489     diff --git a/kernel/events/core.c b/kernel/events/core.c
2490     index b391907d..e76e4959 100644
2491     --- a/kernel/events/core.c
2492     +++ b/kernel/events/core.c
2493     @@ -761,8 +761,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
2494     {
2495     struct perf_event_context *ctx;
2496    
2497     - rcu_read_lock();
2498     retry:
2499     + /*
2500     + * One of the few rules of preemptible RCU is that one cannot do
2501     + * rcu_read_unlock() while holding a scheduler (or nested) lock when
2502     + * part of the read side critical section was preemptible -- see
2503     + * rcu_read_unlock_special().
2504     + *
2505     + * Since ctx->lock nests under rq->lock we must ensure the entire read
2506     + * side critical section is non-preemptible.
2507     + */
2508     + preempt_disable();
2509     + rcu_read_lock();
2510     ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
2511     if (ctx) {
2512     /*
2513     @@ -778,6 +788,8 @@ retry:
2514     raw_spin_lock_irqsave(&ctx->lock, *flags);
2515     if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2516     raw_spin_unlock_irqrestore(&ctx->lock, *flags);
2517     + rcu_read_unlock();
2518     + preempt_enable();
2519     goto retry;
2520     }
2521    
2522     @@ -787,6 +799,7 @@ retry:
2523     }
2524     }
2525     rcu_read_unlock();
2526     + preempt_enable();
2527     return ctx;
2528     }
2529    
2530     @@ -1761,7 +1774,16 @@ static int __perf_event_enable(void *info)
2531     struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2532     int err;
2533    
2534     - if (WARN_ON_ONCE(!ctx->is_active))
2535     + /*
2536     + * There's a time window between 'ctx->is_active' check
2537     + * in perf_event_enable function and this place having:
2538     + * - IRQs on
2539     + * - ctx->lock unlocked
2540     + *
2541     + * where the task could be killed and 'ctx' deactivated
2542     + * by perf_event_exit_task.
2543     + */
2544     + if (!ctx->is_active)
2545     return -EINVAL;
2546    
2547     raw_spin_lock(&ctx->lock);
2548     @@ -7228,7 +7250,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
2549     * child.
2550     */
2551    
2552     - child_ctx = alloc_perf_context(event->pmu, child);
2553     + child_ctx = alloc_perf_context(parent_ctx->pmu, child);
2554     if (!child_ctx)
2555     return -ENOMEM;
2556    
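Beyond the preemption change, the hunk keeps the existing lock-and-revalidate idiom: dereference the context pointer, take its lock, then re-check that the task still points at the same context and retry if it was switched while waiting for the lock. A loose userspace sketch of that idiom with a mutex and an atomic pointer; the names and types are invented for the illustration and are not kernel APIs:

/*
 * Lock-and-revalidate retry idiom, sketched in userspace. In the kernel,
 * RCU keeps the object alive across the race window; here the object is
 * simply static.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	int data;
};

static struct ctx ctx0 = { PTHREAD_MUTEX_INITIALIZER, 42 };
static _Atomic(struct ctx *) current_ctx = &ctx0;

static struct ctx *lock_current_ctx(void)
{
	struct ctx *c;

retry:
	c = atomic_load(&current_ctx);
	if (!c)
		return NULL;
	pthread_mutex_lock(&c->lock);
	/* the pointer may have been switched while we waited for the lock */
	if (c != atomic_load(&current_ctx)) {
		pthread_mutex_unlock(&c->lock);
		goto retry;
	}
	return c;	/* returned with the lock held */
}

int main(void)
{
	struct ctx *c = lock_current_ctx();

	if (c) {
		printf("data = %d\n", c->data);
		pthread_mutex_unlock(&c->lock);
	}
	return 0;
}

In the kernel it is RCU that keeps the object alive across that window, which is why the read side in the hunk above has to stay non-preemptible end to end.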
2557     diff --git a/kernel/printk.c b/kernel/printk.c
2558     index 8212c1ae..d37d45c9 100644
2559     --- a/kernel/printk.c
2560     +++ b/kernel/printk.c
2561     @@ -1369,9 +1369,9 @@ static int console_trylock_for_printk(unsigned int cpu)
2562     }
2563     }
2564     logbuf_cpu = UINT_MAX;
2565     + raw_spin_unlock(&logbuf_lock);
2566     if (wake)
2567     up(&console_sem);
2568     - raw_spin_unlock(&logbuf_lock);
2569     return retval;
2570     }
2571    
2572     diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
2573     index 20d6fba7..297b90b5 100644
2574     --- a/kernel/time/tick-broadcast.c
2575     +++ b/kernel/time/tick-broadcast.c
2576     @@ -29,6 +29,7 @@
2577    
2578     static struct tick_device tick_broadcast_device;
2579     static cpumask_var_t tick_broadcast_mask;
2580     +static cpumask_var_t tick_broadcast_on;
2581     static cpumask_var_t tmpmask;
2582     static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
2583     static int tick_broadcast_force;
2584     @@ -123,8 +124,9 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
2585     */
2586     int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
2587     {
2588     + struct clock_event_device *bc = tick_broadcast_device.evtdev;
2589     unsigned long flags;
2590     - int ret = 0;
2591     + int ret;
2592    
2593     raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
2594    
2595     @@ -138,20 +140,59 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
2596     dev->event_handler = tick_handle_periodic;
2597     tick_device_setup_broadcast_func(dev);
2598     cpumask_set_cpu(cpu, tick_broadcast_mask);
2599     - tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
2600     + tick_broadcast_start_periodic(bc);
2601     ret = 1;
2602     } else {
2603     /*
2604     - * When the new device is not affected by the stop
2605     - * feature and the cpu is marked in the broadcast mask
2606     - * then clear the broadcast bit.
2607     + * Clear the broadcast bit for this cpu if the
2608     + * device is not power state affected.
2609     */
2610     - if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
2611     - int cpu = smp_processor_id();
2612     + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
2613     cpumask_clear_cpu(cpu, tick_broadcast_mask);
2614     - tick_broadcast_clear_oneshot(cpu);
2615     - } else {
2616     + else
2617     tick_device_setup_broadcast_func(dev);
2618     +
2619     + /*
2620     + * Clear the broadcast bit if the CPU is not in
2621     + * periodic broadcast on state.
2622     + */
2623     + if (!cpumask_test_cpu(cpu, tick_broadcast_on))
2624     + cpumask_clear_cpu(cpu, tick_broadcast_mask);
2625     +
2626     + switch (tick_broadcast_device.mode) {
2627     + case TICKDEV_MODE_ONESHOT:
2628     + /*
2629     + * If the system is in oneshot mode we can
2630     + * unconditionally clear the oneshot mask bit,
2631     + * because the CPU is running and therefore
2632     + * not in an idle state which causes the power
2633     + * state affected device to stop. Let the
2634     + * caller initialize the device.
2635     + */
2636     + tick_broadcast_clear_oneshot(cpu);
2637     + ret = 0;
2638     + break;
2639     +
2640     + case TICKDEV_MODE_PERIODIC:
2641     + /*
2642     + * If the system is in periodic mode, check
2643     + * whether the broadcast device can be
2644     + * switched off now.
2645     + */
2646     + if (cpumask_empty(tick_broadcast_mask) && bc)
2647     + clockevents_shutdown(bc);
2648     + /*
2649     + * If we kept the cpu in the broadcast mask,
2650     + * tell the caller to leave the per cpu device
2651     + * in shutdown state. The periodic interrupt
2652     + * is delivered by the broadcast device.
2653     + */
2654     + ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
2655     + break;
2656     + default:
2657     + /* Nothing to do */
2658     + ret = 0;
2659     + break;
2660     }
2661     }
2662     raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
2663     @@ -281,6 +322,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
2664     switch (*reason) {
2665     case CLOCK_EVT_NOTIFY_BROADCAST_ON:
2666     case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
2667     + cpumask_set_cpu(cpu, tick_broadcast_on);
2668     if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
2669     if (tick_broadcast_device.mode ==
2670     TICKDEV_MODE_PERIODIC)
2671     @@ -290,8 +332,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
2672     tick_broadcast_force = 1;
2673     break;
2674     case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
2675     - if (!tick_broadcast_force &&
2676     - cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
2677     + if (tick_broadcast_force)
2678     + break;
2679     + cpumask_clear_cpu(cpu, tick_broadcast_on);
2680     + if (!tick_device_is_functional(dev))
2681     + break;
2682     + if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
2683     if (tick_broadcast_device.mode ==
2684     TICKDEV_MODE_PERIODIC)
2685     tick_setup_periodic(dev, 0);
2686     @@ -349,6 +395,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
2687    
2688     bc = tick_broadcast_device.evtdev;
2689     cpumask_clear_cpu(cpu, tick_broadcast_mask);
2690     + cpumask_clear_cpu(cpu, tick_broadcast_on);
2691    
2692     if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
2693     if (bc && cpumask_empty(tick_broadcast_mask))
2694     @@ -475,7 +522,15 @@ void tick_check_oneshot_broadcast(int cpu)
2695     if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
2696     struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
2697    
2698     - clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
2699     + /*
2700     + * We might be in the middle of switching over from
2701     + * periodic to oneshot. If the CPU has not yet
2702     + * switched over, leave the device alone.
2703     + */
2704     + if (td->mode == TICKDEV_MODE_ONESHOT) {
2705     + clockevents_set_mode(td->evtdev,
2706     + CLOCK_EVT_MODE_ONESHOT);
2707     + }
2708     }
2709     }
2710    
2711     @@ -792,6 +847,7 @@ bool tick_broadcast_oneshot_available(void)
2712     void __init tick_broadcast_init(void)
2713     {
2714     zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
2715     + zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
2716     zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
2717     #ifdef CONFIG_TICK_ONESHOT
2718     zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
2719     diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2720     index 5d3fb100..7ce5e5a4 100644
2721     --- a/kernel/time/tick-common.c
2722     +++ b/kernel/time/tick-common.c
2723     @@ -194,7 +194,8 @@ static void tick_setup_device(struct tick_device *td,
2724     * When global broadcasting is active, check if the current
2725     * device is registered as a placeholder for broadcast mode.
2726     * This allows us to handle this x86 misfeature in a generic
2727     - * way.
2728     + * way. This function also returns !=0 when we keep the
2729     + * current active broadcast state for this CPU.
2730     */
2731     if (tick_device_uses_broadcast(newdev, cpu))
2732     return;
2733     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2734     index e71a8be4..0b936d80 100644
2735     --- a/kernel/trace/trace.c
2736     +++ b/kernel/trace/trace.c
2737     @@ -193,6 +193,37 @@ static struct trace_array global_trace;
2738    
2739     LIST_HEAD(ftrace_trace_arrays);
2740    
2741     +int trace_array_get(struct trace_array *this_tr)
2742     +{
2743     + struct trace_array *tr;
2744     + int ret = -ENODEV;
2745     +
2746     + mutex_lock(&trace_types_lock);
2747     + list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2748     + if (tr == this_tr) {
2749     + tr->ref++;
2750     + ret = 0;
2751     + break;
2752     + }
2753     + }
2754     + mutex_unlock(&trace_types_lock);
2755     +
2756     + return ret;
2757     +}
2758     +
2759     +static void __trace_array_put(struct trace_array *this_tr)
2760     +{
2761     + WARN_ON(!this_tr->ref);
2762     + this_tr->ref--;
2763     +}
2764     +
2765     +void trace_array_put(struct trace_array *this_tr)
2766     +{
2767     + mutex_lock(&trace_types_lock);
2768     + __trace_array_put(this_tr);
2769     + mutex_unlock(&trace_types_lock);
2770     +}
2771     +
2772     int filter_current_check_discard(struct ring_buffer *buffer,
2773     struct ftrace_event_call *call, void *rec,
2774     struct ring_buffer_event *event)
2775     @@ -240,7 +271,7 @@ static struct tracer *trace_types __read_mostly;
2776     /*
2777     * trace_types_lock is used to protect the trace_types list.
2778     */
2779     -static DEFINE_MUTEX(trace_types_lock);
2780     +DEFINE_MUTEX(trace_types_lock);
2781    
2782     /*
2783     * serialize the access of the ring buffer
2784     @@ -2768,10 +2799,9 @@ static const struct seq_operations tracer_seq_ops = {
2785     };
2786    
2787     static struct trace_iterator *
2788     -__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2789     +__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
2790     + struct inode *inode, struct file *file, bool snapshot)
2791     {
2792     - struct trace_cpu *tc = inode->i_private;
2793     - struct trace_array *tr = tc->tr;
2794     struct trace_iterator *iter;
2795     int cpu;
2796    
2797     @@ -2850,8 +2880,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
2798     tracing_iter_reset(iter, cpu);
2799     }
2800    
2801     - tr->ref++;
2802     -
2803     mutex_unlock(&trace_types_lock);
2804    
2805     return iter;
2806     @@ -2874,6 +2902,43 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
2807     return 0;
2808     }
2809    
2810     +/*
2811     + * Open and update trace_array ref count.
2812     + * Must have the current trace_array passed to it.
2813     + */
2814     +int tracing_open_generic_tr(struct inode *inode, struct file *filp)
2815     +{
2816     + struct trace_array *tr = inode->i_private;
2817     +
2818     + if (tracing_disabled)
2819     + return -ENODEV;
2820     +
2821     + if (trace_array_get(tr) < 0)
2822     + return -ENODEV;
2823     +
2824     + filp->private_data = inode->i_private;
2825     +
2826     + return 0;
2827     +
2828     +}
2829     +
2830     +int tracing_open_generic_tc(struct inode *inode, struct file *filp)
2831     +{
2832     + struct trace_cpu *tc = inode->i_private;
2833     + struct trace_array *tr = tc->tr;
2834     +
2835     + if (tracing_disabled)
2836     + return -ENODEV;
2837     +
2838     + if (trace_array_get(tr) < 0)
2839     + return -ENODEV;
2840     +
2841     + filp->private_data = inode->i_private;
2842     +
2843     + return 0;
2844     +
2845     +}
2846     +
2847     static int tracing_release(struct inode *inode, struct file *file)
2848     {
2849     struct seq_file *m = file->private_data;
2850     @@ -2881,17 +2946,20 @@ static int tracing_release(struct inode *inode, struct file *file)
2851     struct trace_array *tr;
2852     int cpu;
2853    
2854     - if (!(file->f_mode & FMODE_READ))
2855     + /* Writes do not use seq_file, need to grab tr from inode */
2856     + if (!(file->f_mode & FMODE_READ)) {
2857     + struct trace_cpu *tc = inode->i_private;
2858     +
2859     + trace_array_put(tc->tr);
2860     return 0;
2861     + }
2862    
2863     iter = m->private;
2864     tr = iter->tr;
2865     + trace_array_put(tr);
2866    
2867     mutex_lock(&trace_types_lock);
2868    
2869     - WARN_ON(!tr->ref);
2870     - tr->ref--;
2871     -
2872     for_each_tracing_cpu(cpu) {
2873     if (iter->buffer_iter[cpu])
2874     ring_buffer_read_finish(iter->buffer_iter[cpu]);
2875     @@ -2910,20 +2978,49 @@ static int tracing_release(struct inode *inode, struct file *file)
2876     kfree(iter->trace);
2877     kfree(iter->buffer_iter);
2878     seq_release_private(inode, file);
2879     +
2880     + return 0;
2881     +}
2882     +
2883     +static int tracing_release_generic_tr(struct inode *inode, struct file *file)
2884     +{
2885     + struct trace_array *tr = inode->i_private;
2886     +
2887     + trace_array_put(tr);
2888     return 0;
2889     }
2890    
2891     +static int tracing_release_generic_tc(struct inode *inode, struct file *file)
2892     +{
2893     + struct trace_cpu *tc = inode->i_private;
2894     + struct trace_array *tr = tc->tr;
2895     +
2896     + trace_array_put(tr);
2897     + return 0;
2898     +}
2899     +
2900     +static int tracing_single_release_tr(struct inode *inode, struct file *file)
2901     +{
2902     + struct trace_array *tr = inode->i_private;
2903     +
2904     + trace_array_put(tr);
2905     +
2906     + return single_release(inode, file);
2907     +}
2908     +
2909     static int tracing_open(struct inode *inode, struct file *file)
2910     {
2911     + struct trace_cpu *tc = inode->i_private;
2912     + struct trace_array *tr = tc->tr;
2913     struct trace_iterator *iter;
2914     int ret = 0;
2915    
2916     + if (trace_array_get(tr) < 0)
2917     + return -ENODEV;
2918     +
2919     /* If this file was open for write, then erase contents */
2920     if ((file->f_mode & FMODE_WRITE) &&
2921     (file->f_flags & O_TRUNC)) {
2922     - struct trace_cpu *tc = inode->i_private;
2923     - struct trace_array *tr = tc->tr;
2924     -
2925     if (tc->cpu == RING_BUFFER_ALL_CPUS)
2926     tracing_reset_online_cpus(&tr->trace_buffer);
2927     else
2928     @@ -2931,12 +3028,16 @@ static int tracing_open(struct inode *inode, struct file *file)
2929     }
2930    
2931     if (file->f_mode & FMODE_READ) {
2932     - iter = __tracing_open(inode, file, false);
2933     + iter = __tracing_open(tr, tc, inode, file, false);
2934     if (IS_ERR(iter))
2935     ret = PTR_ERR(iter);
2936     else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2937     iter->iter_flags |= TRACE_FILE_LAT_FMT;
2938     }
2939     +
2940     + if (ret < 0)
2941     + trace_array_put(tr);
2942     +
2943     return ret;
2944     }
2945    
2946     @@ -3293,9 +3394,14 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2947    
2948     static int tracing_trace_options_open(struct inode *inode, struct file *file)
2949     {
2950     + struct trace_array *tr = inode->i_private;
2951     +
2952     if (tracing_disabled)
2953     return -ENODEV;
2954    
2955     + if (trace_array_get(tr) < 0)
2956     + return -ENODEV;
2957     +
2958     return single_open(file, tracing_trace_options_show, inode->i_private);
2959     }
2960    
2961     @@ -3303,7 +3409,7 @@ static const struct file_operations tracing_iter_fops = {
2962     .open = tracing_trace_options_open,
2963     .read = seq_read,
2964     .llseek = seq_lseek,
2965     - .release = single_release,
2966     + .release = tracing_single_release_tr,
2967     .write = tracing_trace_options_write,
2968     };
2969    
2970     @@ -3791,6 +3897,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
2971     if (tracing_disabled)
2972     return -ENODEV;
2973    
2974     + if (trace_array_get(tr) < 0)
2975     + return -ENODEV;
2976     +
2977     mutex_lock(&trace_types_lock);
2978    
2979     /* create a buffer to store the information to pass to userspace */
2980     @@ -3843,6 +3952,7 @@ out:
2981     fail:
2982     kfree(iter->trace);
2983     kfree(iter);
2984     + __trace_array_put(tr);
2985     mutex_unlock(&trace_types_lock);
2986     return ret;
2987     }
2988     @@ -3850,6 +3960,8 @@ fail:
2989     static int tracing_release_pipe(struct inode *inode, struct file *file)
2990     {
2991     struct trace_iterator *iter = file->private_data;
2992     + struct trace_cpu *tc = inode->i_private;
2993     + struct trace_array *tr = tc->tr;
2994    
2995     mutex_lock(&trace_types_lock);
2996    
2997     @@ -3863,6 +3975,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
2998     kfree(iter->trace);
2999     kfree(iter);
3000    
3001     + trace_array_put(tr);
3002     +
3003     return 0;
3004     }
3005    
3006     @@ -4320,6 +4434,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
3007     /* resize the ring buffer to 0 */
3008     tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
3009    
3010     + trace_array_put(tr);
3011     +
3012     return 0;
3013     }
3014    
3015     @@ -4328,6 +4444,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3016     size_t cnt, loff_t *fpos)
3017     {
3018     unsigned long addr = (unsigned long)ubuf;
3019     + struct trace_array *tr = filp->private_data;
3020     struct ring_buffer_event *event;
3021     struct ring_buffer *buffer;
3022     struct print_entry *entry;
3023     @@ -4387,7 +4504,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3024    
3025     local_save_flags(irq_flags);
3026     size = sizeof(*entry) + cnt + 2; /* possible \n added */
3027     - buffer = global_trace.trace_buffer.buffer;
3028     + buffer = tr->trace_buffer.buffer;
3029     event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3030     irq_flags, preempt_count());
3031     if (!event) {
3032     @@ -4495,10 +4612,20 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3033    
3034     static int tracing_clock_open(struct inode *inode, struct file *file)
3035     {
3036     + struct trace_array *tr = inode->i_private;
3037     + int ret;
3038     +
3039     if (tracing_disabled)
3040     return -ENODEV;
3041    
3042     - return single_open(file, tracing_clock_show, inode->i_private);
3043     + if (trace_array_get(tr))
3044     + return -ENODEV;
3045     +
3046     + ret = single_open(file, tracing_clock_show, inode->i_private);
3047     + if (ret < 0)
3048     + trace_array_put(tr);
3049     +
3050     + return ret;
3051     }
3052    
3053     struct ftrace_buffer_info {
3054     @@ -4511,12 +4638,16 @@ struct ftrace_buffer_info {
3055     static int tracing_snapshot_open(struct inode *inode, struct file *file)
3056     {
3057     struct trace_cpu *tc = inode->i_private;
3058     + struct trace_array *tr = tc->tr;
3059     struct trace_iterator *iter;
3060     struct seq_file *m;
3061     int ret = 0;
3062    
3063     + if (trace_array_get(tr) < 0)
3064     + return -ENODEV;
3065     +
3066     if (file->f_mode & FMODE_READ) {
3067     - iter = __tracing_open(inode, file, true);
3068     + iter = __tracing_open(tr, tc, inode, file, true);
3069     if (IS_ERR(iter))
3070     ret = PTR_ERR(iter);
3071     } else {
3072     @@ -4529,13 +4660,16 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
3073     kfree(m);
3074     return -ENOMEM;
3075     }
3076     - iter->tr = tc->tr;
3077     + iter->tr = tr;
3078     iter->trace_buffer = &tc->tr->max_buffer;
3079     iter->cpu_file = tc->cpu;
3080     m->private = iter;
3081     file->private_data = m;
3082     }
3083    
3084     + if (ret < 0)
3085     + trace_array_put(tr);
3086     +
3087     return ret;
3088     }
3089    
3090     @@ -4616,9 +4750,12 @@ out:
3091     static int tracing_snapshot_release(struct inode *inode, struct file *file)
3092     {
3093     struct seq_file *m = file->private_data;
3094     + int ret;
3095     +
3096     + ret = tracing_release(inode, file);
3097    
3098     if (file->f_mode & FMODE_READ)
3099     - return tracing_release(inode, file);
3100     + return ret;
3101    
3102     /* If write only, the seq_file is just a stub */
3103     if (m)
3104     @@ -4684,34 +4821,38 @@ static const struct file_operations tracing_pipe_fops = {
3105     };
3106    
3107     static const struct file_operations tracing_entries_fops = {
3108     - .open = tracing_open_generic,
3109     + .open = tracing_open_generic_tc,
3110     .read = tracing_entries_read,
3111     .write = tracing_entries_write,
3112     .llseek = generic_file_llseek,
3113     + .release = tracing_release_generic_tc,
3114     };
3115    
3116     static const struct file_operations tracing_total_entries_fops = {
3117     - .open = tracing_open_generic,
3118     + .open = tracing_open_generic_tr,
3119     .read = tracing_total_entries_read,
3120     .llseek = generic_file_llseek,
3121     + .release = tracing_release_generic_tr,
3122     };
3123    
3124     static const struct file_operations tracing_free_buffer_fops = {
3125     + .open = tracing_open_generic_tr,
3126     .write = tracing_free_buffer_write,
3127     .release = tracing_free_buffer_release,
3128     };
3129    
3130     static const struct file_operations tracing_mark_fops = {
3131     - .open = tracing_open_generic,
3132     + .open = tracing_open_generic_tr,
3133     .write = tracing_mark_write,
3134     .llseek = generic_file_llseek,
3135     + .release = tracing_release_generic_tr,
3136     };
3137    
3138     static const struct file_operations trace_clock_fops = {
3139     .open = tracing_clock_open,
3140     .read = seq_read,
3141     .llseek = seq_lseek,
3142     - .release = single_release,
3143     + .release = tracing_single_release_tr,
3144     .write = tracing_clock_write,
3145     };
3146    
3147     @@ -4739,13 +4880,19 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
3148     struct trace_cpu *tc = inode->i_private;
3149     struct trace_array *tr = tc->tr;
3150     struct ftrace_buffer_info *info;
3151     + int ret;
3152    
3153     if (tracing_disabled)
3154     return -ENODEV;
3155    
3156     + if (trace_array_get(tr) < 0)
3157     + return -ENODEV;
3158     +
3159     info = kzalloc(sizeof(*info), GFP_KERNEL);
3160     - if (!info)
3161     + if (!info) {
3162     + trace_array_put(tr);
3163     return -ENOMEM;
3164     + }
3165    
3166     mutex_lock(&trace_types_lock);
3167    
3168     @@ -4763,7 +4910,11 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
3169    
3170     mutex_unlock(&trace_types_lock);
3171    
3172     - return nonseekable_open(inode, filp);
3173     + ret = nonseekable_open(inode, filp);
3174     + if (ret < 0)
3175     + trace_array_put(tr);
3176     +
3177     + return ret;
3178     }
3179    
3180     static unsigned int
3181     @@ -4863,8 +5014,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
3182    
3183     mutex_lock(&trace_types_lock);
3184    
3185     - WARN_ON(!iter->tr->ref);
3186     - iter->tr->ref--;
3187     + __trace_array_put(iter->tr);
3188    
3189     if (info->spare)
3190     ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
3191     @@ -5659,9 +5809,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
3192     }
3193    
3194     static const struct file_operations rb_simple_fops = {
3195     - .open = tracing_open_generic,
3196     + .open = tracing_open_generic_tr,
3197     .read = rb_simple_read,
3198     .write = rb_simple_write,
3199     + .release = tracing_release_generic_tr,
3200     .llseek = default_llseek,
3201     };
3202    
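
The trace.c hunks above replace the open-coded tr->ref++ / tr->ref-- pairs with trace_array_get()/trace_array_put(), taken in every open path and dropped in the matching release path, with the lookup done while the array is still on the global list. A standalone sketch of that pattern using a pthread mutex; the struct and function names are illustrative only.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct trace_array_model {
        struct trace_array_model *next;
        int ref;
    };

    static pthread_mutex_t types_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct trace_array_model *arrays;     /* head of the global list */

    /* Take a reference only if the array is still on the list. */
    static int model_array_get(struct trace_array_model *this_tr)
    {
        struct trace_array_model *tr;
        int ret = -1;                            /* "not found" */

        pthread_mutex_lock(&types_lock);
        for (tr = arrays; tr; tr = tr->next) {
            if (tr == this_tr) {
                tr->ref++;
                ret = 0;
                break;
            }
        }
        pthread_mutex_unlock(&types_lock);
        return ret;
    }

    static void model_array_put(struct trace_array_model *this_tr)
    {
        pthread_mutex_lock(&types_lock);
        assert(this_tr->ref > 0);                /* stands in for WARN_ON() */
        this_tr->ref--;
        pthread_mutex_unlock(&types_lock);
    }

    int main(void)
    {
        struct trace_array_model global = { .next = NULL, .ref = 0 };

        arrays = &global;
        if (model_array_get(&global) == 0) {     /* e.g. in an open() handler */
            printf("ref = %d\n", global.ref);
            model_array_put(&global);            /* matching release() */
        }
        return 0;
    }
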
3203     diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
3204     index 20572ed8..51b44483 100644
3205     --- a/kernel/trace/trace.h
3206     +++ b/kernel/trace/trace.h
3207     @@ -224,6 +224,11 @@ enum {
3208    
3209     extern struct list_head ftrace_trace_arrays;
3210    
3211     +extern struct mutex trace_types_lock;
3212     +
3213     +extern int trace_array_get(struct trace_array *tr);
3214     +extern void trace_array_put(struct trace_array *tr);
3215     +
3216     /*
3217     * The global tracer (top) should be the first trace array added,
3218     * but we check the flag anyway.
3219     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3220     index 27963e2b..6dfd48b5 100644
3221     --- a/kernel/trace/trace_events.c
3222     +++ b/kernel/trace/trace_events.c
3223     @@ -41,6 +41,23 @@ static LIST_HEAD(ftrace_common_fields);
3224     static struct kmem_cache *field_cachep;
3225     static struct kmem_cache *file_cachep;
3226    
3227     +#define SYSTEM_FL_FREE_NAME (1 << 31)
3228     +
3229     +static inline int system_refcount(struct event_subsystem *system)
3230     +{
3231     + return system->ref_count & ~SYSTEM_FL_FREE_NAME;
3232     +}
3233     +
3234     +static int system_refcount_inc(struct event_subsystem *system)
3235     +{
3236     + return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
3237     +}
3238     +
3239     +static int system_refcount_dec(struct event_subsystem *system)
3240     +{
3241     + return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
3242     +}
3243     +
3244     /* Double loops, do not use break, only goto's work */
3245     #define do_for_each_event_file(tr, file) \
3246     list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
3247     @@ -349,8 +366,8 @@ static void __put_system(struct event_subsystem *system)
3248     {
3249     struct event_filter *filter = system->filter;
3250    
3251     - WARN_ON_ONCE(system->ref_count == 0);
3252     - if (--system->ref_count)
3253     + WARN_ON_ONCE(system_refcount(system) == 0);
3254     + if (system_refcount_dec(system))
3255     return;
3256    
3257     list_del(&system->list);
3258     @@ -359,13 +376,15 @@ static void __put_system(struct event_subsystem *system)
3259     kfree(filter->filter_string);
3260     kfree(filter);
3261     }
3262     + if (system->ref_count & SYSTEM_FL_FREE_NAME)
3263     + kfree(system->name);
3264     kfree(system);
3265     }
3266    
3267     static void __get_system(struct event_subsystem *system)
3268     {
3269     - WARN_ON_ONCE(system->ref_count == 0);
3270     - system->ref_count++;
3271     + WARN_ON_ONCE(system_refcount(system) == 0);
3272     + system_refcount_inc(system);
3273     }
3274    
3275     static void __get_system_dir(struct ftrace_subsystem_dir *dir)
3276     @@ -379,7 +398,7 @@ static void __put_system_dir(struct ftrace_subsystem_dir *dir)
3277     {
3278     WARN_ON_ONCE(dir->ref_count == 0);
3279     /* If the subsystem is about to be freed, the dir must be too */
3280     - WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
3281     + WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
3282    
3283     __put_system(dir->subsystem);
3284     if (!--dir->ref_count)
3285     @@ -394,16 +413,45 @@ static void put_system(struct ftrace_subsystem_dir *dir)
3286     }
3287    
3288     /*
3289     + * Open and update trace_array ref count.
3290     + * Must have the current trace_array passed to it.
3291     + */
3292     +static int tracing_open_generic_file(struct inode *inode, struct file *filp)
3293     +{
3294     + struct ftrace_event_file *file = inode->i_private;
3295     + struct trace_array *tr = file->tr;
3296     + int ret;
3297     +
3298     + if (trace_array_get(tr) < 0)
3299     + return -ENODEV;
3300     +
3301     + ret = tracing_open_generic(inode, filp);
3302     + if (ret < 0)
3303     + trace_array_put(tr);
3304     + return ret;
3305     +}
3306     +
3307     +static int tracing_release_generic_file(struct inode *inode, struct file *filp)
3308     +{
3309     + struct ftrace_event_file *file = inode->i_private;
3310     + struct trace_array *tr = file->tr;
3311     +
3312     + trace_array_put(tr);
3313     +
3314     + return 0;
3315     +}
3316     +
3317     +/*
3318     * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
3319     */
3320     -static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
3321     - const char *sub, const char *event, int set)
3322     +static int
3323     +__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
3324     + const char *sub, const char *event, int set)
3325     {
3326     struct ftrace_event_file *file;
3327     struct ftrace_event_call *call;
3328     int ret = -EINVAL;
3329    
3330     - mutex_lock(&event_mutex);
3331     list_for_each_entry(file, &tr->events, list) {
3332    
3333     call = file->event_call;
3334     @@ -429,6 +477,17 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
3335    
3336     ret = 0;
3337     }
3338     +
3339     + return ret;
3340     +}
3341     +
3342     +static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
3343     + const char *sub, const char *event, int set)
3344     +{
3345     + int ret;
3346     +
3347     + mutex_lock(&event_mutex);
3348     + ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
3349     mutex_unlock(&event_mutex);
3350    
3351     return ret;
3352     @@ -992,6 +1051,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
3353     int ret;
3354    
3355     /* Make sure the system still exists */
3356     + mutex_lock(&trace_types_lock);
3357     mutex_lock(&event_mutex);
3358     list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3359     list_for_each_entry(dir, &tr->systems, list) {
3360     @@ -1007,6 +1067,7 @@ static int subsystem_open(struct inode *inode, struct file *filp)
3361     }
3362     exit_loop:
3363     mutex_unlock(&event_mutex);
3364     + mutex_unlock(&trace_types_lock);
3365    
3366     if (!system)
3367     return -ENODEV;
3368     @@ -1014,9 +1075,17 @@ static int subsystem_open(struct inode *inode, struct file *filp)
3369     /* Some versions of gcc think dir can be uninitialized here */
3370     WARN_ON(!dir);
3371    
3372     + /* Still need to increment the ref count of the system */
3373     + if (trace_array_get(tr) < 0) {
3374     + put_system(dir);
3375     + return -ENODEV;
3376     + }
3377     +
3378     ret = tracing_open_generic(inode, filp);
3379     - if (ret < 0)
3380     + if (ret < 0) {
3381     + trace_array_put(tr);
3382     put_system(dir);
3383     + }
3384    
3385     return ret;
3386     }
3387     @@ -1027,16 +1096,23 @@ static int system_tr_open(struct inode *inode, struct file *filp)
3388     struct trace_array *tr = inode->i_private;
3389     int ret;
3390    
3391     + if (trace_array_get(tr) < 0)
3392     + return -ENODEV;
3393     +
3394     /* Make a temporary dir that has no system but points to tr */
3395     dir = kzalloc(sizeof(*dir), GFP_KERNEL);
3396     - if (!dir)
3397     + if (!dir) {
3398     + trace_array_put(tr);
3399     return -ENOMEM;
3400     + }
3401    
3402     dir->tr = tr;
3403    
3404     ret = tracing_open_generic(inode, filp);
3405     - if (ret < 0)
3406     + if (ret < 0) {
3407     + trace_array_put(tr);
3408     kfree(dir);
3409     + }
3410    
3411     filp->private_data = dir;
3412    
3413     @@ -1047,6 +1123,8 @@ static int subsystem_release(struct inode *inode, struct file *file)
3414     {
3415     struct ftrace_subsystem_dir *dir = file->private_data;
3416    
3417     + trace_array_put(dir->tr);
3418     +
3419     /*
3420     * If dir->subsystem is NULL, then this is a temporary
3421     * descriptor that was made for a trace_array to enable
3422     @@ -1174,9 +1252,10 @@ static const struct file_operations ftrace_set_event_fops = {
3423     };
3424    
3425     static const struct file_operations ftrace_enable_fops = {
3426     - .open = tracing_open_generic,
3427     + .open = tracing_open_generic_file,
3428     .read = event_enable_read,
3429     .write = event_enable_write,
3430     + .release = tracing_release_generic_file,
3431     .llseek = default_llseek,
3432     };
3433    
3434     @@ -1279,7 +1358,15 @@ create_new_subsystem(const char *name)
3435     return NULL;
3436    
3437     system->ref_count = 1;
3438     - system->name = name;
3439     +
3440     + /* Only allocate if dynamic (kprobes and modules) */
3441     + if (!core_kernel_data((unsigned long)name)) {
3442     + system->ref_count |= SYSTEM_FL_FREE_NAME;
3443     + system->name = kstrdup(name, GFP_KERNEL);
3444     + if (!system->name)
3445     + goto out_free;
3446     + } else
3447     + system->name = name;
3448    
3449     system->filter = NULL;
3450    
3451     @@ -1292,6 +1379,8 @@ create_new_subsystem(const char *name)
3452     return system;
3453    
3454     out_free:
3455     + if (system->ref_count & SYSTEM_FL_FREE_NAME)
3456     + kfree(system->name);
3457     kfree(system);
3458     return NULL;
3459     }
3460     @@ -1591,6 +1680,7 @@ static void __add_event_to_tracers(struct ftrace_event_call *call,
3461     int trace_add_event_call(struct ftrace_event_call *call)
3462     {
3463     int ret;
3464     + mutex_lock(&trace_types_lock);
3465     mutex_lock(&event_mutex);
3466    
3467     ret = __register_event(call, NULL);
3468     @@ -1598,11 +1688,13 @@ int trace_add_event_call(struct ftrace_event_call *call)
3469     __add_event_to_tracers(call, NULL);
3470    
3471     mutex_unlock(&event_mutex);
3472     + mutex_unlock(&trace_types_lock);
3473     return ret;
3474     }
3475    
3476     /*
3477     - * Must be called under locking both of event_mutex and trace_event_sem.
3478     + * Must be called under locking of trace_types_lock, event_mutex and
3479     + * trace_event_sem.
3480     */
3481     static void __trace_remove_event_call(struct ftrace_event_call *call)
3482     {
3483     @@ -1614,11 +1706,13 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
3484     /* Remove an event_call */
3485     void trace_remove_event_call(struct ftrace_event_call *call)
3486     {
3487     + mutex_lock(&trace_types_lock);
3488     mutex_lock(&event_mutex);
3489     down_write(&trace_event_sem);
3490     __trace_remove_event_call(call);
3491     up_write(&trace_event_sem);
3492     mutex_unlock(&event_mutex);
3493     + mutex_unlock(&trace_types_lock);
3494     }
3495    
3496     #define for_each_event(event, start, end) \
3497     @@ -1762,6 +1856,7 @@ static int trace_module_notify(struct notifier_block *self,
3498     {
3499     struct module *mod = data;
3500    
3501     + mutex_lock(&trace_types_lock);
3502     mutex_lock(&event_mutex);
3503     switch (val) {
3504     case MODULE_STATE_COMING:
3505     @@ -1772,6 +1867,7 @@ static int trace_module_notify(struct notifier_block *self,
3506     break;
3507     }
3508     mutex_unlock(&event_mutex);
3509     + mutex_unlock(&trace_types_lock);
3510    
3511     return 0;
3512     }
3513     @@ -2329,11 +2425,11 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3514    
3515     int event_trace_del_tracer(struct trace_array *tr)
3516     {
3517     - /* Disable any running events */
3518     - __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3519     -
3520     mutex_lock(&event_mutex);
3521    
3522     + /* Disable any running events */
3523     + __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3524     +
3525     down_write(&trace_event_sem);
3526     __trace_remove_event_dirs(tr);
3527     debugfs_remove_recursive(tr->event_dir);
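
The trace_events.c hunks above pack an "owns its name string" flag into bit 31 of system->ref_count (SYSTEM_FL_FREE_NAME) and route all count manipulation through helpers that mask the flag out. A self-contained userspace sketch of that trick, with made-up type and function names; it is a model of the idea, not the kernel code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FL_FREE_NAME (1u << 31)

    struct subsystem_model {
        const char *name;
        unsigned int ref_count;      /* low 31 bits: count, bit 31: ownership flag */
    };

    static unsigned int refcount(const struct subsystem_model *s)
    {
        return s->ref_count & ~FL_FREE_NAME;
    }

    static void get_subsystem(struct subsystem_model *s)
    {
        s->ref_count++;              /* +1 on the low bits leaves the flag alone */
    }

    static void put_subsystem(struct subsystem_model *s)
    {
        if (--s->ref_count & ~FL_FREE_NAME)
            return;                  /* still referenced */
        if (s->ref_count & FL_FREE_NAME)
            free((char *)s->name);   /* name was duplicated, so free it */
        free(s);
    }

    int main(void)
    {
        struct subsystem_model *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        s->name = strdup("kprobes-subsys");
        s->ref_count = 1 | FL_FREE_NAME;    /* one reference, heap-owned name */
        get_subsystem(s);
        printf("count = %u\n", refcount(s));    /* prints 2 */
        put_subsystem(s);
        put_subsystem(s);                       /* last put frees name and struct */
        return 0;
    }
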
3528     diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
3529     index 8f2ac73c..322e1646 100644
3530     --- a/kernel/trace/trace_syscalls.c
3531     +++ b/kernel/trace/trace_syscalls.c
3532     @@ -306,6 +306,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
3533     struct syscall_metadata *sys_data;
3534     struct ring_buffer_event *event;
3535     struct ring_buffer *buffer;
3536     + unsigned long irq_flags;
3537     + int pc;
3538     int syscall_nr;
3539     int size;
3540    
3541     @@ -321,9 +323,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
3542    
3543     size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
3544    
3545     + local_save_flags(irq_flags);
3546     + pc = preempt_count();
3547     +
3548     buffer = tr->trace_buffer.buffer;
3549     event = trace_buffer_lock_reserve(buffer,
3550     - sys_data->enter_event->event.type, size, 0, 0);
3551     + sys_data->enter_event->event.type, size, irq_flags, pc);
3552     if (!event)
3553     return;
3554    
3555     @@ -333,7 +338,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
3556    
3557     if (!filter_current_check_discard(buffer, sys_data->enter_event,
3558     entry, event))
3559     - trace_current_buffer_unlock_commit(buffer, event, 0, 0);
3560     + trace_current_buffer_unlock_commit(buffer, event,
3561     + irq_flags, pc);
3562     }
3563    
3564     static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
3565     @@ -343,6 +349,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
3566     struct syscall_metadata *sys_data;
3567     struct ring_buffer_event *event;
3568     struct ring_buffer *buffer;
3569     + unsigned long irq_flags;
3570     + int pc;
3571     int syscall_nr;
3572    
3573     syscall_nr = trace_get_syscall_nr(current, regs);
3574     @@ -355,9 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
3575     if (!sys_data)
3576     return;
3577    
3578     + local_save_flags(irq_flags);
3579     + pc = preempt_count();
3580     +
3581     buffer = tr->trace_buffer.buffer;
3582     event = trace_buffer_lock_reserve(buffer,
3583     - sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
3584     + sys_data->exit_event->event.type, sizeof(*entry),
3585     + irq_flags, pc);
3586     if (!event)
3587     return;
3588    
3589     @@ -367,7 +379,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
3590    
3591     if (!filter_current_check_discard(buffer, sys_data->exit_event,
3592     entry, event))
3593     - trace_current_buffer_unlock_commit(buffer, event, 0, 0);
3594     + trace_current_buffer_unlock_commit(buffer, event,
3595     + irq_flags, pc);
3596     }
3597    
3598     static int reg_event_syscall_enter(struct ftrace_event_file *file,
3599     diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
3600     index 32494fb0..d5d0cd36 100644
3601     --- a/kernel/trace/trace_uprobe.c
3602     +++ b/kernel/trace/trace_uprobe.c
3603     @@ -283,8 +283,10 @@ static int create_trace_uprobe(int argc, char **argv)
3604     return -EINVAL;
3605     }
3606     arg = strchr(argv[1], ':');
3607     - if (!arg)
3608     + if (!arg) {
3609     + ret = -EINVAL;
3610     goto fail_address_parse;
3611     + }
3612    
3613     *arg++ = '\0';
3614     filename = argv[1];
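
The one-line trace_uprobe.c fix above closes a classic hole: jumping to a shared cleanup label without setting the error code first, so the function "fails" with a success return value. A minimal illustration with a made-up parser; only the shape of the control flow is taken from the patch.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_probe(const char *spec)
    {
        int ret = 0;
        const char *colon = strchr(spec, ':');

        if (!colon) {
            ret = -EINVAL;      /* without this, we would reach "fail" with ret == 0 */
            goto fail;
        }
        printf("group='%.*s' event='%s'\n", (int)(colon - spec), spec, colon + 1);
        return 0;

    fail:
        fprintf(stderr, "bad probe spec '%s'\n", spec);
        return ret;
    }

    int main(void)
    {
        return parse_probe("no-colon-here") ? 1 : 0;
    }
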
3615     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
3616     index 98d20c0f..514e90f4 100644
3617     --- a/net/mac80211/iface.c
3618     +++ b/net/mac80211/iface.c
3619     @@ -1726,6 +1726,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
3620     if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
3621     dev_close(sdata->dev);
3622    
3623     + /*
3624     + * Close all AP_VLAN interfaces first, as otherwise they
3625     + * might be closed while the AP interface they belong to
3626     + * is closed, causing unregister_netdevice_many() to crash.
3627     + */
3628     + list_for_each_entry(sdata, &local->interfaces, list)
3629     + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
3630     + dev_close(sdata->dev);
3631     +
3632     mutex_lock(&local->iflist_mtx);
3633     list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
3634     list_del(&sdata->list);
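
The added mac80211 loop above is about teardown ordering: dependent AP_VLAN interfaces are closed in a first pass so none of them outlives the AP it hangs off. A small model with a plain array of interfaces and invented types, just to show the two-pass shape.

    #include <stdio.h>

    enum if_type { IF_AP, IF_AP_VLAN, IF_STATION };

    struct iface_model {
        const char *name;
        enum if_type type;
    };

    static void close_iface(const struct iface_model *i)
    {
        printf("closing %s\n", i->name);
    }

    static void remove_all(const struct iface_model *ifs, int n)
    {
        int i;

        for (i = 0; i < n; i++)              /* pass 1: dependents first */
            if (ifs[i].type == IF_AP_VLAN)
                close_iface(&ifs[i]);
        for (i = 0; i < n; i++)              /* pass 2: everything else */
            if (ifs[i].type != IF_AP_VLAN)
                close_iface(&ifs[i]);
    }

    int main(void)
    {
        const struct iface_model ifs[] = {
            { "wlan0",      IF_AP },
            { "wlan0.vlan", IF_AP_VLAN },
            { "wlan1",      IF_STATION },
        };
        remove_all(ifs, 3);
        return 0;
    }
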
3635     diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
3636     index 06bdf5a1..1583c8a4 100644
3637     --- a/net/sunrpc/svcauth_unix.c
3638     +++ b/net/sunrpc/svcauth_unix.c
3639     @@ -493,8 +493,6 @@ static int unix_gid_parse(struct cache_detail *cd,
3640     if (rv)
3641     return -EINVAL;
3642     uid = make_kuid(&init_user_ns, id);
3643     - if (!uid_valid(uid))
3644     - return -EINVAL;
3645     ug.uid = uid;
3646    
3647     expiry = get_expiry(&mesg);
3648     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
3649     index 0f679df7..305374d4 100644
3650     --- a/net/sunrpc/svcsock.c
3651     +++ b/net/sunrpc/svcsock.c
3652     @@ -917,7 +917,10 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
3653     len = svsk->sk_datalen;
3654     npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3655     for (i = 0; i < npages; i++) {
3656     - BUG_ON(svsk->sk_pages[i] == NULL);
3657     + if (svsk->sk_pages[i] == NULL) {
3658     + WARN_ON_ONCE(1);
3659     + continue;
3660     + }
3661     put_page(svsk->sk_pages[i]);
3662     svsk->sk_pages[i] = NULL;
3663     }
3664     @@ -1092,8 +1095,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
3665     goto err_noclose;
3666     }
3667    
3668     - if (svc_sock_reclen(svsk) < 8)
3669     + if (svsk->sk_datalen < 8) {
3670     + svsk->sk_datalen = 0;
3671     goto err_delete; /* client is nuts. */
3672     + }
3673    
3674     rqstp->rq_arg.len = svsk->sk_datalen;
3675     rqstp->rq_arg.page_base = 0;
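
The svc_tcp_clear_pages() hunk above swaps a fatal BUG_ON() for a one-time warning plus "continue", so a missing page entry no longer brings the machine down. A userspace sketch of the same defensive loop, with invented data; the warning flag mimics WARN_ON_ONCE() behavior only loosely.

    #include <stdio.h>
    #include <stdlib.h>

    static void clear_pages(void **pages, int npages)
    {
        static int warned;
        int i;

        for (i = 0; i < npages; i++) {
            if (pages[i] == NULL) {
                if (!warned) {              /* warn once instead of crashing */
                    fprintf(stderr, "unexpected NULL page at %d\n", i);
                    warned = 1;
                }
                continue;
            }
            free(pages[i]);
            pages[i] = NULL;
        }
    }

    int main(void)
    {
        void *pages[3] = { malloc(16), NULL, malloc(16) };

        clear_pages(pages, 3);
        return 0;
    }
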
3676     diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
3677     index 76e0d569..823359ed 100644
3678     --- a/sound/arm/pxa2xx-pcm-lib.c
3679     +++ b/sound/arm/pxa2xx-pcm-lib.c
3680     @@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
3681     } else {
3682     printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
3683     rtd->params->name, dma_ch, dcsr);
3684     + snd_pcm_stream_lock(substream);
3685     snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
3686     + snd_pcm_stream_unlock(substream);
3687     }
3688     }
3689     EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
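
From here on, the sound hunks in this patch all make the same change: snd_pcm_stop() is called with the PCM stream lock held, so each caller gets wrapped in snd_pcm_stream_lock()/unlock() (or the _irqsave variants when running in IRQ or timer context). A userspace model of the underlying rule that stream state may only change under the stream lock; the names are illustrative, not ALSA API.

    #include <pthread.h>
    #include <stdio.h>

    enum stream_state { STREAM_RUNNING, STREAM_XRUN };

    struct stream_model {
        pthread_mutex_t lock;
        enum stream_state state;
    };

    /* Caller must hold s->lock, mirroring the snd_pcm_stop() requirement. */
    static void stream_stop_locked(struct stream_model *s, enum stream_state st)
    {
        s->state = st;
    }

    static void report_xrun(struct stream_model *s)
    {
        pthread_mutex_lock(&s->lock);        /* the wrapping these patches add */
        stream_stop_locked(s, STREAM_XRUN);
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct stream_model s = { PTHREAD_MUTEX_INITIALIZER, STREAM_RUNNING };

        report_xrun(&s);
        printf("state = %s\n", s.state == STREAM_XRUN ? "XRUN" : "RUNNING");
        return 0;
    }
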
3690     diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
3691     index e3cb46fe..b3f39b5e 100644
3692     --- a/sound/core/seq/oss/seq_oss_init.c
3693     +++ b/sound/core/seq/oss/seq_oss_init.c
3694     @@ -31,6 +31,7 @@
3695     #include <linux/export.h>
3696     #include <linux/moduleparam.h>
3697     #include <linux/slab.h>
3698     +#include <linux/workqueue.h>
3699    
3700     /*
3701     * common variables
3702     @@ -60,6 +61,14 @@ static void free_devinfo(void *private);
3703     #define call_ctl(type,rec) snd_seq_kernel_client_ctl(system_client, type, rec)
3704    
3705    
3706     +/* call snd_seq_oss_midi_lookup_ports() asynchronously */
3707     +static void async_call_lookup_ports(struct work_struct *work)
3708     +{
3709     + snd_seq_oss_midi_lookup_ports(system_client);
3710     +}
3711     +
3712     +static DECLARE_WORK(async_lookup_work, async_call_lookup_ports);
3713     +
3714     /*
3715     * create sequencer client for OSS sequencer
3716     */
3717     @@ -85,9 +94,6 @@ snd_seq_oss_create_client(void)
3718     system_client = rc;
3719     debug_printk(("new client = %d\n", rc));
3720    
3721     - /* look up midi devices */
3722     - snd_seq_oss_midi_lookup_ports(system_client);
3723     -
3724     /* create annoucement receiver port */
3725     memset(port, 0, sizeof(*port));
3726     strcpy(port->name, "Receiver");
3727     @@ -115,6 +121,9 @@ snd_seq_oss_create_client(void)
3728     }
3729     rc = 0;
3730    
3731     + /* look up midi devices */
3732     + schedule_work(&async_lookup_work);
3733     +
3734     __error:
3735     kfree(port);
3736     return rc;
3737     @@ -160,6 +169,7 @@ receive_announce(struct snd_seq_event *ev, int direct, void *private, int atomic
3738     int
3739     snd_seq_oss_delete_client(void)
3740     {
3741     + cancel_work_sync(&async_lookup_work);
3742     if (system_client >= 0)
3743     snd_seq_delete_kernel_client(system_client);
3744    
3745     diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
3746     index 677dc845..862d8489 100644
3747     --- a/sound/core/seq/oss/seq_oss_midi.c
3748     +++ b/sound/core/seq/oss/seq_oss_midi.c
3749     @@ -72,7 +72,7 @@ static int send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev,
3750     * look up the existing ports
3751     * this looks a very exhausting job.
3752     */
3753     -int __init
3754     +int
3755     snd_seq_oss_midi_lookup_ports(int client)
3756     {
3757     struct snd_seq_client_info *clinfo;
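
The OSS sequencer hunks above move the slow MIDI port lookup off the client-creation path: it is queued as deferred work at create time, synchronously flushed at teardown with cancel_work_sync(), and the helper loses its __init marking because it can now run after boot. A pthread-based stand-in for that shape; unlike cancel_work_sync(), a join can only wait for the work, not cancel it, so treat this strictly as a model.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_t lookup_thread;
    static int lookup_scheduled;

    static void *lookup_ports(void *unused)
    {
        (void)unused;
        sleep(1);                        /* pretend this scan is expensive */
        puts("port lookup finished");
        return NULL;
    }

    static int create_client(void)
    {
        puts("client created");
        /* defer the slow part instead of blocking the caller */
        if (pthread_create(&lookup_thread, NULL, lookup_ports, NULL) == 0)
            lookup_scheduled = 1;
        return 0;
    }

    static void delete_client(void)
    {
        if (lookup_scheduled)            /* wait for the deferred work to finish */
            pthread_join(lookup_thread, NULL);
        puts("client deleted");
    }

    int main(void)
    {
        create_client();
        delete_client();
        return 0;
    }
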
3758     diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
3759     index fbc17203..a471d821 100644
3760     --- a/sound/pci/asihpi/asihpi.c
3761     +++ b/sound/pci/asihpi/asihpi.c
3762     @@ -769,7 +769,10 @@ static void snd_card_asihpi_timer_function(unsigned long data)
3763     s->number);
3764     ds->drained_count++;
3765     if (ds->drained_count > 20) {
3766     + unsigned long flags;
3767     + snd_pcm_stream_lock_irqsave(s, flags);
3768     snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
3769     + snd_pcm_stream_unlock_irqrestore(s, flags);
3770     continue;
3771     }
3772     } else {
3773     diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
3774     index 6e78c678..819430ac 100644
3775     --- a/sound/pci/atiixp.c
3776     +++ b/sound/pci/atiixp.c
3777     @@ -689,7 +689,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
3778     if (! dma->substream || ! dma->running)
3779     return;
3780     snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
3781     + snd_pcm_stream_lock(dma->substream);
3782     snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
3783     + snd_pcm_stream_unlock(dma->substream);
3784     }
3785    
3786     /*
3787     diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
3788     index d0bec7ba..57f41820 100644
3789     --- a/sound/pci/atiixp_modem.c
3790     +++ b/sound/pci/atiixp_modem.c
3791     @@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
3792     if (! dma->substream || ! dma->running)
3793     return;
3794     snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
3795     + snd_pcm_stream_lock(dma->substream);
3796     snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
3797     + snd_pcm_stream_unlock(dma->substream);
3798     }
3799    
3800     /*
3801     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
3802     index 4b1524a8..24400cff 100644
3803     --- a/sound/pci/hda/hda_generic.c
3804     +++ b/sound/pci/hda/hda_generic.c
3805     @@ -840,7 +840,7 @@ static int add_control_with_pfx(struct hda_gen_spec *spec, int type,
3806     const char *pfx, const char *dir,
3807     const char *sfx, int cidx, unsigned long val)
3808     {
3809     - char name[32];
3810     + char name[44];
3811     snprintf(name, sizeof(name), "%s %s %s", pfx, dir, sfx);
3812     if (!add_control(spec, type, name, cidx, val))
3813     return -ENOMEM;
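
The hda_generic.c fix above ("char name[32]" becoming "char name[44]") is about leaving room for the concatenated "<pfx> <dir> <sfx>" control name. snprintf() reports the length it would have needed, which makes truncation easy to spot; a tiny illustration with made-up strings.

    #include <stdio.h>

    int main(void)
    {
        char small[32], big[44];
        const char *pfx = "Headphone Surround Speaker";   /* longish prefix */
        const char *dir = "Playback";
        const char *sfx = "Volume";
        int need;

        need = snprintf(small, sizeof(small), "%s %s %s", pfx, dir, sfx);
        printf("wanted %d bytes, 32-byte buffer gave \"%s\"\n", need, small);

        snprintf(big, sizeof(big), "%s %s %s", pfx, dir, sfx);
        printf("44-byte buffer gives             \"%s\"\n", big);
        return 0;
    }
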
3814     diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
3815     index e0bf7534..2e7493ef 100644
3816     --- a/sound/pci/hda/hda_local.h
3817     +++ b/sound/pci/hda/hda_local.h
3818     @@ -562,6 +562,14 @@ static inline unsigned int get_wcaps_channels(u32 wcaps)
3819     return chans;
3820     }
3821    
3822     +static inline void snd_hda_override_wcaps(struct hda_codec *codec,
3823     + hda_nid_t nid, u32 val)
3824     +{
3825     + if (nid >= codec->start_nid &&
3826     + nid < codec->start_nid + codec->num_nodes)
3827     + codec->wcaps[nid - codec->start_nid] = val;
3828     +}
3829     +
3830     u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction);
3831     int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
3832     unsigned int caps);
3833     @@ -667,7 +675,7 @@ snd_hda_check_power_state(struct hda_codec *codec, hda_nid_t nid,
3834     if (state & AC_PWRST_ERROR)
3835     return true;
3836     state = (state >> 4) & 0x0f;
3837     - return (state != target_state);
3838     + return (state == target_state);
3839     }
3840    
3841     unsigned int snd_hda_codec_eapd_power_filter(struct hda_codec *codec,
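
The snd_hda_override_wcaps() helper added above is a bounds-checked setter: it writes into the per-node wcaps[] array only when the node id lies inside the codec's node range. The same shape in a standalone example; struct layout and names here are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    struct codec_model {
        uint16_t start_nid;
        int num_nodes;
        uint32_t wcaps[8];
    };

    static void override_wcaps(struct codec_model *c, uint16_t nid, uint32_t val)
    {
        if (nid >= c->start_nid && nid < c->start_nid + c->num_nodes)
            c->wcaps[nid - c->start_nid] = val;     /* in range: write */
        /* out of range: silently ignored, no out-of-bounds access */
    }

    int main(void)
    {
        struct codec_model c = { .start_nid = 0x10, .num_nodes = 8 };

        override_wcaps(&c, 0x12, 0xdeadbeef);
        override_wcaps(&c, 0x40, 0x1);              /* ignored */
        printf("wcaps[0x12] = 0x%x\n", c.wcaps[0x12 - c.start_nid]);
        return 0;
    }
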
3842     diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
3843     index 977b0d87..d97f0d61 100644
3844     --- a/sound/pci/hda/patch_analog.c
3845     +++ b/sound/pci/hda/patch_analog.c
3846     @@ -2112,6 +2112,9 @@ static void ad_vmaster_eapd_hook(void *private_data, int enabled)
3847     {
3848     struct hda_codec *codec = private_data;
3849     struct ad198x_spec *spec = codec->spec;
3850     +
3851     + if (!spec->eapd_nid)
3852     + return;
3853     snd_hda_codec_update_cache(codec, spec->eapd_nid, 0,
3854     AC_VERB_SET_EAPD_BTLENABLE,
3855     enabled ? 0x02 : 0x00);
3856     @@ -3601,13 +3604,16 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
3857     {
3858     struct ad198x_spec *spec = codec->spec;
3859    
3860     - if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3861     + switch (action) {
3862     + case HDA_FIXUP_ACT_PRE_PROBE:
3863     + spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
3864     + break;
3865     + case HDA_FIXUP_ACT_PROBE:
3866     if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
3867     spec->eapd_nid = spec->gen.autocfg.line_out_pins[0];
3868     else
3869     spec->eapd_nid = spec->gen.autocfg.speaker_pins[0];
3870     - if (spec->eapd_nid)
3871     - spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
3872     + break;
3873     }
3874     }
3875    
3876     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3877     index e12f7a03..496d7f21 100644
3878     --- a/sound/pci/hda/patch_hdmi.c
3879     +++ b/sound/pci/hda/patch_hdmi.c
3880     @@ -1146,7 +1146,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
3881     per_cvt->assigned = 1;
3882     hinfo->nid = per_cvt->cvt_nid;
3883    
3884     - snd_hda_codec_write(codec, per_pin->pin_nid, 0,
3885     + snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
3886     AC_VERB_SET_CONNECT_SEL,
3887     mux_idx);
3888     snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
3889     @@ -2536,6 +2536,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3890     { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
3891     { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
3892     { .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
3893     +{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
3894     { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
3895     { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3896     { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
3897     @@ -2588,6 +2589,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0042");
3898     MODULE_ALIAS("snd-hda-codec-id:10de0043");
3899     MODULE_ALIAS("snd-hda-codec-id:10de0044");
3900     MODULE_ALIAS("snd-hda-codec-id:10de0051");
3901     +MODULE_ALIAS("snd-hda-codec-id:10de0060");
3902     MODULE_ALIAS("snd-hda-codec-id:10de0067");
3903     MODULE_ALIAS("snd-hda-codec-id:10de8001");
3904     MODULE_ALIAS("snd-hda-codec-id:11069f80");
3905     diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
3906     index e5245544..aed19c3f 100644
3907     --- a/sound/pci/hda/patch_via.c
3908     +++ b/sound/pci/hda/patch_via.c
3909     @@ -910,6 +910,8 @@ static const struct hda_verb vt1708S_init_verbs[] = {
3910     static void override_mic_boost(struct hda_codec *codec, hda_nid_t pin,
3911     int offset, int num_steps, int step_size)
3912     {
3913     + snd_hda_override_wcaps(codec, pin,
3914     + get_wcaps(codec, pin) | AC_WCAP_IN_AMP);
3915     snd_hda_override_amp_caps(codec, pin, HDA_INPUT,
3916     (offset << AC_AMPCAP_OFFSET_SHIFT) |
3917     (num_steps << AC_AMPCAP_NUM_STEPS_SHIFT) |
3918     diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
3919     index 1d38fd0b..d1282652 100644
3920     --- a/sound/soc/atmel/atmel-pcm-dma.c
3921     +++ b/sound/soc/atmel/atmel-pcm-dma.c
3922     @@ -81,7 +81,9 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
3923    
3924     /* stop RX and capture: will be enabled again at restart */
3925     ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
3926     + snd_pcm_stream_lock(substream);
3927     snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
3928     + snd_pcm_stream_unlock(substream);
3929    
3930     /* now drain RHR and read status to remove xrun condition */
3931     ssc_readx(prtd->ssc->regs, SSC_RHR);
3932     diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
3933     index 8a9f4353..d3a68bbf 100644
3934     --- a/sound/soc/codecs/sgtl5000.h
3935     +++ b/sound/soc/codecs/sgtl5000.h
3936     @@ -347,7 +347,7 @@
3937     #define SGTL5000_PLL_INT_DIV_MASK 0xf800
3938     #define SGTL5000_PLL_INT_DIV_SHIFT 11
3939     #define SGTL5000_PLL_INT_DIV_WIDTH 5
3940     -#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700
3941     +#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff
3942     #define SGTL5000_PLL_FRAC_DIV_SHIFT 0
3943     #define SGTL5000_PLL_FRAC_DIV_WIDTH 11
3944    
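
The SGTL5000 fix above corrects a mask that did not match its declared width: an 11-bit field at shift 0 needs 0x07ff, not 0x0700. Deriving masks from (shift, width) avoids that class of typo entirely; a short check program, reusing only the shift/width values visible in the header.

    #include <stdio.h>

    #define FIELD_MASK(shift, width) (((1u << (width)) - 1) << (shift))

    int main(void)
    {
        /* PLL fractional divisor: shift 0, width 11 -> 0x07ff */
        printf("FRAC_DIV mask = 0x%04x\n", FIELD_MASK(0, 11));
        /* PLL integer divisor: shift 11, width 5 -> 0xf800 */
        printf("INT_DIV  mask = 0x%04x\n", FIELD_MASK(11, 5));
        return 0;
    }
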
3945     diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
3946     index 1358c7de..d0740a76 100644
3947     --- a/sound/soc/s6000/s6000-pcm.c
3948     +++ b/sound/soc/s6000/s6000-pcm.c
3949     @@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data)
3950     substream->runtime &&
3951     snd_pcm_running(substream)) {
3952     dev_dbg(pcm->dev, "xrun\n");
3953     + snd_pcm_stream_lock(substream);
3954     snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
3955     + snd_pcm_stream_unlock(substream);
3956     ret = IRQ_HANDLED;
3957     }
3958    
3959     diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
3960     index 40dd50a8..8221ff2f 100644
3961     --- a/sound/usb/6fire/pcm.c
3962     +++ b/sound/usb/6fire/pcm.c
3963     @@ -641,17 +641,25 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
3964     void usb6fire_pcm_abort(struct sfire_chip *chip)
3965     {
3966     struct pcm_runtime *rt = chip->pcm;
3967     + unsigned long flags;
3968     int i;
3969    
3970     if (rt) {
3971     rt->panic = true;
3972    
3973     - if (rt->playback.instance)
3974     + if (rt->playback.instance) {
3975     + snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
3976     snd_pcm_stop(rt->playback.instance,
3977     SNDRV_PCM_STATE_XRUN);
3978     - if (rt->capture.instance)
3979     + snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
3980     + }
3981     +
3982     + if (rt->capture.instance) {
3983     + snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
3984     snd_pcm_stop(rt->capture.instance,
3985     SNDRV_PCM_STATE_XRUN);
3986     + snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
3987     + }
3988    
3989     for (i = 0; i < PCM_N_URBS; i++) {
3990     usb_poison_urb(&rt->in_urbs[i].instance);
3991     diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
3992     index 6ad617b9..76d83290 100644
3993     --- a/sound/usb/misc/ua101.c
3994     +++ b/sound/usb/misc/ua101.c
3995     @@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua)
3996    
3997     static void abort_alsa_capture(struct ua101 *ua)
3998     {
3999     - if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
4000     + unsigned long flags;
4001     +
4002     + if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
4003     + snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
4004     snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
4005     + snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
4006     + }
4007     }
4008    
4009     static void abort_alsa_playback(struct ua101 *ua)
4010     {
4011     - if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
4012     + unsigned long flags;
4013     +
4014     + if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
4015     + snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
4016     snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
4017     + snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
4018     + }
4019     }
4020    
4021     static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
4022     diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
4023     index b3765324..0ce90337 100644
4024     --- a/sound/usb/usx2y/usbusx2yaudio.c
4025     +++ b/sound/usb/usx2y/usbusx2yaudio.c
4026     @@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
4027     struct snd_usX2Y_substream *subs = usX2Y->subs[s];
4028     if (subs) {
4029     if (atomic_read(&subs->state) >= state_PRERUNNING) {
4030     + unsigned long flags;
4031     +
4032     + snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
4033     snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
4034     + snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
4035     }
4036     for (u = 0; u < NRURBS; u++) {
4037     struct urb *urb = subs->urb[u];