Contents of /trunk/kernel-magellan/patches-3.7/0109-3.7.10-all-fixes.patch
Parent Directory | Revision Log
Revision 2091 -
(show annotations)
(download)
Tue Mar 5 09:03:17 2013 UTC (11 years, 6 months ago) by niro
File size: 78860 byte(s)
linux-3.7.10
1 | diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c |
2 | index 38883f0..051a35f 100644 |
3 | --- a/arch/s390/kvm/kvm-s390.c |
4 | +++ b/arch/s390/kvm/kvm-s390.c |
5 | @@ -763,6 +763,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) |
6 | } else |
7 | prefix = 0; |
8 | |
9 | + /* |
10 | + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy |
11 | + * copying in vcpu load/put. Lets update our copies before we save |
12 | + * it into the save area |
13 | + */ |
14 | + save_fp_regs(&vcpu->arch.guest_fpregs); |
15 | + save_access_regs(vcpu->run->s.regs.acrs); |
16 | + |
17 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), |
18 | vcpu->arch.guest_fpregs.fprs, 128, prefix)) |
19 | return -EFAULT; |
20 | diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig |
21 | index 9f2edb5..cb9c333 100644 |
22 | --- a/arch/sparc/Kconfig |
23 | +++ b/arch/sparc/Kconfig |
24 | @@ -61,6 +61,7 @@ config SPARC64 |
25 | select HAVE_MEMBLOCK |
26 | select HAVE_MEMBLOCK_NODE_MAP |
27 | select HAVE_SYSCALL_WRAPPERS |
28 | + select HAVE_ARCH_TRANSPARENT_HUGEPAGE |
29 | select HAVE_DYNAMIC_FTRACE |
30 | select HAVE_FTRACE_MCOUNT_RECORD |
31 | select HAVE_SYSCALL_TRACEPOINTS |
32 | diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h |
33 | index 9661e9b..7eb57d2 100644 |
34 | --- a/arch/sparc/include/asm/hugetlb.h |
35 | +++ b/arch/sparc/include/asm/hugetlb.h |
36 | @@ -12,7 +12,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
37 | |
38 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) |
39 | { |
40 | - hugetlb_setup(mm); |
41 | } |
42 | |
43 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
44 | diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h |
45 | index 4b39f74..e155388 100644 |
46 | --- a/arch/sparc/include/asm/page_64.h |
47 | +++ b/arch/sparc/include/asm/page_64.h |
48 | @@ -27,8 +27,8 @@ |
49 | #ifndef __ASSEMBLY__ |
50 | |
51 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
52 | -struct mm_struct; |
53 | -extern void hugetlb_setup(struct mm_struct *mm); |
54 | +struct pt_regs; |
55 | +extern void hugetlb_setup(struct pt_regs *regs); |
56 | #endif |
57 | |
58 | #define WANT_PAGE_VIRTUAL |
59 | diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h |
60 | index 95515f1..c07268d 100644 |
61 | --- a/arch/sparc/include/asm/pgtable_64.h |
62 | +++ b/arch/sparc/include/asm/pgtable_64.h |
63 | @@ -71,7 +71,6 @@ |
64 | #define PMD_PADDR _AC(0xfffffffe,UL) |
65 | #define PMD_PADDR_SHIFT _AC(11,UL) |
66 | |
67 | -#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
68 | #define PMD_ISHUGE _AC(0x00000001,UL) |
69 | |
70 | /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge |
71 | @@ -86,7 +85,6 @@ |
72 | #define PMD_HUGE_ACCESSED _AC(0x00000080,UL) |
73 | #define PMD_HUGE_EXEC _AC(0x00000040,UL) |
74 | #define PMD_HUGE_SPLITTING _AC(0x00000020,UL) |
75 | -#endif |
76 | |
77 | /* PGDs point to PMD tables which are 8K aligned. */ |
78 | #define PGD_PADDR _AC(0xfffffffc,UL) |
79 | @@ -622,6 +620,12 @@ static inline unsigned long pte_special(pte_t pte) |
80 | return pte_val(pte) & _PAGE_SPECIAL; |
81 | } |
82 | |
83 | +static inline int pmd_large(pmd_t pmd) |
84 | +{ |
85 | + return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == |
86 | + (PMD_ISHUGE | PMD_HUGE_PRESENT); |
87 | +} |
88 | + |
89 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
90 | static inline int pmd_young(pmd_t pmd) |
91 | { |
92 | @@ -640,12 +644,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd) |
93 | return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); |
94 | } |
95 | |
96 | -static inline int pmd_large(pmd_t pmd) |
97 | -{ |
98 | - return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == |
99 | - (PMD_ISHUGE | PMD_HUGE_PRESENT); |
100 | -} |
101 | - |
102 | static inline int pmd_trans_splitting(pmd_t pmd) |
103 | { |
104 | return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == |
105 | diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h |
106 | index b4c258d..e696432 100644 |
107 | --- a/arch/sparc/include/asm/tsb.h |
108 | +++ b/arch/sparc/include/asm/tsb.h |
109 | @@ -157,17 +157,26 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
110 | andn REG2, 0x7, REG2; \ |
111 | add REG1, REG2, REG1; |
112 | |
113 | - /* This macro exists only to make the PMD translator below easier |
114 | - * to read. It hides the ELF section switch for the sun4v code |
115 | - * patching. |
116 | + /* These macros exists only to make the PMD translator below |
117 | + * easier to read. It hides the ELF section switch for the |
118 | + * sun4v code patching. |
119 | */ |
120 | -#define OR_PTE_BIT(REG, NAME) \ |
121 | +#define OR_PTE_BIT_1INSN(REG, NAME) \ |
122 | 661: or REG, _PAGE_##NAME##_4U, REG; \ |
123 | .section .sun4v_1insn_patch, "ax"; \ |
124 | .word 661b; \ |
125 | or REG, _PAGE_##NAME##_4V, REG; \ |
126 | .previous; |
127 | |
128 | +#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \ |
129 | +661: sethi %hi(_PAGE_##NAME##_4U), TMP; \ |
130 | + or REG, TMP, REG; \ |
131 | + .section .sun4v_2insn_patch, "ax"; \ |
132 | + .word 661b; \ |
133 | + mov -1, TMP; \ |
134 | + or REG, _PAGE_##NAME##_4V, REG; \ |
135 | + .previous; |
136 | + |
137 | /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ |
138 | #define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ |
139 | 661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ |
140 | @@ -214,12 +223,13 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; |
141 | andn REG1, PMD_HUGE_PROTBITS, REG2; \ |
142 | sllx REG2, PMD_PADDR_SHIFT, REG2; \ |
143 | /* REG2 now holds PFN << PAGE_SHIFT */ \ |
144 | - andcc REG1, PMD_HUGE_EXEC, %g0; \ |
145 | - bne,a,pt %xcc, 1f; \ |
146 | - OR_PTE_BIT(REG2, EXEC); \ |
147 | -1: andcc REG1, PMD_HUGE_WRITE, %g0; \ |
148 | + andcc REG1, PMD_HUGE_WRITE, %g0; \ |
149 | bne,a,pt %xcc, 1f; \ |
150 | - OR_PTE_BIT(REG2, W); \ |
151 | + OR_PTE_BIT_1INSN(REG2, W); \ |
152 | +1: andcc REG1, PMD_HUGE_EXEC, %g0; \ |
153 | + be,pt %xcc, 1f; \ |
154 | + nop; \ |
155 | + OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \ |
156 | /* REG1 can now be clobbered, build final PTE */ \ |
157 | 1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ |
158 | ba,pt %xcc, PTE_LABEL; \ |
159 | diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S |
160 | index d4bdc7a..a313e4a 100644 |
161 | --- a/arch/sparc/kernel/tsb.S |
162 | +++ b/arch/sparc/kernel/tsb.S |
163 | @@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath: |
164 | nop |
165 | |
166 | /* It is a huge page, use huge page TSB entry address we |
167 | - * calculated above. |
168 | + * calculated above. If the huge page TSB has not been |
169 | + * allocated, setup a trap stack and call hugetlb_setup() |
170 | + * to do so, then return from the trap to replay the TLB |
171 | + * miss. |
172 | + * |
173 | + * This is necessary to handle the case of transparent huge |
174 | + * pages where we don't really have a non-atomic context |
175 | + * in which to allocate the hugepage TSB hash table. When |
176 | + * the 'mm' faults in the hugepage for the first time, we |
177 | + * thus handle it here. This also makes sure that we can |
178 | + * allocate the TSB hash table on the correct NUMA node. |
179 | */ |
180 | TRAP_LOAD_TRAP_BLOCK(%g7, %g2) |
181 | - ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2 |
182 | - cmp %g2, -1 |
183 | - movne %xcc, %g2, %g1 |
184 | + ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1 |
185 | + cmp %g1, -1 |
186 | + bne,pt %xcc, 60f |
187 | + nop |
188 | + |
189 | +661: rdpr %pstate, %g5 |
190 | + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate |
191 | + .section .sun4v_2insn_patch, "ax" |
192 | + .word 661b |
193 | + SET_GL(1) |
194 | + nop |
195 | + .previous |
196 | + |
197 | + rdpr %tl, %g3 |
198 | + cmp %g3, 1 |
199 | + bne,pn %xcc, winfix_trampoline |
200 | + nop |
201 | + ba,pt %xcc, etrap |
202 | + rd %pc, %g7 |
203 | + call hugetlb_setup |
204 | + add %sp, PTREGS_OFF, %o0 |
205 | + ba,pt %xcc, rtrap |
206 | + nop |
207 | + |
208 | 60: |
209 | #endif |
210 | |
211 | diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c |
212 | index 097aee7..5062ff3 100644 |
213 | --- a/arch/sparc/mm/fault_64.c |
214 | +++ b/arch/sparc/mm/fault_64.c |
215 | @@ -472,8 +472,13 @@ good_area: |
216 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
217 | mm_rss = mm->context.huge_pte_count; |
218 | if (unlikely(mm_rss > |
219 | - mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) |
220 | - tsb_grow(mm, MM_TSB_HUGE, mm_rss); |
221 | + mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { |
222 | + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) |
223 | + tsb_grow(mm, MM_TSB_HUGE, mm_rss); |
224 | + else |
225 | + hugetlb_setup(regs); |
226 | + |
227 | + } |
228 | #endif |
229 | return; |
230 | |
231 | diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c |
232 | index 42c55df..01ee23d 100644 |
233 | --- a/arch/sparc/mm/gup.c |
234 | +++ b/arch/sparc/mm/gup.c |
235 | @@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, |
236 | return 1; |
237 | } |
238 | |
239 | +static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, |
240 | + unsigned long end, int write, struct page **pages, |
241 | + int *nr) |
242 | +{ |
243 | + struct page *head, *page, *tail; |
244 | + u32 mask; |
245 | + int refs; |
246 | + |
247 | + mask = PMD_HUGE_PRESENT; |
248 | + if (write) |
249 | + mask |= PMD_HUGE_WRITE; |
250 | + if ((pmd_val(pmd) & mask) != mask) |
251 | + return 0; |
252 | + |
253 | + refs = 0; |
254 | + head = pmd_page(pmd); |
255 | + page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
256 | + tail = page; |
257 | + do { |
258 | + VM_BUG_ON(compound_head(page) != head); |
259 | + pages[*nr] = page; |
260 | + (*nr)++; |
261 | + page++; |
262 | + refs++; |
263 | + } while (addr += PAGE_SIZE, addr != end); |
264 | + |
265 | + if (!page_cache_add_speculative(head, refs)) { |
266 | + *nr -= refs; |
267 | + return 0; |
268 | + } |
269 | + |
270 | + if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { |
271 | + *nr -= refs; |
272 | + while (refs--) |
273 | + put_page(head); |
274 | + return 0; |
275 | + } |
276 | + |
277 | + /* Any tail page need their mapcount reference taken before we |
278 | + * return. |
279 | + */ |
280 | + while (refs--) { |
281 | + if (PageTail(tail)) |
282 | + get_huge_page_tail(tail); |
283 | + tail++; |
284 | + } |
285 | + |
286 | + return 1; |
287 | +} |
288 | + |
289 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
290 | int write, struct page **pages, int *nr) |
291 | { |
292 | @@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
293 | pmd_t pmd = *pmdp; |
294 | |
295 | next = pmd_addr_end(addr, end); |
296 | - if (pmd_none(pmd)) |
297 | + if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
298 | return 0; |
299 | - if (!gup_pte_range(pmd, addr, next, write, pages, nr)) |
300 | + if (unlikely(pmd_large(pmd))) { |
301 | + if (!gup_huge_pmd(pmdp, pmd, addr, next, |
302 | + write, pages, nr)) |
303 | + return 0; |
304 | + } else if (!gup_pte_range(pmd, addr, next, write, |
305 | + pages, nr)) |
306 | return 0; |
307 | } while (pmdp++, addr = next, addr != end); |
308 | |
309 | diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c |
310 | index 9e28a11..c202a6c 100644 |
311 | --- a/arch/sparc/mm/init_64.c |
312 | +++ b/arch/sparc/mm/init_64.c |
313 | @@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde |
314 | struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; |
315 | unsigned long tag; |
316 | |
317 | + if (unlikely(!tsb)) |
318 | + return; |
319 | + |
320 | tsb += ((address >> tsb_hash_shift) & |
321 | (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); |
322 | tag = (address >> 22UL); |
323 | tsb_insert(tsb, tag, tte); |
324 | } |
325 | |
326 | +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
327 | +static inline bool is_hugetlb_pte(pte_t pte) |
328 | +{ |
329 | + if ((tlb_type == hypervisor && |
330 | + (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || |
331 | + (tlb_type != hypervisor && |
332 | + (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) |
333 | + return true; |
334 | + return false; |
335 | +} |
336 | +#endif |
337 | + |
338 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
339 | { |
340 | - unsigned long tsb_index, tsb_hash_shift, flags; |
341 | struct mm_struct *mm; |
342 | + unsigned long flags; |
343 | pte_t pte = *ptep; |
344 | |
345 | if (tlb_type != hypervisor) { |
346 | @@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * |
347 | |
348 | mm = vma->vm_mm; |
349 | |
350 | - tsb_index = MM_TSB_BASE; |
351 | - tsb_hash_shift = PAGE_SHIFT; |
352 | - |
353 | spin_lock_irqsave(&mm->context.lock, flags); |
354 | |
355 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
356 | - if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { |
357 | - if ((tlb_type == hypervisor && |
358 | - (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || |
359 | - (tlb_type != hypervisor && |
360 | - (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { |
361 | - tsb_index = MM_TSB_HUGE; |
362 | - tsb_hash_shift = HPAGE_SHIFT; |
363 | - } |
364 | - } |
365 | + if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) |
366 | + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, |
367 | + address, pte_val(pte)); |
368 | + else |
369 | #endif |
370 | - |
371 | - __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift, |
372 | - address, pte_val(pte)); |
373 | + __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, |
374 | + address, pte_val(pte)); |
375 | |
376 | spin_unlock_irqrestore(&mm->context.lock, flags); |
377 | } |
378 | @@ -2712,14 +2718,28 @@ static void context_reload(void *__data) |
379 | load_secondary_context(mm); |
380 | } |
381 | |
382 | -void hugetlb_setup(struct mm_struct *mm) |
383 | +void hugetlb_setup(struct pt_regs *regs) |
384 | { |
385 | - struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; |
386 | + struct mm_struct *mm = current->mm; |
387 | + struct tsb_config *tp; |
388 | |
389 | - if (likely(tp->tsb != NULL)) |
390 | - return; |
391 | + if (in_atomic() || !mm) { |
392 | + const struct exception_table_entry *entry; |
393 | + |
394 | + entry = search_exception_tables(regs->tpc); |
395 | + if (entry) { |
396 | + regs->tpc = entry->fixup; |
397 | + regs->tnpc = regs->tpc + 4; |
398 | + return; |
399 | + } |
400 | + pr_alert("Unexpected HugeTLB setup in atomic context.\n"); |
401 | + die_if_kernel("HugeTSB in atomic", regs); |
402 | + } |
403 | + |
404 | + tp = &mm->context.tsb_block[MM_TSB_HUGE]; |
405 | + if (likely(tp->tsb == NULL)) |
406 | + tsb_grow(mm, MM_TSB_HUGE, 0); |
407 | |
408 | - tsb_grow(mm, MM_TSB_HUGE, 0); |
409 | tsb_context_switch(mm); |
410 | smp_tsb_sync(mm); |
411 | |
412 | diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c |
413 | index 3e8fec3..ba6ae7f 100644 |
414 | --- a/arch/sparc/mm/tlb.c |
415 | +++ b/arch/sparc/mm/tlb.c |
416 | @@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
417 | mm->context.huge_pte_count++; |
418 | else |
419 | mm->context.huge_pte_count--; |
420 | - if (mm->context.huge_pte_count == 1) |
421 | - hugetlb_setup(mm); |
422 | + |
423 | + /* Do not try to allocate the TSB hash table if we |
424 | + * don't have one already. We have various locks held |
425 | + * and thus we'll end up doing a GFP_KERNEL allocation |
426 | + * in an atomic context. |
427 | + * |
428 | + * Instead, we let the first TLB miss on a hugepage |
429 | + * take care of this. |
430 | + */ |
431 | } |
432 | |
433 | if (!pmd_none(orig)) { |
434 | diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c |
435 | index 7f64743..428982b 100644 |
436 | --- a/arch/sparc/mm/tsb.c |
437 | +++ b/arch/sparc/mm/tsb.c |
438 | @@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) |
439 | retry_tsb_alloc: |
440 | gfp_flags = GFP_KERNEL; |
441 | if (new_size > (PAGE_SIZE * 2)) |
442 | - gfp_flags = __GFP_NOWARN | __GFP_NORETRY; |
443 | + gfp_flags |= __GFP_NOWARN | __GFP_NORETRY; |
444 | |
445 | new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], |
446 | gfp_flags, numa_node_id()); |
447 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
448 | index 46c3bff..e957352 100644 |
449 | --- a/arch/x86/Kconfig |
450 | +++ b/arch/x86/Kconfig |
451 | @@ -1255,10 +1255,6 @@ config NODES_SHIFT |
452 | Specify the maximum number of NUMA Nodes available on the target |
453 | system. Increases memory reserved to accommodate various tables. |
454 | |
455 | -config HAVE_ARCH_ALLOC_REMAP |
456 | - def_bool y |
457 | - depends on X86_32 && NUMA |
458 | - |
459 | config ARCH_HAVE_MEMORY_PRESENT |
460 | def_bool y |
461 | depends on X86_32 && DISCONTIGMEM |
462 | diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h |
463 | index eb05fb3..8a9b3e2 100644 |
464 | --- a/arch/x86/include/asm/mmzone_32.h |
465 | +++ b/arch/x86/include/asm/mmzone_32.h |
466 | @@ -14,12 +14,6 @@ extern struct pglist_data *node_data[]; |
467 | |
468 | #include <asm/numaq.h> |
469 | |
470 | -extern void resume_map_numa_kva(pgd_t *pgd); |
471 | - |
472 | -#else /* !CONFIG_NUMA */ |
473 | - |
474 | -static inline void resume_map_numa_kva(pgd_t *pgd) {} |
475 | - |
476 | #endif /* CONFIG_NUMA */ |
477 | |
478 | #ifdef CONFIG_DISCONTIGMEM |
479 | diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c |
480 | index 0a630dd..646d192 100644 |
481 | --- a/arch/x86/kernel/cpu/mshyperv.c |
482 | +++ b/arch/x86/kernel/cpu/mshyperv.c |
483 | @@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void) |
484 | printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", |
485 | ms_hyperv.features, ms_hyperv.hints); |
486 | |
487 | - clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); |
488 | + if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) |
489 | + clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); |
490 | } |
491 | |
492 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { |
493 | diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c |
494 | index 2d125be..8504f36 100644 |
495 | --- a/arch/x86/mm/numa.c |
496 | +++ b/arch/x86/mm/numa.c |
497 | @@ -193,7 +193,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) |
498 | static void __init setup_node_data(int nid, u64 start, u64 end) |
499 | { |
500 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); |
501 | - bool remapped = false; |
502 | u64 nd_pa; |
503 | void *nd; |
504 | int tnid; |
505 | @@ -205,37 +204,28 @@ static void __init setup_node_data(int nid, u64 start, u64 end) |
506 | if (end && (end - start) < NODE_MIN_SIZE) |
507 | return; |
508 | |
509 | - /* initialize remap allocator before aligning to ZONE_ALIGN */ |
510 | - init_alloc_remap(nid, start, end); |
511 | - |
512 | start = roundup(start, ZONE_ALIGN); |
513 | |
514 | printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", |
515 | nid, start, end - 1); |
516 | |
517 | /* |
518 | - * Allocate node data. Try remap allocator first, node-local |
519 | - * memory and then any node. Never allocate in DMA zone. |
520 | + * Allocate node data. Try node-local memory and then any node. |
521 | + * Never allocate in DMA zone. |
522 | */ |
523 | - nd = alloc_remap(nid, nd_size); |
524 | - if (nd) { |
525 | - nd_pa = __pa(nd); |
526 | - remapped = true; |
527 | - } else { |
528 | - nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); |
529 | - if (!nd_pa) { |
530 | - pr_err("Cannot find %zu bytes in node %d\n", |
531 | - nd_size, nid); |
532 | - return; |
533 | - } |
534 | - nd = __va(nd_pa); |
535 | + nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); |
536 | + if (!nd_pa) { |
537 | + pr_err("Cannot find %zu bytes in node %d\n", |
538 | + nd_size, nid); |
539 | + return; |
540 | } |
541 | + nd = __va(nd_pa); |
542 | |
543 | /* report and initialize */ |
544 | - printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n", |
545 | - nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : ""); |
546 | + printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n", |
547 | + nd_pa, nd_pa + nd_size - 1); |
548 | tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); |
549 | - if (!remapped && tnid != nid) |
550 | + if (tnid != nid) |
551 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); |
552 | |
553 | node_data[nid] = nd; |
554 | diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c |
555 | index 534255a..73a6d73 100644 |
556 | --- a/arch/x86/mm/numa_32.c |
557 | +++ b/arch/x86/mm/numa_32.c |
558 | @@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, |
559 | |
560 | extern unsigned long highend_pfn, highstart_pfn; |
561 | |
562 | -#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) |
563 | - |
564 | -static void *node_remap_start_vaddr[MAX_NUMNODES]; |
565 | -void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
566 | - |
567 | -/* |
568 | - * Remap memory allocator |
569 | - */ |
570 | -static unsigned long node_remap_start_pfn[MAX_NUMNODES]; |
571 | -static void *node_remap_end_vaddr[MAX_NUMNODES]; |
572 | -static void *node_remap_alloc_vaddr[MAX_NUMNODES]; |
573 | - |
574 | -/** |
575 | - * alloc_remap - Allocate remapped memory |
576 | - * @nid: NUMA node to allocate memory from |
577 | - * @size: The size of allocation |
578 | - * |
579 | - * Allocate @size bytes from the remap area of NUMA node @nid. The |
580 | - * size of the remap area is predetermined by init_alloc_remap() and |
581 | - * only the callers considered there should call this function. For |
582 | - * more info, please read the comment on top of init_alloc_remap(). |
583 | - * |
584 | - * The caller must be ready to handle allocation failure from this |
585 | - * function and fall back to regular memory allocator in such cases. |
586 | - * |
587 | - * CONTEXT: |
588 | - * Single CPU early boot context. |
589 | - * |
590 | - * RETURNS: |
591 | - * Pointer to the allocated memory on success, %NULL on failure. |
592 | - */ |
593 | -void *alloc_remap(int nid, unsigned long size) |
594 | -{ |
595 | - void *allocation = node_remap_alloc_vaddr[nid]; |
596 | - |
597 | - size = ALIGN(size, L1_CACHE_BYTES); |
598 | - |
599 | - if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) |
600 | - return NULL; |
601 | - |
602 | - node_remap_alloc_vaddr[nid] += size; |
603 | - memset(allocation, 0, size); |
604 | - |
605 | - return allocation; |
606 | -} |
607 | - |
608 | -#ifdef CONFIG_HIBERNATION |
609 | -/** |
610 | - * resume_map_numa_kva - add KVA mapping to the temporary page tables created |
611 | - * during resume from hibernation |
612 | - * @pgd_base - temporary resume page directory |
613 | - */ |
614 | -void resume_map_numa_kva(pgd_t *pgd_base) |
615 | -{ |
616 | - int node; |
617 | - |
618 | - for_each_online_node(node) { |
619 | - unsigned long start_va, start_pfn, nr_pages, pfn; |
620 | - |
621 | - start_va = (unsigned long)node_remap_start_vaddr[node]; |
622 | - start_pfn = node_remap_start_pfn[node]; |
623 | - nr_pages = (node_remap_end_vaddr[node] - |
624 | - node_remap_start_vaddr[node]) >> PAGE_SHIFT; |
625 | - |
626 | - printk(KERN_DEBUG "%s: node %d\n", __func__, node); |
627 | - |
628 | - for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { |
629 | - unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); |
630 | - pgd_t *pgd = pgd_base + pgd_index(vaddr); |
631 | - pud_t *pud = pud_offset(pgd, vaddr); |
632 | - pmd_t *pmd = pmd_offset(pud, vaddr); |
633 | - |
634 | - set_pmd(pmd, pfn_pmd(start_pfn + pfn, |
635 | - PAGE_KERNEL_LARGE_EXEC)); |
636 | - |
637 | - printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", |
638 | - __func__, vaddr, start_pfn + pfn); |
639 | - } |
640 | - } |
641 | -} |
642 | -#endif |
643 | - |
644 | -/** |
645 | - * init_alloc_remap - Initialize remap allocator for a NUMA node |
646 | - * @nid: NUMA node to initizlie remap allocator for |
647 | - * |
648 | - * NUMA nodes may end up without any lowmem. As allocating pgdat and |
649 | - * memmap on a different node with lowmem is inefficient, a special |
650 | - * remap allocator is implemented which can be used by alloc_remap(). |
651 | - * |
652 | - * For each node, the amount of memory which will be necessary for |
653 | - * pgdat and memmap is calculated and two memory areas of the size are |
654 | - * allocated - one in the node and the other in lowmem; then, the area |
655 | - * in the node is remapped to the lowmem area. |
656 | - * |
657 | - * As pgdat and memmap must be allocated in lowmem anyway, this |
658 | - * doesn't waste lowmem address space; however, the actual lowmem |
659 | - * which gets remapped over is wasted. The amount shouldn't be |
660 | - * problematic on machines this feature will be used. |
661 | - * |
662 | - * Initialization failure isn't fatal. alloc_remap() is used |
663 | - * opportunistically and the callers will fall back to other memory |
664 | - * allocation mechanisms on failure. |
665 | - */ |
666 | -void __init init_alloc_remap(int nid, u64 start, u64 end) |
667 | -{ |
668 | - unsigned long start_pfn = start >> PAGE_SHIFT; |
669 | - unsigned long end_pfn = end >> PAGE_SHIFT; |
670 | - unsigned long size, pfn; |
671 | - u64 node_pa, remap_pa; |
672 | - void *remap_va; |
673 | - |
674 | - /* |
675 | - * The acpi/srat node info can show hot-add memroy zones where |
676 | - * memory could be added but not currently present. |
677 | - */ |
678 | - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", |
679 | - nid, start_pfn, end_pfn); |
680 | - |
681 | - /* calculate the necessary space aligned to large page size */ |
682 | - size = node_memmap_size_bytes(nid, start_pfn, end_pfn); |
683 | - size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); |
684 | - size = ALIGN(size, LARGE_PAGE_BYTES); |
685 | - |
686 | - /* allocate node memory and the lowmem remap area */ |
687 | - node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); |
688 | - if (!node_pa) { |
689 | - pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", |
690 | - size, nid); |
691 | - return; |
692 | - } |
693 | - memblock_reserve(node_pa, size); |
694 | - |
695 | - remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, |
696 | - max_low_pfn << PAGE_SHIFT, |
697 | - size, LARGE_PAGE_BYTES); |
698 | - if (!remap_pa) { |
699 | - pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", |
700 | - size, nid); |
701 | - memblock_free(node_pa, size); |
702 | - return; |
703 | - } |
704 | - memblock_reserve(remap_pa, size); |
705 | - remap_va = phys_to_virt(remap_pa); |
706 | - |
707 | - /* perform actual remap */ |
708 | - for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) |
709 | - set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), |
710 | - (node_pa >> PAGE_SHIFT) + pfn, |
711 | - PAGE_KERNEL_LARGE); |
712 | - |
713 | - /* initialize remap allocator parameters */ |
714 | - node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; |
715 | - node_remap_start_vaddr[nid] = remap_va; |
716 | - node_remap_end_vaddr[nid] = remap_va + size; |
717 | - node_remap_alloc_vaddr[nid] = remap_va; |
718 | - |
719 | - printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", |
720 | - nid, node_pa, node_pa + size, remap_va, remap_va + size); |
721 | -} |
722 | - |
723 | void __init initmem_init(void) |
724 | { |
725 | x86_numa_init(); |
726 | diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h |
727 | index 7178c3a..ad86ec9 100644 |
728 | --- a/arch/x86/mm/numa_internal.h |
729 | +++ b/arch/x86/mm/numa_internal.h |
730 | @@ -21,12 +21,6 @@ void __init numa_reset_distance(void); |
731 | |
732 | void __init x86_numa_init(void); |
733 | |
734 | -#ifdef CONFIG_X86_64 |
735 | -static inline void init_alloc_remap(int nid, u64 start, u64 end) { } |
736 | -#else |
737 | -void __init init_alloc_remap(int nid, u64 start, u64 end); |
738 | -#endif |
739 | - |
740 | #ifdef CONFIG_NUMA_EMU |
741 | void __init numa_emulation(struct numa_meminfo *numa_meminfo, |
742 | int numa_dist_cnt); |
743 | diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c |
744 | index 74202c1..7d28c88 100644 |
745 | --- a/arch/x86/power/hibernate_32.c |
746 | +++ b/arch/x86/power/hibernate_32.c |
747 | @@ -129,8 +129,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base) |
748 | } |
749 | } |
750 | |
751 | - resume_map_numa_kva(pgd_base); |
752 | - |
753 | return 0; |
754 | } |
755 | |
756 | diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c |
757 | index 83e866d..f7a080e 100644 |
758 | --- a/arch/x86/xen/spinlock.c |
759 | +++ b/arch/x86/xen/spinlock.c |
760 | @@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) |
761 | if (per_cpu(lock_spinners, cpu) == xl) { |
762 | ADD_STATS(released_slow_kicked, 1); |
763 | xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); |
764 | - break; |
765 | } |
766 | } |
767 | } |
768 | diff --git a/drivers/base/bus.c b/drivers/base/bus.c |
769 | index 181ed26..513a02d 100644 |
770 | --- a/drivers/base/bus.c |
771 | +++ b/drivers/base/bus.c |
772 | @@ -293,7 +293,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, |
773 | struct device *dev; |
774 | int error = 0; |
775 | |
776 | - if (!bus) |
777 | + if (!bus || !bus->p) |
778 | return -EINVAL; |
779 | |
780 | klist_iter_init_node(&bus->p->klist_devices, &i, |
781 | @@ -327,7 +327,7 @@ struct device *bus_find_device(struct bus_type *bus, |
782 | struct klist_iter i; |
783 | struct device *dev; |
784 | |
785 | - if (!bus) |
786 | + if (!bus || !bus->p) |
787 | return NULL; |
788 | |
789 | klist_iter_init_node(&bus->p->klist_devices, &i, |
790 | diff --git a/drivers/base/dd.c b/drivers/base/dd.c |
791 | index e3bbed8..61d3e1b 100644 |
792 | --- a/drivers/base/dd.c |
793 | +++ b/drivers/base/dd.c |
794 | @@ -172,6 +172,8 @@ static int deferred_probe_initcall(void) |
795 | |
796 | driver_deferred_probe_enable = true; |
797 | driver_deferred_probe_trigger(); |
798 | + /* Sort as many dependencies as possible before exiting initcalls */ |
799 | + flush_workqueue(deferred_wq); |
800 | return 0; |
801 | } |
802 | late_initcall(deferred_probe_initcall); |
803 | diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c |
804 | index 9dcf76a..31dd451 100644 |
805 | --- a/drivers/block/sunvdc.c |
806 | +++ b/drivers/block/sunvdc.c |
807 | @@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) |
808 | int op_len, err; |
809 | void *req_buf; |
810 | |
811 | - if (!(((u64)1 << ((u64)op - 1)) & port->operations)) |
812 | + if (!(((u64)1 << (u64)op) & port->operations)) |
813 | return -EOPNOTSUPP; |
814 | |
815 | switch (op) { |
816 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
817 | index 4e6a2b2..313088f 100644 |
818 | --- a/drivers/gpu/drm/i915/intel_pm.c |
819 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
820 | @@ -1474,7 +1474,7 @@ static void i9xx_update_wm(struct drm_device *dev) |
821 | |
822 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
823 | crtc = intel_get_crtc_for_plane(dev, 0); |
824 | - if (crtc->enabled && crtc->fb) { |
825 | + if (intel_crtc_active(crtc)) { |
826 | planea_wm = intel_calculate_wm(crtc->mode.clock, |
827 | wm_info, fifo_size, |
828 | crtc->fb->bits_per_pixel / 8, |
829 | diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c |
830 | index 082c11b..77c67fc 100644 |
831 | --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c |
832 | +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c |
833 | @@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, |
834 | u64 mm_length = (offset + length) - mm_offset; |
835 | int ret; |
836 | |
837 | - vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL); |
838 | + vm = kzalloc(sizeof(*vm), GFP_KERNEL); |
839 | if (!vm) |
840 | return -ENOMEM; |
841 | |
842 | @@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, |
843 | return ret; |
844 | } |
845 | |
846 | + *pvm = vm; |
847 | + |
848 | return 0; |
849 | } |
850 | |
851 | diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h |
852 | index 315b96e..9fdd198 100644 |
853 | --- a/drivers/net/wireless/b43/dma.h |
854 | +++ b/drivers/net/wireless/b43/dma.h |
855 | @@ -169,7 +169,7 @@ struct b43_dmadesc_generic { |
856 | |
857 | /* DMA engine tuning knobs */ |
858 | #define B43_TXRING_SLOTS 256 |
859 | -#define B43_RXRING_SLOTS 64 |
860 | +#define B43_RXRING_SLOTS 256 |
861 | #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN) |
862 | #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN) |
863 | |
864 | diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c |
865 | index 4191294..3d4dd4d 100644 |
866 | --- a/drivers/net/wireless/p54/p54usb.c |
867 | +++ b/drivers/net/wireless/p54/p54usb.c |
868 | @@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = { |
869 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ |
870 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ |
871 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ |
872 | - {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ |
873 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ |
874 | + {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ |
875 | {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ |
876 | {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ |
877 | {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ |
878 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
879 | index b7e6607..6395412 100644 |
880 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
881 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
882 | @@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { |
883 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, |
884 | /* RTL8188CUS-VL */ |
885 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)}, |
886 | + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)}, |
887 | /* 8188 Combo for BC4 */ |
888 | {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, |
889 | |
890 | diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c |
891 | index d1c3a7e..0253927 100644 |
892 | --- a/drivers/net/wireless/rtlwifi/usb.c |
893 | +++ b/drivers/net/wireless/rtlwifi/usb.c |
894 | @@ -42,8 +42,12 @@ |
895 | |
896 | static void usbctrl_async_callback(struct urb *urb) |
897 | { |
898 | - if (urb) |
899 | - kfree(urb->context); |
900 | + if (urb) { |
901 | + /* free dr */ |
902 | + kfree(urb->setup_packet); |
903 | + /* free databuf */ |
904 | + kfree(urb->transfer_buffer); |
905 | + } |
906 | } |
907 | |
908 | static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, |
909 | @@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, |
910 | u8 reqtype; |
911 | struct usb_ctrlrequest *dr; |
912 | struct urb *urb; |
913 | - struct rtl819x_async_write_data { |
914 | - u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE]; |
915 | - struct usb_ctrlrequest dr; |
916 | - } *buf; |
917 | + const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE; |
918 | + u8 *databuf; |
919 | + |
920 | + if (WARN_ON_ONCE(len > databuf_maxlen)) |
921 | + len = databuf_maxlen; |
922 | |
923 | pipe = usb_sndctrlpipe(udev, 0); /* write_out */ |
924 | reqtype = REALTEK_USB_VENQT_WRITE; |
925 | |
926 | - buf = kmalloc(sizeof(*buf), GFP_ATOMIC); |
927 | - if (!buf) |
928 | + dr = kmalloc(sizeof(*dr), GFP_ATOMIC); |
929 | + if (!dr) |
930 | return -ENOMEM; |
931 | |
932 | + databuf = kmalloc(databuf_maxlen, GFP_ATOMIC); |
933 | + if (!databuf) { |
934 | + kfree(dr); |
935 | + return -ENOMEM; |
936 | + } |
937 | + |
938 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
939 | if (!urb) { |
940 | - kfree(buf); |
941 | + kfree(databuf); |
942 | + kfree(dr); |
943 | return -ENOMEM; |
944 | } |
945 | |
946 | - dr = &buf->dr; |
947 | - |
948 | dr->bRequestType = reqtype; |
949 | dr->bRequest = request; |
950 | dr->wValue = cpu_to_le16(value); |
951 | dr->wIndex = cpu_to_le16(index); |
952 | dr->wLength = cpu_to_le16(len); |
953 | /* data are already in little-endian order */ |
954 | - memcpy(buf, pdata, len); |
955 | + memcpy(databuf, pdata, len); |
956 | usb_fill_control_urb(urb, udev, pipe, |
957 | - (unsigned char *)dr, buf, len, |
958 | - usbctrl_async_callback, buf); |
959 | + (unsigned char *)dr, databuf, len, |
960 | + usbctrl_async_callback, NULL); |
961 | rc = usb_submit_urb(urb, GFP_ATOMIC); |
962 | - if (rc < 0) |
963 | - kfree(buf); |
964 | + if (rc < 0) { |
965 | + kfree(databuf); |
966 | + kfree(dr); |
967 | + } |
968 | usb_free_urb(urb); |
969 | return rc; |
970 | } |
971 | diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c |
972 | index 7dabef6..b846b6c 100644 |
973 | --- a/drivers/s390/kvm/kvm_virtio.c |
974 | +++ b/drivers/s390/kvm/kvm_virtio.c |
975 | @@ -422,6 +422,26 @@ static void kvm_extint_handler(struct ext_code ext_code, |
976 | } |
977 | |
978 | /* |
979 | + * For s390-virtio, we expect a page above main storage containing |
980 | + * the virtio configuration. Try to actually load from this area |
981 | + * in order to figure out if the host provides this page. |
982 | + */ |
983 | +static int __init test_devices_support(unsigned long addr) |
984 | +{ |
985 | + int ret = -EIO; |
986 | + |
987 | + asm volatile( |
988 | + "0: lura 0,%1\n" |
989 | + "1: xgr %0,%0\n" |
990 | + "2:\n" |
991 | + EX_TABLE(0b,2b) |
992 | + EX_TABLE(1b,2b) |
993 | + : "+d" (ret) |
994 | + : "a" (addr) |
995 | + : "0", "cc"); |
996 | + return ret; |
997 | +} |
998 | +/* |
999 | * Init function for virtio |
1000 | * devices are in a single page above top of "normal" mem |
1001 | */ |
1002 | @@ -432,21 +452,23 @@ static int __init kvm_devices_init(void) |
1003 | if (!MACHINE_IS_KVM) |
1004 | return -ENODEV; |
1005 | |
1006 | + if (test_devices_support(real_memory_size) < 0) |
1007 | + return -ENODEV; |
1008 | + |
1009 | + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); |
1010 | + if (rc) |
1011 | + return rc; |
1012 | + |
1013 | + kvm_devices = (void *) real_memory_size; |
1014 | + |
1015 | kvm_root = root_device_register("kvm_s390"); |
1016 | if (IS_ERR(kvm_root)) { |
1017 | rc = PTR_ERR(kvm_root); |
1018 | printk(KERN_ERR "Could not register kvm_s390 root device"); |
1019 | + vmem_remove_mapping(real_memory_size, PAGE_SIZE); |
1020 | return rc; |
1021 | } |
1022 | |
1023 | - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); |
1024 | - if (rc) { |
1025 | - root_device_unregister(kvm_root); |
1026 | - return rc; |
1027 | - } |
1028 | - |
1029 | - kvm_devices = (void *) real_memory_size; |
1030 | - |
1031 | INIT_WORK(&hotplug_work, hotplug_devices); |
1032 | |
1033 | service_subclass_irq_register(); |
1034 | diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c |
1035 | index f2a73bd..071e058 100644 |
1036 | --- a/drivers/staging/zram/zram_drv.c |
1037 | +++ b/drivers/staging/zram/zram_drv.c |
1038 | @@ -228,11 +228,12 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
1039 | return 0; |
1040 | } |
1041 | |
1042 | - user_mem = kmap_atomic(page); |
1043 | if (is_partial_io(bvec)) |
1044 | /* Use a temporary buffer to decompress the page */ |
1045 | - uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1046 | - else |
1047 | + uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
1048 | + |
1049 | + user_mem = kmap_atomic(page); |
1050 | + if (!is_partial_io(bvec)) |
1051 | uncmem = user_mem; |
1052 | |
1053 | if (!uncmem) { |
1054 | @@ -279,7 +280,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, |
1055 | * This is a partial IO. We need to read the full page |
1056 | * before to write the changes. |
1057 | */ |
1058 | - uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1059 | + uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
1060 | if (!uncmem) { |
1061 | pr_info("Error allocating temp memory!\n"); |
1062 | ret = -ENOMEM; |
1063 | diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c |
1064 | index 0943ff0..94fda90 100644 |
1065 | --- a/drivers/target/target_core_device.c |
1066 | +++ b/drivers/target/target_core_device.c |
1067 | @@ -1195,6 +1195,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) |
1068 | |
1069 | int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) |
1070 | { |
1071 | + int block_size = dev->se_sub_dev->se_dev_attrib.block_size; |
1072 | + |
1073 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1074 | pr_err("dev[%p]: Unable to change SE Device" |
1075 | " fabric_max_sectors while dev_export_obj: %d count exists\n", |
1076 | @@ -1232,8 +1234,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) |
1077 | /* |
1078 | * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() |
1079 | */ |
1080 | + if (!block_size) { |
1081 | + block_size = 512; |
1082 | + pr_warn("Defaulting to 512 for zero block_size\n"); |
1083 | + } |
1084 | fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, |
1085 | - dev->se_sub_dev->se_dev_attrib.block_size); |
1086 | + block_size); |
1087 | |
1088 | dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; |
1089 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", |
1090 | diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c |
1091 | index 1e8e8ce..b94da78 100644 |
1092 | --- a/drivers/tty/n_gsm.c |
1093 | +++ b/drivers/tty/n_gsm.c |
1094 | @@ -1691,6 +1691,8 @@ static inline void dlci_put(struct gsm_dlci *dlci) |
1095 | kref_put(&dlci->ref, gsm_dlci_free); |
1096 | } |
1097 | |
1098 | +static void gsm_destroy_network(struct gsm_dlci *dlci); |
1099 | + |
1100 | /** |
1101 | * gsm_dlci_release - release DLCI |
1102 | * @dlci: DLCI to destroy |
1103 | @@ -1704,9 +1706,19 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) |
1104 | { |
1105 | struct tty_struct *tty = tty_port_tty_get(&dlci->port); |
1106 | if (tty) { |
1107 | + mutex_lock(&dlci->mutex); |
1108 | + gsm_destroy_network(dlci); |
1109 | + mutex_unlock(&dlci->mutex); |
1110 | + |
1111 | + /* tty_vhangup needs the tty_lock, so unlock and |
1112 | + relock after doing the hangup. */ |
1113 | + tty_unlock(tty); |
1114 | tty_vhangup(tty); |
1115 | + tty_lock(tty); |
1116 | + tty_port_tty_set(&dlci->port, NULL); |
1117 | tty_kref_put(tty); |
1118 | } |
1119 | + dlci->state = DLCI_CLOSED; |
1120 | dlci_put(dlci); |
1121 | } |
1122 | |
1123 | @@ -2948,6 +2960,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp) |
1124 | |
1125 | if (dlci == NULL) |
1126 | return; |
1127 | + if (dlci->state == DLCI_CLOSED) |
1128 | + return; |
1129 | mutex_lock(&dlci->mutex); |
1130 | gsm_destroy_network(dlci); |
1131 | mutex_unlock(&dlci->mutex); |
1132 | @@ -2966,6 +2980,8 @@ out: |
1133 | static void gsmtty_hangup(struct tty_struct *tty) |
1134 | { |
1135 | struct gsm_dlci *dlci = tty->driver_data; |
1136 | + if (dlci->state == DLCI_CLOSED) |
1137 | + return; |
1138 | tty_port_hangup(&dlci->port); |
1139 | gsm_dlci_begin_close(dlci); |
1140 | } |
1141 | @@ -2973,9 +2989,12 @@ static void gsmtty_hangup(struct tty_struct *tty) |
1142 | static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, |
1143 | int len) |
1144 | { |
1145 | + int sent; |
1146 | struct gsm_dlci *dlci = tty->driver_data; |
1147 | + if (dlci->state == DLCI_CLOSED) |
1148 | + return -EINVAL; |
1149 | /* Stuff the bytes into the fifo queue */ |
1150 | - int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); |
1151 | + sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock); |
1152 | /* Need to kick the channel */ |
1153 | gsm_dlci_data_kick(dlci); |
1154 | return sent; |
1155 | @@ -2984,18 +3003,24 @@ static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, |
1156 | static int gsmtty_write_room(struct tty_struct *tty) |
1157 | { |
1158 | struct gsm_dlci *dlci = tty->driver_data; |
1159 | + if (dlci->state == DLCI_CLOSED) |
1160 | + return -EINVAL; |
1161 | return TX_SIZE - kfifo_len(dlci->fifo); |
1162 | } |
1163 | |
1164 | static int gsmtty_chars_in_buffer(struct tty_struct *tty) |
1165 | { |
1166 | struct gsm_dlci *dlci = tty->driver_data; |
1167 | + if (dlci->state == DLCI_CLOSED) |
1168 | + return -EINVAL; |
1169 | return kfifo_len(dlci->fifo); |
1170 | } |
1171 | |
1172 | static void gsmtty_flush_buffer(struct tty_struct *tty) |
1173 | { |
1174 | struct gsm_dlci *dlci = tty->driver_data; |
1175 | + if (dlci->state == DLCI_CLOSED) |
1176 | + return; |
1177 | /* Caution needed: If we implement reliable transport classes |
1178 | then the data being transmitted can't simply be junked once |
1179 | it has first hit the stack. Until then we can just blow it |
1180 | @@ -3014,6 +3039,8 @@ static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) |
1181 | static int gsmtty_tiocmget(struct tty_struct *tty) |
1182 | { |
1183 | struct gsm_dlci *dlci = tty->driver_data; |
1184 | + if (dlci->state == DLCI_CLOSED) |
1185 | + return -EINVAL; |
1186 | return dlci->modem_rx; |
1187 | } |
1188 | |
1189 | @@ -3023,6 +3050,8 @@ static int gsmtty_tiocmset(struct tty_struct *tty, |
1190 | struct gsm_dlci *dlci = tty->driver_data; |
1191 | unsigned int modem_tx = dlci->modem_tx; |
1192 | |
1193 | + if (dlci->state == DLCI_CLOSED) |
1194 | + return -EINVAL; |
1195 | modem_tx &= ~clear; |
1196 | modem_tx |= set; |
1197 | |
1198 | @@ -3041,6 +3070,8 @@ static int gsmtty_ioctl(struct tty_struct *tty, |
1199 | struct gsm_netconfig nc; |
1200 | int index; |
1201 | |
1202 | + if (dlci->state == DLCI_CLOSED) |
1203 | + return -EINVAL; |
1204 | switch (cmd) { |
1205 | case GSMIOC_ENABLE_NET: |
1206 | if (copy_from_user(&nc, (void __user *)arg, sizeof(nc))) |
1207 | @@ -3067,6 +3098,9 @@ static int gsmtty_ioctl(struct tty_struct *tty, |
1208 | |
1209 | static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) |
1210 | { |
1211 | + struct gsm_dlci *dlci = tty->driver_data; |
1212 | + if (dlci->state == DLCI_CLOSED) |
1213 | + return; |
1214 | /* For the moment its fixed. In actual fact the speed information |
1215 | for the virtual channel can be propogated in both directions by |
1216 | the RPN control message. This however rapidly gets nasty as we |
1217 | @@ -3078,6 +3112,8 @@ static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) |
1218 | static void gsmtty_throttle(struct tty_struct *tty) |
1219 | { |
1220 | struct gsm_dlci *dlci = tty->driver_data; |
1221 | + if (dlci->state == DLCI_CLOSED) |
1222 | + return; |
1223 | if (tty->termios.c_cflag & CRTSCTS) |
1224 | dlci->modem_tx &= ~TIOCM_DTR; |
1225 | dlci->throttled = 1; |
1226 | @@ -3088,6 +3124,8 @@ static void gsmtty_throttle(struct tty_struct *tty) |
1227 | static void gsmtty_unthrottle(struct tty_struct *tty) |
1228 | { |
1229 | struct gsm_dlci *dlci = tty->driver_data; |
1230 | + if (dlci->state == DLCI_CLOSED) |
1231 | + return; |
1232 | if (tty->termios.c_cflag & CRTSCTS) |
1233 | dlci->modem_tx |= TIOCM_DTR; |
1234 | dlci->throttled = 0; |
1235 | @@ -3099,6 +3137,8 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) |
1236 | { |
1237 | struct gsm_dlci *dlci = tty->driver_data; |
1238 | int encode = 0; /* Off */ |
1239 | + if (dlci->state == DLCI_CLOSED) |
1240 | + return -EINVAL; |
1241 | |
1242 | if (state == -1) /* "On indefinitely" - we can't encode this |
1243 | properly */ |
1244 | diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
1245 | index 5981912..5c110c8 100644 |
1246 | --- a/drivers/tty/serial/imx.c |
1247 | +++ b/drivers/tty/serial/imx.c |
1248 | @@ -1213,8 +1213,14 @@ imx_console_write(struct console *co, const char *s, unsigned int count) |
1249 | struct imx_port_ucrs old_ucr; |
1250 | unsigned int ucr1; |
1251 | unsigned long flags; |
1252 | + int locked = 1; |
1253 | |
1254 | - spin_lock_irqsave(&sport->port.lock, flags); |
1255 | + if (sport->port.sysrq) |
1256 | + locked = 0; |
1257 | + else if (oops_in_progress) |
1258 | + locked = spin_trylock_irqsave(&sport->port.lock, flags); |
1259 | + else |
1260 | + spin_lock_irqsave(&sport->port.lock, flags); |
1261 | |
1262 | /* |
1263 | * First, save UCR1/2/3 and then disable interrupts |
1264 | @@ -1241,7 +1247,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) |
1265 | |
1266 | imx_port_ucrs_restore(&sport->port, &old_ucr); |
1267 | |
1268 | - spin_unlock_irqrestore(&sport->port.lock, flags); |
1269 | + if (locked) |
1270 | + spin_unlock_irqrestore(&sport->port.lock, flags); |
1271 | } |
1272 | |
1273 | /* |
1274 | diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c |
1275 | index 12b1fa0..f7d2e89 100644 |
1276 | --- a/drivers/tty/tty_ioctl.c |
1277 | +++ b/drivers/tty/tty_ioctl.c |
1278 | @@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) |
1279 | if (opt & TERMIOS_WAIT) { |
1280 | tty_wait_until_sent(tty, 0); |
1281 | if (signal_pending(current)) |
1282 | - return -EINTR; |
1283 | + return -ERESTARTSYS; |
1284 | } |
1285 | |
1286 | tty_set_termios(tty, &tmp_termios); |
1287 | @@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt) |
1288 | if (opt & TERMIOS_WAIT) { |
1289 | tty_wait_until_sent(tty, 0); |
1290 | if (signal_pending(current)) |
1291 | - return -EINTR; |
1292 | + return -ERESTARTSYS; |
1293 | } |
1294 | |
1295 | mutex_lock(&tty->termios_mutex); |
1296 | diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c |
1297 | index 4e0d0c3..e94c11c 100644 |
1298 | --- a/drivers/tty/vt/vt.c |
1299 | +++ b/drivers/tty/vt/vt.c |
1300 | @@ -539,7 +539,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr) |
1301 | { |
1302 | unsigned short *p = (unsigned short *) vc->vc_pos; |
1303 | |
1304 | - scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x) * 2); |
1305 | + scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2); |
1306 | scr_memsetw(p, vc->vc_video_erase_char, nr * 2); |
1307 | vc->vc_need_wrap = 0; |
1308 | if (DO_UPDATE(vc)) |
1309 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
1310 | index b786b7d..50fbc30 100644 |
1311 | --- a/drivers/usb/serial/ftdi_sio.c |
1312 | +++ b/drivers/usb/serial/ftdi_sio.c |
1313 | @@ -1884,24 +1884,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on) |
1314 | { |
1315 | struct ftdi_private *priv = usb_get_serial_port_data(port); |
1316 | |
1317 | - mutex_lock(&port->serial->disc_mutex); |
1318 | - if (!port->serial->disconnected) { |
1319 | - /* Disable flow control */ |
1320 | - if (!on && usb_control_msg(port->serial->dev, |
1321 | + /* Disable flow control */ |
1322 | + if (!on) { |
1323 | + if (usb_control_msg(port->serial->dev, |
1324 | usb_sndctrlpipe(port->serial->dev, 0), |
1325 | FTDI_SIO_SET_FLOW_CTRL_REQUEST, |
1326 | FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, |
1327 | 0, priv->interface, NULL, 0, |
1328 | WDR_TIMEOUT) < 0) { |
1329 | - dev_err(&port->dev, "error from flowcontrol urb\n"); |
1330 | + dev_err(&port->dev, "error from flowcontrol urb\n"); |
1331 | } |
1332 | - /* drop RTS and DTR */ |
1333 | - if (on) |
1334 | - set_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
1335 | - else |
1336 | - clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
1337 | } |
1338 | - mutex_unlock(&port->serial->disc_mutex); |
1339 | + /* drop RTS and DTR */ |
1340 | + if (on) |
1341 | + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
1342 | + else |
1343 | + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
1344 | } |
1345 | |
1346 | /* |
1347 | diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c |
1348 | index 8a20810..6602059 100644 |
1349 | --- a/drivers/usb/serial/mct_u232.c |
1350 | +++ b/drivers/usb/serial/mct_u232.c |
1351 | @@ -503,19 +503,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) |
1352 | unsigned int control_state; |
1353 | struct mct_u232_private *priv = usb_get_serial_port_data(port); |
1354 | |
1355 | - mutex_lock(&port->serial->disc_mutex); |
1356 | - if (!port->serial->disconnected) { |
1357 | - /* drop DTR and RTS */ |
1358 | - spin_lock_irq(&priv->lock); |
1359 | - if (on) |
1360 | - priv->control_state |= TIOCM_DTR | TIOCM_RTS; |
1361 | - else |
1362 | - priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); |
1363 | - control_state = priv->control_state; |
1364 | - spin_unlock_irq(&priv->lock); |
1365 | - mct_u232_set_modem_ctrl(port, control_state); |
1366 | - } |
1367 | - mutex_unlock(&port->serial->disc_mutex); |
1368 | + spin_lock_irq(&priv->lock); |
1369 | + if (on) |
1370 | + priv->control_state |= TIOCM_DTR | TIOCM_RTS; |
1371 | + else |
1372 | + priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); |
1373 | + control_state = priv->control_state; |
1374 | + spin_unlock_irq(&priv->lock); |
1375 | + |
1376 | + mct_u232_set_modem_ctrl(port, control_state); |
1377 | } |
1378 | |
1379 | static void mct_u232_close(struct usb_serial_port *port) |
1380 | diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c |
1381 | index ffcfc96..0cba503 100644 |
1382 | --- a/drivers/usb/serial/quatech2.c |
1383 | +++ b/drivers/usb/serial/quatech2.c |
1384 | @@ -947,19 +947,17 @@ static void qt2_dtr_rts(struct usb_serial_port *port, int on) |
1385 | struct usb_device *dev = port->serial->dev; |
1386 | struct qt2_port_private *port_priv = usb_get_serial_port_data(port); |
1387 | |
1388 | - mutex_lock(&port->serial->disc_mutex); |
1389 | - if (!port->serial->disconnected) { |
1390 | - /* Disable flow control */ |
1391 | - if (!on && qt2_setregister(dev, port_priv->device_port, |
1392 | + /* Disable flow control */ |
1393 | + if (!on) { |
1394 | + if (qt2_setregister(dev, port_priv->device_port, |
1395 | UART_MCR, 0) < 0) |
1396 | dev_warn(&port->dev, "error from flowcontrol urb\n"); |
1397 | - /* drop RTS and DTR */ |
1398 | - if (on) |
1399 | - update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); |
1400 | - else |
1401 | - update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); |
1402 | } |
1403 | - mutex_unlock(&port->serial->disc_mutex); |
1404 | + /* drop RTS and DTR */ |
1405 | + if (on) |
1406 | + update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); |
1407 | + else |
1408 | + update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); |
1409 | } |
1410 | |
1411 | static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) |
1412 | diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c |
1413 | index 270860f..4eed702 100644 |
1414 | --- a/drivers/usb/serial/sierra.c |
1415 | +++ b/drivers/usb/serial/sierra.c |
1416 | @@ -861,19 +861,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) |
1417 | |
1418 | static void sierra_dtr_rts(struct usb_serial_port *port, int on) |
1419 | { |
1420 | - struct usb_serial *serial = port->serial; |
1421 | struct sierra_port_private *portdata; |
1422 | |
1423 | portdata = usb_get_serial_port_data(port); |
1424 | portdata->rts_state = on; |
1425 | portdata->dtr_state = on; |
1426 | |
1427 | - if (serial->dev) { |
1428 | - mutex_lock(&serial->disc_mutex); |
1429 | - if (!serial->disconnected) |
1430 | - sierra_send_setup(port); |
1431 | - mutex_unlock(&serial->disc_mutex); |
1432 | - } |
1433 | + sierra_send_setup(port); |
1434 | } |
1435 | |
1436 | static int sierra_startup(struct usb_serial *serial) |
1437 | diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c |
1438 | index 868d1e6..5238bf8 100644 |
1439 | --- a/drivers/usb/serial/ssu100.c |
1440 | +++ b/drivers/usb/serial/ssu100.c |
1441 | @@ -508,19 +508,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) |
1442 | { |
1443 | struct usb_device *dev = port->serial->dev; |
1444 | |
1445 | - mutex_lock(&port->serial->disc_mutex); |
1446 | - if (!port->serial->disconnected) { |
1447 | - /* Disable flow control */ |
1448 | - if (!on && |
1449 | - ssu100_setregister(dev, 0, UART_MCR, 0) < 0) |
1450 | + /* Disable flow control */ |
1451 | + if (!on) { |
1452 | + if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0) |
1453 | dev_err(&port->dev, "error from flowcontrol urb\n"); |
1454 | - /* drop RTS and DTR */ |
1455 | - if (on) |
1456 | - set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); |
1457 | - else |
1458 | - clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); |
1459 | } |
1460 | - mutex_unlock(&port->serial->disc_mutex); |
1461 | + /* drop RTS and DTR */ |
1462 | + if (on) |
1463 | + set_mctrl(dev, TIOCM_DTR | TIOCM_RTS); |
1464 | + else |
1465 | + clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS); |
1466 | } |
1467 | |
1468 | static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) |
1469 | diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c |
1470 | index 73b8e05..f057f4d 100644 |
1471 | --- a/drivers/usb/serial/usb-serial.c |
1472 | +++ b/drivers/usb/serial/usb-serial.c |
1473 | @@ -687,10 +687,20 @@ static int serial_carrier_raised(struct tty_port *port) |
1474 | static void serial_dtr_rts(struct tty_port *port, int on) |
1475 | { |
1476 | struct usb_serial_port *p = container_of(port, struct usb_serial_port, port); |
1477 | - struct usb_serial_driver *drv = p->serial->type; |
1478 | + struct usb_serial *serial = p->serial; |
1479 | + struct usb_serial_driver *drv = serial->type; |
1480 | |
1481 | - if (drv->dtr_rts) |
1482 | + if (!drv->dtr_rts) |
1483 | + return; |
1484 | + /* |
1485 | + * Work-around bug in the tty-layer which can result in dtr_rts |
1486 | + * being called after a disconnect (and tty_unregister_device |
1487 | + * has returned). Remove once bug has been squashed. |
1488 | + */ |
1489 | + mutex_lock(&serial->disc_mutex); |
1490 | + if (!serial->disconnected) |
1491 | drv->dtr_rts(p, on); |
1492 | + mutex_unlock(&serial->disc_mutex); |
1493 | } |
1494 | |
1495 | static const struct tty_port_operations serial_port_ops = { |
1496 | diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c |
1497 | index a3e9c09..2897859 100644 |
1498 | --- a/drivers/usb/serial/usb_wwan.c |
1499 | +++ b/drivers/usb/serial/usb_wwan.c |
1500 | @@ -39,7 +39,6 @@ |
1501 | |
1502 | void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) |
1503 | { |
1504 | - struct usb_serial *serial = port->serial; |
1505 | struct usb_wwan_port_private *portdata; |
1506 | struct usb_wwan_intf_private *intfdata; |
1507 | |
1508 | @@ -49,12 +48,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) |
1509 | return; |
1510 | |
1511 | portdata = usb_get_serial_port_data(port); |
1512 | - mutex_lock(&serial->disc_mutex); |
1513 | + /* FIXME: locking */ |
1514 | portdata->rts_state = on; |
1515 | portdata->dtr_state = on; |
1516 | - if (serial->dev) |
1517 | - intfdata->send_setup(port); |
1518 | - mutex_unlock(&serial->disc_mutex); |
1519 | + |
1520 | + intfdata->send_setup(port); |
1521 | } |
1522 | EXPORT_SYMBOL(usb_wwan_dtr_rts); |
1523 | |
1524 | diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c |
1525 | index 77d1fdb..716daaa 100644 |
1526 | --- a/drivers/video/backlight/adp8860_bl.c |
1527 | +++ b/drivers/video/backlight/adp8860_bl.c |
1528 | @@ -783,7 +783,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) |
1529 | |
1530 | static int adp8860_i2c_resume(struct i2c_client *client) |
1531 | { |
1532 | - adp8860_set_bits(client, ADP8860_MDCR, NSTBY); |
1533 | + adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN); |
1534 | |
1535 | return 0; |
1536 | } |
1537 | diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c |
1538 | index edf7f91..f58a189 100644 |
1539 | --- a/drivers/video/backlight/adp8870_bl.c |
1540 | +++ b/drivers/video/backlight/adp8870_bl.c |
1541 | @@ -957,7 +957,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) |
1542 | |
1543 | static int adp8870_i2c_resume(struct i2c_client *client) |
1544 | { |
1545 | - adp8870_set_bits(client, ADP8870_MDCR, NSTBY); |
1546 | + adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN); |
1547 | |
1548 | return 0; |
1549 | } |
1550 | diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c |
1551 | index b1f60a0..b2db77e 100644 |
1552 | --- a/drivers/xen/evtchn.c |
1553 | +++ b/drivers/xen/evtchn.c |
1554 | @@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) |
1555 | u->name, (void *)(unsigned long)port); |
1556 | if (rc >= 0) |
1557 | rc = evtchn_make_refcounted(port); |
1558 | + else { |
1559 | + /* bind failed, should close the port now */ |
1560 | + struct evtchn_close close; |
1561 | + close.port = port; |
1562 | + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) |
1563 | + BUG(); |
1564 | + set_port_user(port, NULL); |
1565 | + } |
1566 | |
1567 | return rc; |
1568 | } |
1569 | @@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) |
1570 | { |
1571 | int irq = irq_from_evtchn(port); |
1572 | |
1573 | + BUG_ON(irq < 0); |
1574 | + |
1575 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); |
1576 | |
1577 | set_port_user(port, NULL); |
1578 | diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c |
1579 | index 97f5d26..37c1f82 100644 |
1580 | --- a/drivers/xen/xen-pciback/pciback_ops.c |
1581 | +++ b/drivers/xen/xen-pciback/pciback_ops.c |
1582 | @@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, |
1583 | struct pci_dev *dev, struct xen_pci_op *op) |
1584 | { |
1585 | struct xen_pcibk_dev_data *dev_data; |
1586 | - int otherend = pdev->xdev->otherend_id; |
1587 | int status; |
1588 | |
1589 | if (unlikely(verbose_request)) |
1590 | @@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, |
1591 | status = pci_enable_msi(dev); |
1592 | |
1593 | if (status) { |
1594 | - printk(KERN_ERR "error enable msi for guest %x status %x\n", |
1595 | - otherend, status); |
1596 | + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", |
1597 | + pci_name(dev), pdev->xdev->otherend_id, |
1598 | + status); |
1599 | op->value = 0; |
1600 | return XEN_PCI_ERR_op_failed; |
1601 | } |
1602 | @@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, |
1603 | pci_name(dev), i, |
1604 | op->msix_entries[i].vector); |
1605 | } |
1606 | - } else { |
1607 | - printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", |
1608 | - pci_name(dev), result); |
1609 | - } |
1610 | + } else |
1611 | + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", |
1612 | + pci_name(dev), pdev->xdev->otherend_id, |
1613 | + result); |
1614 | kfree(entries); |
1615 | |
1616 | op->value = result; |
1617 | diff --git a/fs/block_dev.c b/fs/block_dev.c |
1618 | index ab3a456..c42cd59 100644 |
1619 | --- a/fs/block_dev.c |
1620 | +++ b/fs/block_dev.c |
1621 | @@ -994,6 +994,7 @@ int revalidate_disk(struct gendisk *disk) |
1622 | |
1623 | mutex_lock(&bdev->bd_mutex); |
1624 | check_disk_size_change(disk, bdev); |
1625 | + bdev->bd_invalidated = 0; |
1626 | mutex_unlock(&bdev->bd_mutex); |
1627 | bdput(bdev); |
1628 | return ret; |
1629 | diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c |
1630 | index 1fd3ae2..8f331e0 100644 |
1631 | --- a/fs/gfs2/bmap.c |
1632 | +++ b/fs/gfs2/bmap.c |
1633 | @@ -1240,6 +1240,10 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) |
1634 | |
1635 | inode_dio_wait(inode); |
1636 | |
1637 | + ret = gfs2_rs_alloc(GFS2_I(inode)); |
1638 | + if (ret) |
1639 | + return ret; |
1640 | + |
1641 | oldsize = inode->i_size; |
1642 | if (newsize >= oldsize) |
1643 | return do_grow(inode, newsize); |
1644 | diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c |
1645 | index 05d2912..a5657ff 100644 |
1646 | --- a/fs/lockd/clntproc.c |
1647 | +++ b/fs/lockd/clntproc.c |
1648 | @@ -551,6 +551,9 @@ again: |
1649 | status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); |
1650 | if (status < 0) |
1651 | break; |
1652 | + /* Resend the blocking lock request after a server reboot */ |
1653 | + if (resp->status == nlm_lck_denied_grace_period) |
1654 | + continue; |
1655 | if (resp->status != nlm_lck_blocked) |
1656 | break; |
1657 | } |
1658 | diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c |
1659 | index f1027b0..1e481c5 100644 |
1660 | --- a/fs/nfs/blocklayout/blocklayout.c |
1661 | +++ b/fs/nfs/blocklayout/blocklayout.c |
1662 | @@ -1272,6 +1272,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = { |
1663 | static struct pnfs_layoutdriver_type blocklayout_type = { |
1664 | .id = LAYOUT_BLOCK_VOLUME, |
1665 | .name = "LAYOUT_BLOCK_VOLUME", |
1666 | + .owner = THIS_MODULE, |
1667 | .read_pagelist = bl_read_pagelist, |
1668 | .write_pagelist = bl_write_pagelist, |
1669 | .alloc_layout_hdr = bl_alloc_layout_hdr, |
1670 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
1671 | index 4432b2f..50bf31d 100644 |
1672 | --- a/fs/nfs/nfs4proc.c |
1673 | +++ b/fs/nfs/nfs4proc.c |
1674 | @@ -6517,7 +6517,8 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) |
1675 | status = nfs4_wait_for_completion_rpc_task(task); |
1676 | if (status == 0) |
1677 | status = task->tk_status; |
1678 | - if (status == 0) |
1679 | + /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ |
1680 | + if (status == 0 && lgp->res.layoutp->len) |
1681 | lseg = pnfs_layout_process(lgp); |
1682 | rpc_put_task(task); |
1683 | dprintk("<-- %s status=%d\n", __func__, status); |
1684 | diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c |
1685 | index c6f9906..88f9611 100644 |
1686 | --- a/fs/nfs/objlayout/objio_osd.c |
1687 | +++ b/fs/nfs/objlayout/objio_osd.c |
1688 | @@ -647,6 +647,7 @@ static struct pnfs_layoutdriver_type objlayout_type = { |
1689 | .flags = PNFS_LAYOUTRET_ON_SETATTR | |
1690 | PNFS_LAYOUTRET_ON_ERROR, |
1691 | |
1692 | + .owner = THIS_MODULE, |
1693 | .alloc_layout_hdr = objlayout_alloc_layout_hdr, |
1694 | .free_layout_hdr = objlayout_free_layout_hdr, |
1695 | |
1696 | diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c |
1697 | index c311dda..448744c 100644 |
1698 | --- a/fs/notify/inotify/inotify_user.c |
1699 | +++ b/fs/notify/inotify/inotify_user.c |
1700 | @@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, |
1701 | |
1702 | /* don't allow invalid bits: we don't want flags set */ |
1703 | mask = inotify_arg_to_mask(arg); |
1704 | - if (unlikely(!(mask & IN_ALL_EVENTS))) |
1705 | - return -EINVAL; |
1706 | |
1707 | fsn_mark = fsnotify_find_inode_mark(group, inode); |
1708 | if (!fsn_mark) |
1709 | @@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group, |
1710 | |
1711 | /* don't allow invalid bits: we don't want flags set */ |
1712 | mask = inotify_arg_to_mask(arg); |
1713 | - if (unlikely(!(mask & IN_ALL_EVENTS))) |
1714 | - return -EINVAL; |
1715 | |
1716 | tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); |
1717 | if (unlikely(!tmp_i_mark)) |
1718 | diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c |
1719 | index 4f7795f..88577eb 100644 |
1720 | --- a/fs/ocfs2/dlmglue.c |
1721 | +++ b/fs/ocfs2/dlmglue.c |
1722 | @@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb, |
1723 | * everything is up to the caller :) */ |
1724 | status = ocfs2_should_refresh_lock_res(lockres); |
1725 | if (status < 0) { |
1726 | + ocfs2_cluster_unlock(osb, lockres, level); |
1727 | mlog_errno(status); |
1728 | goto bail; |
1729 | } |
1730 | @@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb, |
1731 | |
1732 | ocfs2_complete_lock_res_refresh(lockres, status); |
1733 | |
1734 | - if (status < 0) |
1735 | + if (status < 0) { |
1736 | + ocfs2_cluster_unlock(osb, lockres, level); |
1737 | mlog_errno(status); |
1738 | + } |
1739 | ocfs2_track_lock_refresh(lockres); |
1740 | } |
1741 | bail: |
1742 | diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h |
1743 | index dbb95db..ca2560c 100644 |
1744 | --- a/include/uapi/linux/serial_core.h |
1745 | +++ b/include/uapi/linux/serial_core.h |
1746 | @@ -50,7 +50,7 @@ |
1747 | #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ |
1748 | #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ |
1749 | #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ |
1750 | -#define PORT_BRCM_TRUMANAGE 24 |
1751 | +#define PORT_BRCM_TRUMANAGE 25 |
1752 | #define PORT_MAX_8250 25 /* max port ID */ |
1753 | |
1754 | /* |
1755 | diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h |
1756 | index ac90037..d2314be 100644 |
1757 | --- a/include/uapi/linux/usb/audio.h |
1758 | +++ b/include/uapi/linux/usb/audio.h |
1759 | @@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de |
1760 | int protocol) |
1761 | { |
1762 | __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); |
1763 | - return desc->baSourceID[desc->bNrInPins + control_size]; |
1764 | + return *(uac_processing_unit_bmControls(desc, protocol) |
1765 | + + control_size); |
1766 | } |
1767 | |
1768 | static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, |
1769 | int protocol) |
1770 | { |
1771 | __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); |
1772 | - return &desc->baSourceID[desc->bNrInPins + control_size + 1]; |
1773 | + return uac_processing_unit_bmControls(desc, protocol) |
1774 | + + control_size + 1; |
1775 | } |
1776 | |
1777 | /* 4.5.2 Class-Specific AS Interface Descriptor */ |
1778 | diff --git a/kernel/futex.c b/kernel/futex.c |
1779 | index 19eb089..8879430 100644 |
1780 | --- a/kernel/futex.c |
1781 | +++ b/kernel/futex.c |
1782 | @@ -2471,8 +2471,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, |
1783 | if (!futex_cmpxchg_enabled) |
1784 | return -ENOSYS; |
1785 | |
1786 | - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); |
1787 | - |
1788 | rcu_read_lock(); |
1789 | |
1790 | ret = -ESRCH; |
1791 | diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c |
1792 | index 83e368b..a9642d5 100644 |
1793 | --- a/kernel/futex_compat.c |
1794 | +++ b/kernel/futex_compat.c |
1795 | @@ -142,8 +142,6 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, |
1796 | if (!futex_cmpxchg_enabled) |
1797 | return -ENOSYS; |
1798 | |
1799 | - WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n"); |
1800 | - |
1801 | rcu_read_lock(); |
1802 | |
1803 | ret = -ESRCH; |
1804 | diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c |
1805 | index 6db7a5e..cdd5607 100644 |
1806 | --- a/kernel/hrtimer.c |
1807 | +++ b/kernel/hrtimer.c |
1808 | @@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) |
1809 | * and expiry check is done in the hrtimer_interrupt or in the softirq. |
1810 | */ |
1811 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
1812 | - struct hrtimer_clock_base *base, |
1813 | - int wakeup) |
1814 | + struct hrtimer_clock_base *base) |
1815 | { |
1816 | - if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { |
1817 | - if (wakeup) { |
1818 | - raw_spin_unlock(&base->cpu_base->lock); |
1819 | - raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1820 | - raw_spin_lock(&base->cpu_base->lock); |
1821 | - } else |
1822 | - __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1823 | - |
1824 | - return 1; |
1825 | - } |
1826 | - |
1827 | - return 0; |
1828 | + return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); |
1829 | } |
1830 | |
1831 | static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) |
1832 | @@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; } |
1833 | static inline void |
1834 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } |
1835 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
1836 | - struct hrtimer_clock_base *base, |
1837 | - int wakeup) |
1838 | + struct hrtimer_clock_base *base) |
1839 | { |
1840 | return 0; |
1841 | } |
1842 | @@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
1843 | * |
1844 | * XXX send_remote_softirq() ? |
1845 | */ |
1846 | - if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) |
1847 | - hrtimer_enqueue_reprogram(timer, new_base, wakeup); |
1848 | + if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) |
1849 | + && hrtimer_enqueue_reprogram(timer, new_base)) { |
1850 | + if (wakeup) { |
1851 | + /* |
1852 | + * We need to drop cpu_base->lock to avoid a |
1853 | + * lock ordering issue vs. rq->lock. |
1854 | + */ |
1855 | + raw_spin_unlock(&new_base->cpu_base->lock); |
1856 | + raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1857 | + local_irq_restore(flags); |
1858 | + return ret; |
1859 | + } else { |
1860 | + __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1861 | + } |
1862 | + } |
1863 | |
1864 | unlock_hrtimer_base(timer, &flags); |
1865 | |
1866 | diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c |
1867 | index 611cd60..7b5f012 100644 |
1868 | --- a/kernel/irq/spurious.c |
1869 | +++ b/kernel/irq/spurious.c |
1870 | @@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
1871 | |
1872 | /* |
1873 | * All handlers must agree on IRQF_SHARED, so we test just the |
1874 | - * first. Check for action->next as well. |
1875 | + * first. |
1876 | */ |
1877 | action = desc->action; |
1878 | if (!action || !(action->flags & IRQF_SHARED) || |
1879 | - (action->flags & __IRQF_TIMER) || |
1880 | - (action->handler(irq, action->dev_id) == IRQ_HANDLED) || |
1881 | - !action->next) |
1882 | + (action->flags & __IRQF_TIMER)) |
1883 | goto out; |
1884 | |
1885 | /* Already running on another processor */ |
1886 | @@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
1887 | do { |
1888 | if (handle_irq_event(desc) == IRQ_HANDLED) |
1889 | ret = IRQ_HANDLED; |
1890 | + /* Make sure that there is still a valid action */ |
1891 | action = desc->action; |
1892 | } while ((desc->istate & IRQS_PENDING) && action); |
1893 | desc->istate &= ~IRQS_POLL_INPROGRESS; |
1894 | diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c |
1895 | index 125cb67..acbb79c 100644 |
1896 | --- a/kernel/posix-cpu-timers.c |
1897 | +++ b/kernel/posix-cpu-timers.c |
1898 | @@ -1422,8 +1422,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, |
1899 | while (!signal_pending(current)) { |
1900 | if (timer.it.cpu.expires.sched == 0) { |
1901 | /* |
1902 | - * Our timer fired and was reset. |
1903 | + * Our timer fired and was reset, below |
1904 | + * deletion can not fail. |
1905 | */ |
1906 | + posix_cpu_timer_del(&timer); |
1907 | spin_unlock_irq(&timer.it_lock); |
1908 | return 0; |
1909 | } |
1910 | @@ -1441,9 +1443,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, |
1911 | * We were interrupted by a signal. |
1912 | */ |
1913 | sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); |
1914 | - posix_cpu_timer_set(&timer, 0, &zero_it, it); |
1915 | + error = posix_cpu_timer_set(&timer, 0, &zero_it, it); |
1916 | + if (!error) { |
1917 | + /* |
1918 | + * Timer is now unarmed, deletion can not fail. |
1919 | + */ |
1920 | + posix_cpu_timer_del(&timer); |
1921 | + } |
1922 | spin_unlock_irq(&timer.it_lock); |
1923 | |
1924 | + while (error == TIMER_RETRY) { |
1925 | + /* |
1926 | + * We need to handle case when timer was or is in the |
1927 | + * middle of firing. In other cases we already freed |
1928 | + * resources. |
1929 | + */ |
1930 | + spin_lock_irq(&timer.it_lock); |
1931 | + error = posix_cpu_timer_del(&timer); |
1932 | + spin_unlock_irq(&timer.it_lock); |
1933 | + } |
1934 | + |
1935 | if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { |
1936 | /* |
1937 | * It actually did fire already. |
1938 | diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl |
1939 | index eb51d76..3f42652 100644 |
1940 | --- a/kernel/timeconst.pl |
1941 | +++ b/kernel/timeconst.pl |
1942 | @@ -369,10 +369,8 @@ if ($hz eq '--can') { |
1943 | die "Usage: $0 HZ\n"; |
1944 | } |
1945 | |
1946 | - @val = @{$canned_values{$hz}}; |
1947 | - if (!defined(@val)) { |
1948 | - @val = compute_values($hz); |
1949 | - } |
1950 | + $cv = $canned_values{$hz}; |
1951 | + @val = defined($cv) ? @$cv : compute_values($hz); |
1952 | output($hz, @val); |
1953 | } |
1954 | exit 0; |
1955 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
1956 | index 1dae900..bb05784 100644 |
1957 | --- a/kernel/workqueue.c |
1958 | +++ b/kernel/workqueue.c |
1959 | @@ -1350,7 +1350,7 @@ void delayed_work_timer_fn(unsigned long __data) |
1960 | /* should have been called from irqsafe timer with irq already off */ |
1961 | __queue_work(dwork->cpu, cwq->wq, &dwork->work); |
1962 | } |
1963 | -EXPORT_SYMBOL_GPL(delayed_work_timer_fn); |
1964 | +EXPORT_SYMBOL(delayed_work_timer_fn); |
1965 | |
1966 | static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, |
1967 | struct delayed_work *dwork, unsigned long delay) |
1968 | diff --git a/mm/fadvise.c b/mm/fadvise.c |
1969 | index a47f0f5..909ec55 100644 |
1970 | --- a/mm/fadvise.c |
1971 | +++ b/mm/fadvise.c |
1972 | @@ -17,6 +17,7 @@ |
1973 | #include <linux/fadvise.h> |
1974 | #include <linux/writeback.h> |
1975 | #include <linux/syscalls.h> |
1976 | +#include <linux/swap.h> |
1977 | |
1978 | #include <asm/unistd.h> |
1979 | |
1980 | @@ -120,9 +121,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) |
1981 | start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; |
1982 | end_index = (endbyte >> PAGE_CACHE_SHIFT); |
1983 | |
1984 | - if (end_index >= start_index) |
1985 | - invalidate_mapping_pages(mapping, start_index, |
1986 | + if (end_index >= start_index) { |
1987 | + unsigned long count = invalidate_mapping_pages(mapping, |
1988 | + start_index, end_index); |
1989 | + |
1990 | + /* |
1991 | + * If fewer pages were invalidated than expected then |
1992 | + * it is possible that some of the pages were on |
1993 | + * a per-cpu pagevec for a remote CPU. Drain all |
1994 | + * pagevecs and try again. |
1995 | + */ |
1996 | + if (count < (end_index - start_index + 1)) { |
1997 | + lru_add_drain_all(); |
1998 | + invalidate_mapping_pages(mapping, start_index, |
1999 | end_index); |
2000 | + } |
2001 | + } |
2002 | break; |
2003 | default: |
2004 | ret = -EINVAL; |
2005 | diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c |
2006 | index 8a5ac8c..f5c3d96 100644 |
2007 | --- a/mm/mmu_notifier.c |
2008 | +++ b/mm/mmu_notifier.c |
2009 | @@ -37,49 +37,51 @@ static struct srcu_struct srcu; |
2010 | void __mmu_notifier_release(struct mm_struct *mm) |
2011 | { |
2012 | struct mmu_notifier *mn; |
2013 | - struct hlist_node *n; |
2014 | int id; |
2015 | |
2016 | /* |
2017 | - * SRCU here will block mmu_notifier_unregister until |
2018 | - * ->release returns. |
2019 | + * srcu_read_lock() here will block synchronize_srcu() in |
2020 | + * mmu_notifier_unregister() until all registered |
2021 | + * ->release() callouts this function makes have |
2022 | + * returned. |
2023 | */ |
2024 | id = srcu_read_lock(&srcu); |
2025 | - hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) |
2026 | - /* |
2027 | - * if ->release runs before mmu_notifier_unregister it |
2028 | - * must be handled as it's the only way for the driver |
2029 | - * to flush all existing sptes and stop the driver |
2030 | - * from establishing any more sptes before all the |
2031 | - * pages in the mm are freed. |
2032 | - */ |
2033 | - if (mn->ops->release) |
2034 | - mn->ops->release(mn, mm); |
2035 | - srcu_read_unlock(&srcu, id); |
2036 | - |
2037 | spin_lock(&mm->mmu_notifier_mm->lock); |
2038 | while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { |
2039 | mn = hlist_entry(mm->mmu_notifier_mm->list.first, |
2040 | struct mmu_notifier, |
2041 | hlist); |
2042 | + |
2043 | /* |
2044 | - * We arrived before mmu_notifier_unregister so |
2045 | - * mmu_notifier_unregister will do nothing other than |
2046 | - * to wait ->release to finish and |
2047 | - * mmu_notifier_unregister to return. |
2048 | + * Unlink. This will prevent mmu_notifier_unregister() |
2049 | + * from also making the ->release() callout. |
2050 | */ |
2051 | hlist_del_init_rcu(&mn->hlist); |
2052 | + spin_unlock(&mm->mmu_notifier_mm->lock); |
2053 | + |
2054 | + /* |
2055 | + * Clear sptes. (see 'release' description in mmu_notifier.h) |
2056 | + */ |
2057 | + if (mn->ops->release) |
2058 | + mn->ops->release(mn, mm); |
2059 | + |
2060 | + spin_lock(&mm->mmu_notifier_mm->lock); |
2061 | } |
2062 | spin_unlock(&mm->mmu_notifier_mm->lock); |
2063 | |
2064 | /* |
2065 | - * synchronize_srcu here prevents mmu_notifier_release to |
2066 | - * return to exit_mmap (which would proceed freeing all pages |
2067 | - * in the mm) until the ->release method returns, if it was |
2068 | - * invoked by mmu_notifier_unregister. |
2069 | - * |
2070 | - * The mmu_notifier_mm can't go away from under us because one |
2071 | - * mm_count is hold by exit_mmap. |
2072 | + * All callouts to ->release() which we have done are complete. |
2073 | + * Allow synchronize_srcu() in mmu_notifier_unregister() to complete |
2074 | + */ |
2075 | + srcu_read_unlock(&srcu, id); |
2076 | + |
2077 | + /* |
2078 | + * mmu_notifier_unregister() may have unlinked a notifier and may |
2079 | + * still be calling out to it. Additionally, other notifiers |
2080 | + * may have been active via vmtruncate() et. al. Block here |
2081 | + * to ensure that all notifier callouts for this mm have been |
2082 | + * completed and the sptes are really cleaned up before returning |
2083 | + * to exit_mmap(). |
2084 | */ |
2085 | synchronize_srcu(&srcu); |
2086 | } |
2087 | @@ -294,31 +296,31 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) |
2088 | { |
2089 | BUG_ON(atomic_read(&mm->mm_count) <= 0); |
2090 | |
2091 | + spin_lock(&mm->mmu_notifier_mm->lock); |
2092 | if (!hlist_unhashed(&mn->hlist)) { |
2093 | - /* |
2094 | - * SRCU here will force exit_mmap to wait ->release to finish |
2095 | - * before freeing the pages. |
2096 | - */ |
2097 | int id; |
2098 | |
2099 | - id = srcu_read_lock(&srcu); |
2100 | /* |
2101 | - * exit_mmap will block in mmu_notifier_release to |
2102 | - * guarantee ->release is called before freeing the |
2103 | - * pages. |
2104 | + * Ensure we synchronize up with __mmu_notifier_release(). |
2105 | */ |
2106 | + id = srcu_read_lock(&srcu); |
2107 | + |
2108 | + hlist_del_rcu(&mn->hlist); |
2109 | + spin_unlock(&mm->mmu_notifier_mm->lock); |
2110 | + |
2111 | if (mn->ops->release) |
2112 | mn->ops->release(mn, mm); |
2113 | - srcu_read_unlock(&srcu, id); |
2114 | |
2115 | - spin_lock(&mm->mmu_notifier_mm->lock); |
2116 | - hlist_del_rcu(&mn->hlist); |
2117 | + /* |
2118 | + * Allow __mmu_notifier_release() to complete. |
2119 | + */ |
2120 | + srcu_read_unlock(&srcu, id); |
2121 | + } else |
2122 | spin_unlock(&mm->mmu_notifier_mm->lock); |
2123 | - } |
2124 | |
2125 | /* |
2126 | - * Wait any running method to finish, of course including |
2127 | - * ->release if it was run by mmu_notifier_relase instead of us. |
2128 | + * Wait for any running method to finish, including ->release() if it |
2129 | + * was run by __mmu_notifier_release() instead of us. |
2130 | */ |
2131 | synchronize_srcu(&srcu); |
2132 | |
2133 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
2134 | index ceb4168..2238a14 100644 |
2135 | --- a/mm/page_alloc.c |
2136 | +++ b/mm/page_alloc.c |
2137 | @@ -4351,10 +4351,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, |
2138 | * round what is now in bits to nearest long in bits, then return it in |
2139 | * bytes. |
2140 | */ |
2141 | -static unsigned long __init usemap_size(unsigned long zonesize) |
2142 | +static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) |
2143 | { |
2144 | unsigned long usemapsize; |
2145 | |
2146 | + zonesize += zone_start_pfn & (pageblock_nr_pages-1); |
2147 | usemapsize = roundup(zonesize, pageblock_nr_pages); |
2148 | usemapsize = usemapsize >> pageblock_order; |
2149 | usemapsize *= NR_PAGEBLOCK_BITS; |
2150 | @@ -4364,17 +4365,19 @@ static unsigned long __init usemap_size(unsigned long zonesize) |
2151 | } |
2152 | |
2153 | static void __init setup_usemap(struct pglist_data *pgdat, |
2154 | - struct zone *zone, unsigned long zonesize) |
2155 | + struct zone *zone, |
2156 | + unsigned long zone_start_pfn, |
2157 | + unsigned long zonesize) |
2158 | { |
2159 | - unsigned long usemapsize = usemap_size(zonesize); |
2160 | + unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); |
2161 | zone->pageblock_flags = NULL; |
2162 | if (usemapsize) |
2163 | zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, |
2164 | usemapsize); |
2165 | } |
2166 | #else |
2167 | -static inline void setup_usemap(struct pglist_data *pgdat, |
2168 | - struct zone *zone, unsigned long zonesize) {} |
2169 | +static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, |
2170 | + unsigned long zone_start_pfn, unsigned long zonesize) {} |
2171 | #endif /* CONFIG_SPARSEMEM */ |
2172 | |
2173 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
2174 | @@ -4492,7 +4495,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, |
2175 | continue; |
2176 | |
2177 | set_pageblock_order(); |
2178 | - setup_usemap(pgdat, zone, size); |
2179 | + setup_usemap(pgdat, zone, zone_start_pfn, size); |
2180 | ret = init_currently_empty_zone(zone, zone_start_pfn, |
2181 | size, MEMMAP_EARLY); |
2182 | BUG_ON(ret); |
2183 | diff --git a/mm/shmem.c b/mm/shmem.c |
2184 | index 50c5b8f..74d8fb7 100644 |
2185 | --- a/mm/shmem.c |
2186 | +++ b/mm/shmem.c |
2187 | @@ -2397,6 +2397,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
2188 | unsigned long inodes; |
2189 | int error = -EINVAL; |
2190 | |
2191 | + config.mpol = NULL; |
2192 | if (shmem_parse_options(data, &config, true)) |
2193 | return error; |
2194 | |
2195 | @@ -2421,8 +2422,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
2196 | sbinfo->max_inodes = config.max_inodes; |
2197 | sbinfo->free_inodes = config.max_inodes - inodes; |
2198 | |
2199 | - mpol_put(sbinfo->mpol); |
2200 | - sbinfo->mpol = config.mpol; /* transfers initial ref */ |
2201 | + /* |
2202 | + * Preserve previous mempolicy unless mpol remount option was specified. |
2203 | + */ |
2204 | + if (config.mpol) { |
2205 | + mpol_put(sbinfo->mpol); |
2206 | + sbinfo->mpol = config.mpol; /* transfers initial ref */ |
2207 | + } |
2208 | out: |
2209 | spin_unlock(&sbinfo->stat_lock); |
2210 | return error; |
2211 | diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c |
2212 | index 602cd63..750f44f 100644 |
2213 | --- a/net/core/sock_diag.c |
2214 | +++ b/net/core/sock_diag.c |
2215 | @@ -121,6 +121,9 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
2216 | if (nlmsg_len(nlh) < sizeof(*req)) |
2217 | return -EINVAL; |
2218 | |
2219 | + if (req->sdiag_family >= AF_MAX) |
2220 | + return -EINVAL; |
2221 | + |
2222 | hndl = sock_diag_lock_handler(req->sdiag_family); |
2223 | if (hndl == NULL) |
2224 | err = -ENOENT; |
2225 | diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c |
2226 | index c7e3c53..57d0187 100644 |
2227 | --- a/sound/pci/ali5451/ali5451.c |
2228 | +++ b/sound/pci/ali5451/ali5451.c |
2229 | @@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) |
2230 | |
2231 | spin_lock(&codec->reg_lock); |
2232 | if (!pvoice->running) { |
2233 | - spin_unlock_irq(&codec->reg_lock); |
2234 | + spin_unlock(&codec->reg_lock); |
2235 | return 0; |
2236 | } |
2237 | outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR)); |
2238 | diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c |
2239 | index a95e198..206626d 100644 |
2240 | --- a/sound/pci/hda/patch_hdmi.c |
2241 | +++ b/sound/pci/hda/patch_hdmi.c |
2242 | @@ -714,9 +714,10 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca) |
2243 | |
2244 | static void hdmi_setup_channel_mapping(struct hda_codec *codec, |
2245 | hda_nid_t pin_nid, bool non_pcm, int ca, |
2246 | - int channels, unsigned char *map) |
2247 | + int channels, unsigned char *map, |
2248 | + bool chmap_set) |
2249 | { |
2250 | - if (!non_pcm && map) { |
2251 | + if (!non_pcm && chmap_set) { |
2252 | hdmi_manual_setup_channel_mapping(codec, pin_nid, |
2253 | channels, map); |
2254 | } else { |
2255 | @@ -905,7 +906,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx, |
2256 | pin_nid, |
2257 | channels); |
2258 | hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca, |
2259 | - channels, per_pin->chmap); |
2260 | + channels, per_pin->chmap, |
2261 | + per_pin->chmap_set); |
2262 | hdmi_stop_infoframe_trans(codec, pin_nid); |
2263 | hdmi_fill_audio_infoframe(codec, pin_nid, |
2264 | ai.bytes, sizeof(ai)); |
2265 | @@ -915,7 +917,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx, |
2266 | * accordingly */ |
2267 | if (per_pin->non_pcm != non_pcm) |
2268 | hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca, |
2269 | - channels, per_pin->chmap); |
2270 | + channels, per_pin->chmap, |
2271 | + per_pin->chmap_set); |
2272 | } |
2273 | |
2274 | per_pin->non_pcm = non_pcm; |
2275 | @@ -1100,8 +1103,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, |
2276 | if (!static_hdmi_pcm && eld->eld_valid) { |
2277 | snd_hdmi_eld_update_pcm_info(eld, hinfo); |
2278 | if (hinfo->channels_min > hinfo->channels_max || |
2279 | - !hinfo->rates || !hinfo->formats) |
2280 | + !hinfo->rates || !hinfo->formats) { |
2281 | + per_cvt->assigned = 0; |
2282 | + hinfo->nid = 0; |
2283 | + snd_hda_spdif_ctls_unassign(codec, pin_idx); |
2284 | return -ENODEV; |
2285 | + } |
2286 | } |
2287 | |
2288 | /* Store the updated parameters */ |
2289 | @@ -1165,6 +1172,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) |
2290 | "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", |
2291 | codec->addr, pin_nid, eld->monitor_present, eld_valid); |
2292 | |
2293 | + eld->eld_valid = false; |
2294 | if (eld_valid) { |
2295 | if (!snd_hdmi_get_eld(eld, codec, pin_nid)) |
2296 | snd_hdmi_show_eld(eld); |
2297 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
2298 | index c98cb89..38a893f 100644 |
2299 | --- a/sound/pci/hda/patch_realtek.c |
2300 | +++ b/sound/pci/hda/patch_realtek.c |
2301 | @@ -5388,6 +5388,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
2302 | SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), |
2303 | SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), |
2304 | SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), |
2305 | + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), |
2306 | |
2307 | /* All Apple entries are in codec SSIDs */ |
2308 | SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), |
2309 | diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c |
2310 | index 46b3629..f1dec07 100644 |
2311 | --- a/sound/pci/rme32.c |
2312 | +++ b/sound/pci/rme32.c |
2313 | @@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream) |
2314 | spin_lock_irq(&rme32->lock); |
2315 | rme32->capture_substream = NULL; |
2316 | rme32->capture_periodsize = 0; |
2317 | - spin_unlock(&rme32->lock); |
2318 | + spin_unlock_irq(&rme32->lock); |
2319 | return 0; |
2320 | } |
2321 | |
2322 | diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
2323 | index 88d8ceb..b666c6b 100644 |
2324 | --- a/sound/usb/quirks-table.h |
2325 | +++ b/sound/usb/quirks-table.h |
2326 | @@ -1658,7 +1658,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), |
2327 | .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { |
2328 | /* .vendor_name = "Roland", */ |
2329 | /* .product_name = "A-PRO", */ |
2330 | - .ifnum = 1, |
2331 | + .ifnum = 0, |
2332 | .type = QUIRK_MIDI_FIXED_ENDPOINT, |
2333 | .data = & (const struct snd_usb_midi_endpoint_info) { |
2334 | .out_cables = 0x0003, |
2335 | diff --git a/tools/perf/Makefile b/tools/perf/Makefile |
2336 | index 627849f..c3e02b3 100644 |
2337 | --- a/tools/perf/Makefile |
2338 | +++ b/tools/perf/Makefile |
2339 | @@ -268,13 +268,13 @@ $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-event |
2340 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c |
2341 | |
2342 | $(OUTPUT)util/parse-events-bison.c: util/parse-events.y |
2343 | - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c |
2344 | + $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ |
2345 | |
2346 | $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c |
2347 | $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c |
2348 | |
2349 | $(OUTPUT)util/pmu-bison.c: util/pmu.y |
2350 | - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c |
2351 | + $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ |
2352 | |
2353 | $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c |
2354 | $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c |
2355 | diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c |
2356 | index f5a1e4f..947e20a 100644 |
2357 | --- a/tools/perf/ui/hist.c |
2358 | +++ b/tools/perf/ui/hist.c |
2359 | @@ -363,11 +363,15 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, |
2360 | if (!perf_hpp__format[i].cond) |
2361 | continue; |
2362 | |
2363 | + /* |
2364 | + * If there's no field_sep, we still need |
2365 | + * to display initial ' '. |
2366 | + */ |
2367 | if (!sep || !first) { |
2368 | ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); |
2369 | advance_hpp(hpp, ret); |
2370 | + } else |
2371 | first = false; |
2372 | - } |
2373 | |
2374 | if (color && perf_hpp__format[i].color) |
2375 | ret = perf_hpp__format[i].color(hpp, he); |
2376 | diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y |
2377 | index cd88209..85cdaed 100644 |
2378 | --- a/tools/perf/util/parse-events.y |
2379 | +++ b/tools/perf/util/parse-events.y |
2380 | @@ -1,5 +1,4 @@ |
2381 | %pure-parser |
2382 | -%name-prefix "parse_events_" |
2383 | %parse-param {void *_data} |
2384 | %parse-param {void *scanner} |
2385 | %lex-param {void* scanner} |
2386 | diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y |
2387 | index ec89804..bfd7e85 100644 |
2388 | --- a/tools/perf/util/pmu.y |
2389 | +++ b/tools/perf/util/pmu.y |
2390 | @@ -1,5 +1,4 @@ |
2391 | |
2392 | -%name-prefix "perf_pmu_" |
2393 | %parse-param {struct list_head *format} |
2394 | %parse-param {char *name} |
2395 |