Contents of /trunk/kernel26-alx/patches-2.6.37/0104-2.6.37.5-all-fixes.patch
Parent Directory | Revision Log
Revision 1714 -
(show annotations)
(download)
Tue Mar 27 14:22:06 2012 UTC (12 years, 6 months ago) by niro
File size: 107755 byte(s)
1 | diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices |
2 | index 87da405..9edb75d 100644 |
3 | --- a/Documentation/i2c/instantiating-devices |
4 | +++ b/Documentation/i2c/instantiating-devices |
5 | @@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) |
6 | (...) |
7 | i2c_adap = i2c_get_adapter(2); |
8 | memset(&i2c_info, 0, sizeof(struct i2c_board_info)); |
9 | - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); |
10 | + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); |
11 | isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, |
12 | normal_i2c, NULL); |
13 | i2c_put_adapter(i2c_adap); |
14 | diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h |
15 | index d840f4a..5bb95a1 100644 |
16 | --- a/arch/microblaze/include/asm/uaccess.h |
17 | +++ b/arch/microblaze/include/asm/uaccess.h |
18 | @@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, |
19 | { |
20 | /* normal memset with two words to __ex_table */ |
21 | __asm__ __volatile__ ( \ |
22 | - "1: sb r0, %2, r0;" \ |
23 | + "1: sb r0, %1, r0;" \ |
24 | " addik %0, %0, -1;" \ |
25 | " bneid %0, 1b;" \ |
26 | - " addik %2, %2, 1;" \ |
27 | + " addik %1, %1, 1;" \ |
28 | "2: " \ |
29 | __EX_TABLE_SECTION \ |
30 | ".word 1b,2b;" \ |
31 | ".previous;" \ |
32 | - : "=r"(n) \ |
33 | - : "0"(n), "r"(to) |
34 | + : "=r"(n), "=r"(to) \ |
35 | + : "0"(n), "1"(to) |
36 | ); |
37 | return n; |
38 | } |
39 | diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c |
40 | index e30e42a..956f946 100644 |
41 | --- a/arch/mips/alchemy/mtx-1/platform.c |
42 | +++ b/arch/mips/alchemy/mtx-1/platform.c |
43 | @@ -28,6 +28,8 @@ |
44 | #include <linux/mtd/physmap.h> |
45 | #include <mtd/mtd-abi.h> |
46 | |
47 | +#include <asm/mach-au1x00/au1xxx_eth.h> |
48 | + |
49 | static struct gpio_keys_button mtx1_gpio_button[] = { |
50 | { |
51 | .gpio = 207, |
52 | @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { |
53 | &mtx1_mtd, |
54 | }; |
55 | |
56 | +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { |
57 | + .phy_search_highest_addr = 1, |
58 | + .phy1_search_mac0 = 1, |
59 | +}; |
60 | + |
61 | static int __init mtx1_register_devices(void) |
62 | { |
63 | int rc; |
64 | |
65 | + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); |
66 | + |
67 | rc = gpio_request(mtx1_gpio_button[0].gpio, |
68 | mtx1_gpio_button[0].desc); |
69 | if (rc < 0) { |
70 | diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c |
71 | index d7d94b8..3948f1d 100644 |
72 | --- a/arch/parisc/kernel/irq.c |
73 | +++ b/arch/parisc/kernel/irq.c |
74 | @@ -108,7 +108,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) |
75 | int cpu_dest; |
76 | |
77 | /* timer and ipi have to always be received on all CPUs */ |
78 | - if (CHECK_IRQ_PER_CPU(irq)) { |
79 | + if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { |
80 | /* Bad linux design decision. The mask has already |
81 | * been set; we must reset it */ |
82 | cpumask_setall(irq_desc[irq].affinity); |
83 | diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h |
84 | index ff0005eec..bf92cc2 100644 |
85 | --- a/arch/powerpc/include/asm/reg.h |
86 | +++ b/arch/powerpc/include/asm/reg.h |
87 | @@ -878,6 +878,7 @@ |
88 | #define PV_970 0x0039 |
89 | #define PV_POWER5 0x003A |
90 | #define PV_POWER5p 0x003B |
91 | +#define PV_POWER7 0x003F |
92 | #define PV_970FX 0x003C |
93 | #define PV_630 0x0040 |
94 | #define PV_630p 0x0041 |
95 | diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c |
96 | index 3129c85..130712a 100644 |
97 | --- a/arch/powerpc/kernel/perf_event.c |
98 | +++ b/arch/powerpc/kernel/perf_event.c |
99 | @@ -1268,6 +1268,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) |
100 | return ip; |
101 | } |
102 | |
103 | +static bool pmc_overflow(unsigned long val) |
104 | +{ |
105 | + if ((int)val < 0) |
106 | + return true; |
107 | + |
108 | + /* |
109 | + * Events on POWER7 can roll back if a speculative event doesn't |
110 | + * eventually complete. Unfortunately in some rare cases they will |
111 | + * raise a performance monitor exception. We need to catch this to |
112 | + * ensure we reset the PMC. In all cases the PMC will be 256 or less |
113 | + * cycles from overflow. |
114 | + * |
115 | + * We only do this if the first pass fails to find any overflowing |
116 | + * PMCs because a user might set a period of less than 256 and we |
117 | + * don't want to mistakenly reset them. |
118 | + */ |
119 | + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) |
120 | + return true; |
121 | + |
122 | + return false; |
123 | +} |
124 | + |
125 | /* |
126 | * Performance monitor interrupt stuff |
127 | */ |
128 | @@ -1315,7 +1337,7 @@ static void perf_event_interrupt(struct pt_regs *regs) |
129 | if (is_limited_pmc(i + 1)) |
130 | continue; |
131 | val = read_pmc(i + 1); |
132 | - if ((int)val < 0) |
133 | + if (pmc_overflow(val)) |
134 | write_pmc(i + 1, 0); |
135 | } |
136 | } |
137 | diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h |
138 | index 177b016..33927d2 100644 |
139 | --- a/arch/x86/include/asm/pgtable-3level.h |
140 | +++ b/arch/x86/include/asm/pgtable-3level.h |
141 | @@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) |
142 | |
143 | static inline void pud_clear(pud_t *pudp) |
144 | { |
145 | - unsigned long pgd; |
146 | - |
147 | set_pud(pudp, __pud(0)); |
148 | |
149 | /* |
150 | @@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) |
151 | * section 8.1: in PAE mode we explicitly have to flush the |
152 | * TLB via cr3 if the top-level pgd is changed... |
153 | * |
154 | - * Make sure the pud entry we're updating is within the |
155 | - * current pgd to avoid unnecessary TLB flushes. |
156 | + * Currently all places where pud_clear() is called either have |
157 | + * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or |
158 | + * pud_clear_bad()), so we don't need TLB flush here. |
159 | */ |
160 | - pgd = read_cr3(); |
161 | - if (__pa(pudp) >= pgd && __pa(pudp) < |
162 | - (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) |
163 | - write_cr3(pgd); |
164 | } |
165 | |
166 | #ifdef CONFIG_SMP |
167 | diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c |
168 | index 5079f24..9495ac1 100644 |
169 | --- a/arch/x86/kernel/alternative.c |
170 | +++ b/arch/x86/kernel/alternative.c |
171 | @@ -417,7 +417,12 @@ int alternatives_text_reserved(void *start, void *end) |
172 | return 1; |
173 | } |
174 | } |
175 | - |
176 | + /* |
177 | + * Intel Archiecture Software Developer's Manual section 7.1.3 specifies |
178 | + * that a core serializing instruction such as "cpuid" should be |
179 | + * executed on _each_ core before the new instruction is made visible. |
180 | + */ |
181 | + sync_core(); |
182 | return 0; |
183 | } |
184 | #endif |
185 | diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c |
186 | index 13a3891..452932d 100644 |
187 | --- a/arch/x86/kernel/check.c |
188 | +++ b/arch/x86/kernel/check.c |
189 | @@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void) |
190 | addr += size; |
191 | } |
192 | |
193 | - printk(KERN_INFO "Scanning %d areas for low memory corruption\n", |
194 | - num_scan_areas); |
195 | + if (num_scan_areas) |
196 | + printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); |
197 | } |
198 | |
199 | |
200 | @@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy) |
201 | { |
202 | check_for_bios_corruption(); |
203 | schedule_delayed_work(&bios_check_work, |
204 | - round_jiffies_relative(corruption_check_period*HZ)); |
205 | + round_jiffies_relative(corruption_check_period*HZ)); |
206 | } |
207 | |
208 | static int start_periodic_check_for_corruption(void) |
209 | { |
210 | - if (!memory_corruption_check || corruption_check_period == 0) |
211 | + if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) |
212 | return 0; |
213 | |
214 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", |
215 | diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c |
216 | index 0c2b7ef..d54f0a4 100644 |
217 | --- a/arch/x86/kernel/e820.c |
218 | +++ b/arch/x86/kernel/e820.c |
219 | @@ -846,15 +846,21 @@ static int __init parse_memopt(char *p) |
220 | if (!p) |
221 | return -EINVAL; |
222 | |
223 | -#ifdef CONFIG_X86_32 |
224 | if (!strcmp(p, "nopentium")) { |
225 | +#ifdef CONFIG_X86_32 |
226 | setup_clear_cpu_cap(X86_FEATURE_PSE); |
227 | return 0; |
228 | - } |
229 | +#else |
230 | + printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); |
231 | + return -EINVAL; |
232 | #endif |
233 | + } |
234 | |
235 | userdef = 1; |
236 | mem_size = memparse(p, &p); |
237 | + /* don't remove all of memory when handling "mem={invalid}" param */ |
238 | + if (mem_size == 0) |
239 | + return -EINVAL; |
240 | e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); |
241 | |
242 | return 0; |
243 | diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c |
244 | index 9efbdcc..3755ef4 100644 |
245 | --- a/arch/x86/kernel/early-quirks.c |
246 | +++ b/arch/x86/kernel/early-quirks.c |
247 | @@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) |
248 | if (rev >= 0x40) |
249 | acpi_fix_pin2_polarity = 1; |
250 | |
251 | - if (rev > 0x13) |
252 | + /* |
253 | + * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... |
254 | + * SB700: revisions 0x39, 0x3a, ... |
255 | + * SB800: revisions 0x40, 0x41, ... |
256 | + */ |
257 | + if (rev >= 0x39) |
258 | return; |
259 | |
260 | if (acpi_use_timer_override) |
261 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
262 | index e3ba417..b235db9 100644 |
263 | --- a/arch/x86/kernel/entry_64.S |
264 | +++ b/arch/x86/kernel/entry_64.S |
265 | @@ -1238,7 +1238,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) |
266 | decl PER_CPU_VAR(irq_count) |
267 | jmp error_exit |
268 | CFI_ENDPROC |
269 | -END(do_hypervisor_callback) |
270 | +END(xen_do_hypervisor_callback) |
271 | |
272 | /* |
273 | * Hypervisor uses this for application faults while it executes. |
274 | diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c |
275 | index 7d90ceb..20e3f87 100644 |
276 | --- a/arch/x86/mm/fault.c |
277 | +++ b/arch/x86/mm/fault.c |
278 | @@ -229,15 +229,14 @@ void vmalloc_sync_all(void) |
279 | for (address = VMALLOC_START & PMD_MASK; |
280 | address >= TASK_SIZE && address < FIXADDR_TOP; |
281 | address += PMD_SIZE) { |
282 | - |
283 | - unsigned long flags; |
284 | struct page *page; |
285 | |
286 | - spin_lock_irqsave(&pgd_lock, flags); |
287 | + spin_lock(&pgd_lock); |
288 | list_for_each_entry(page, &pgd_list, lru) { |
289 | spinlock_t *pgt_lock; |
290 | pmd_t *ret; |
291 | |
292 | + /* the pgt_lock only for Xen */ |
293 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
294 | |
295 | spin_lock(pgt_lock); |
296 | @@ -247,7 +246,7 @@ void vmalloc_sync_all(void) |
297 | if (!ret) |
298 | break; |
299 | } |
300 | - spin_unlock_irqrestore(&pgd_lock, flags); |
301 | + spin_unlock(&pgd_lock); |
302 | } |
303 | } |
304 | |
305 | @@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
306 | unsigned long address, unsigned int fault) |
307 | { |
308 | if (fault & VM_FAULT_OOM) { |
309 | + /* Kernel mode? Handle exceptions or die: */ |
310 | + if (!(error_code & PF_USER)) { |
311 | + up_read(¤t->mm->mmap_sem); |
312 | + no_context(regs, error_code, address); |
313 | + return; |
314 | + } |
315 | + |
316 | out_of_memory(regs, error_code, address); |
317 | } else { |
318 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
319 | diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c |
320 | index 71a5929..c14a542 100644 |
321 | --- a/arch/x86/mm/init_64.c |
322 | +++ b/arch/x86/mm/init_64.c |
323 | @@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) |
324 | |
325 | for (address = start; address <= end; address += PGDIR_SIZE) { |
326 | const pgd_t *pgd_ref = pgd_offset_k(address); |
327 | - unsigned long flags; |
328 | struct page *page; |
329 | |
330 | if (pgd_none(*pgd_ref)) |
331 | continue; |
332 | |
333 | - spin_lock_irqsave(&pgd_lock, flags); |
334 | + spin_lock(&pgd_lock); |
335 | list_for_each_entry(page, &pgd_list, lru) { |
336 | pgd_t *pgd; |
337 | spinlock_t *pgt_lock; |
338 | |
339 | pgd = (pgd_t *)page_address(page) + pgd_index(address); |
340 | + /* the pgt_lock only for Xen */ |
341 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
342 | spin_lock(pgt_lock); |
343 | |
344 | @@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) |
345 | |
346 | spin_unlock(pgt_lock); |
347 | } |
348 | - spin_unlock_irqrestore(&pgd_lock, flags); |
349 | + spin_unlock(&pgd_lock); |
350 | } |
351 | } |
352 | |
353 | diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c |
354 | index 532e793..89d66f4 100644 |
355 | --- a/arch/x86/mm/pageattr.c |
356 | +++ b/arch/x86/mm/pageattr.c |
357 | @@ -56,12 +56,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM]; |
358 | |
359 | void update_page_count(int level, unsigned long pages) |
360 | { |
361 | - unsigned long flags; |
362 | - |
363 | /* Protect against CPA */ |
364 | - spin_lock_irqsave(&pgd_lock, flags); |
365 | + spin_lock(&pgd_lock); |
366 | direct_pages_count[level] += pages; |
367 | - spin_unlock_irqrestore(&pgd_lock, flags); |
368 | + spin_unlock(&pgd_lock); |
369 | } |
370 | |
371 | static void split_page_count(int level) |
372 | @@ -391,7 +389,7 @@ static int |
373 | try_preserve_large_page(pte_t *kpte, unsigned long address, |
374 | struct cpa_data *cpa) |
375 | { |
376 | - unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; |
377 | + unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; |
378 | pte_t new_pte, old_pte, *tmp; |
379 | pgprot_t old_prot, new_prot; |
380 | int i, do_split = 1; |
381 | @@ -400,7 +398,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, |
382 | if (cpa->force_split) |
383 | return 1; |
384 | |
385 | - spin_lock_irqsave(&pgd_lock, flags); |
386 | + spin_lock(&pgd_lock); |
387 | /* |
388 | * Check for races, another CPU might have split this page |
389 | * up already: |
390 | @@ -495,14 +493,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, |
391 | } |
392 | |
393 | out_unlock: |
394 | - spin_unlock_irqrestore(&pgd_lock, flags); |
395 | + spin_unlock(&pgd_lock); |
396 | |
397 | return do_split; |
398 | } |
399 | |
400 | static int split_large_page(pte_t *kpte, unsigned long address) |
401 | { |
402 | - unsigned long flags, pfn, pfninc = 1; |
403 | + unsigned long pfn, pfninc = 1; |
404 | unsigned int i, level; |
405 | pte_t *pbase, *tmp; |
406 | pgprot_t ref_prot; |
407 | @@ -516,7 +514,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) |
408 | if (!base) |
409 | return -ENOMEM; |
410 | |
411 | - spin_lock_irqsave(&pgd_lock, flags); |
412 | + spin_lock(&pgd_lock); |
413 | /* |
414 | * Check for races, another CPU might have split this page |
415 | * up for us already: |
416 | @@ -588,7 +586,7 @@ out_unlock: |
417 | */ |
418 | if (base) |
419 | __free_page(base); |
420 | - spin_unlock_irqrestore(&pgd_lock, flags); |
421 | + spin_unlock(&pgd_lock); |
422 | |
423 | return 0; |
424 | } |
425 | diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c |
426 | index 8be8c7d..7dd4719 100644 |
427 | --- a/arch/x86/mm/pgtable.c |
428 | +++ b/arch/x86/mm/pgtable.c |
429 | @@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) |
430 | |
431 | static void pgd_dtor(pgd_t *pgd) |
432 | { |
433 | - unsigned long flags; /* can be called from interrupt context */ |
434 | - |
435 | if (SHARED_KERNEL_PMD) |
436 | return; |
437 | |
438 | - spin_lock_irqsave(&pgd_lock, flags); |
439 | + spin_lock(&pgd_lock); |
440 | pgd_list_del(pgd); |
441 | - spin_unlock_irqrestore(&pgd_lock, flags); |
442 | + spin_unlock(&pgd_lock); |
443 | } |
444 | |
445 | /* |
446 | @@ -170,8 +168,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) |
447 | * section 8.1: in PAE mode we explicitly have to flush the |
448 | * TLB via cr3 if the top-level pgd is changed... |
449 | */ |
450 | - if (mm == current->active_mm) |
451 | - write_cr3(read_cr3()); |
452 | + flush_tlb_mm(mm); |
453 | } |
454 | #else /* !CONFIG_X86_PAE */ |
455 | |
456 | @@ -260,7 +257,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) |
457 | { |
458 | pgd_t *pgd; |
459 | pmd_t *pmds[PREALLOCATED_PMDS]; |
460 | - unsigned long flags; |
461 | |
462 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
463 | |
464 | @@ -280,12 +276,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) |
465 | * respect to anything walking the pgd_list, so that they |
466 | * never see a partially populated pgd. |
467 | */ |
468 | - spin_lock_irqsave(&pgd_lock, flags); |
469 | + spin_lock(&pgd_lock); |
470 | |
471 | pgd_ctor(mm, pgd); |
472 | pgd_prepopulate_pmd(mm, pgd, pmds); |
473 | |
474 | - spin_unlock_irqrestore(&pgd_lock, flags); |
475 | + spin_unlock(&pgd_lock); |
476 | |
477 | return pgd; |
478 | |
479 | diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c |
480 | index 198df8d..9ea0dc6 100644 |
481 | --- a/arch/x86/xen/mmu.c |
482 | +++ b/arch/x86/xen/mmu.c |
483 | @@ -1362,10 +1362,9 @@ static void xen_pgd_pin(struct mm_struct *mm) |
484 | */ |
485 | void xen_mm_pin_all(void) |
486 | { |
487 | - unsigned long flags; |
488 | struct page *page; |
489 | |
490 | - spin_lock_irqsave(&pgd_lock, flags); |
491 | + spin_lock(&pgd_lock); |
492 | |
493 | list_for_each_entry(page, &pgd_list, lru) { |
494 | if (!PagePinned(page)) { |
495 | @@ -1374,7 +1373,7 @@ void xen_mm_pin_all(void) |
496 | } |
497 | } |
498 | |
499 | - spin_unlock_irqrestore(&pgd_lock, flags); |
500 | + spin_unlock(&pgd_lock); |
501 | } |
502 | |
503 | /* |
504 | @@ -1475,10 +1474,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) |
505 | */ |
506 | void xen_mm_unpin_all(void) |
507 | { |
508 | - unsigned long flags; |
509 | struct page *page; |
510 | |
511 | - spin_lock_irqsave(&pgd_lock, flags); |
512 | + spin_lock(&pgd_lock); |
513 | |
514 | list_for_each_entry(page, &pgd_list, lru) { |
515 | if (PageSavePinned(page)) { |
516 | @@ -1488,7 +1486,7 @@ void xen_mm_unpin_all(void) |
517 | } |
518 | } |
519 | |
520 | - spin_unlock_irqrestore(&pgd_lock, flags); |
521 | + spin_unlock(&pgd_lock); |
522 | } |
523 | |
524 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
525 | diff --git a/block/blk-lib.c b/block/blk-lib.c |
526 | index 1a320d2..ccf5a40 100644 |
527 | --- a/block/blk-lib.c |
528 | +++ b/block/blk-lib.c |
529 | @@ -109,7 +109,6 @@ struct bio_batch |
530 | atomic_t done; |
531 | unsigned long flags; |
532 | struct completion *wait; |
533 | - bio_end_io_t *end_io; |
534 | }; |
535 | |
536 | static void bio_batch_end_io(struct bio *bio, int err) |
537 | @@ -122,12 +121,9 @@ static void bio_batch_end_io(struct bio *bio, int err) |
538 | else |
539 | clear_bit(BIO_UPTODATE, &bb->flags); |
540 | } |
541 | - if (bb) { |
542 | - if (bb->end_io) |
543 | - bb->end_io(bio, err); |
544 | - atomic_inc(&bb->done); |
545 | - complete(bb->wait); |
546 | - } |
547 | + if (bb) |
548 | + if (atomic_dec_and_test(&bb->done)) |
549 | + complete(bb->wait); |
550 | bio_put(bio); |
551 | } |
552 | |
553 | @@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
554 | int ret; |
555 | struct bio *bio; |
556 | struct bio_batch bb; |
557 | - unsigned int sz, issued = 0; |
558 | + unsigned int sz; |
559 | DECLARE_COMPLETION_ONSTACK(wait); |
560 | |
561 | - atomic_set(&bb.done, 0); |
562 | + atomic_set(&bb.done, 1); |
563 | bb.flags = 1 << BIO_UPTODATE; |
564 | bb.wait = &wait; |
565 | - bb.end_io = NULL; |
566 | |
567 | submit: |
568 | ret = 0; |
569 | @@ -185,12 +180,12 @@ submit: |
570 | break; |
571 | } |
572 | ret = 0; |
573 | - issued++; |
574 | + atomic_inc(&bb.done); |
575 | submit_bio(WRITE, bio); |
576 | } |
577 | |
578 | /* Wait for bios in-flight */ |
579 | - while (issued != atomic_read(&bb.done)) |
580 | + if (!atomic_dec_and_test(&bb.done)) |
581 | wait_for_completion(&wait); |
582 | |
583 | if (!test_bit(BIO_UPTODATE, &bb.flags)) |
584 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
585 | index 3288263..95c0be0 100644 |
586 | --- a/drivers/ata/ahci.c |
587 | +++ b/drivers/ata/ahci.c |
588 | @@ -260,6 +260,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
589 | { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ |
590 | { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ |
591 | { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ |
592 | + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ |
593 | + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ |
594 | |
595 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
596 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
597 | @@ -380,6 +382,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
598 | { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ |
599 | { PCI_DEVICE(0x1b4b, 0x9123), |
600 | .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ |
601 | + { PCI_DEVICE(0x1b4b, 0x9125), |
602 | + .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ |
603 | |
604 | /* Promise */ |
605 | { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ |
606 | diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c |
607 | index 17a6378..e16850e 100644 |
608 | --- a/drivers/ata/libata-eh.c |
609 | +++ b/drivers/ata/libata-eh.c |
610 | @@ -1618,7 +1618,7 @@ static void ata_eh_analyze_serror(struct ata_link *link) |
611 | * host links. For disabled PMP links, only N bit is |
612 | * considered as X bit is left at 1 for link plugging. |
613 | */ |
614 | - if (link->lpm_policy != ATA_LPM_MAX_POWER) |
615 | + if (link->lpm_policy > ATA_LPM_MAX_POWER) |
616 | hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ |
617 | else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) |
618 | hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; |
619 | diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c |
620 | index 85da4c4..2eee8e0 100644 |
621 | --- a/drivers/gpu/drm/drm_sysfs.c |
622 | +++ b/drivers/gpu/drm/drm_sysfs.c |
623 | @@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device, |
624 | { |
625 | struct drm_connector *connector = to_drm_connector(device); |
626 | enum drm_connector_status status; |
627 | + int ret; |
628 | + |
629 | + ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex); |
630 | + if (ret) |
631 | + return ret; |
632 | |
633 | status = connector->funcs->detect(connector, true); |
634 | + mutex_unlock(&connector->dev->mode_config.mutex); |
635 | + |
636 | return snprintf(buf, PAGE_SIZE, "%s\n", |
637 | drm_get_connector_status_name(status)); |
638 | } |
639 | diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c |
640 | index 0df86b5..c87e5d9 100644 |
641 | --- a/drivers/gpu/drm/i915/intel_panel.c |
642 | +++ b/drivers/gpu/drm/i915/intel_panel.c |
643 | @@ -176,7 +176,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) |
644 | val &= ~1; |
645 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); |
646 | val *= lbpc; |
647 | - val >>= 1; |
648 | } |
649 | } |
650 | |
651 | diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig |
652 | index 3052e29..3e01479 100644 |
653 | --- a/drivers/hid/Kconfig |
654 | +++ b/drivers/hid/Kconfig |
655 | @@ -291,10 +291,10 @@ config HID_NTRIG |
656 | Support for N-Trig touch screen. |
657 | |
658 | config HID_ORTEK |
659 | - tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad" |
660 | + tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" |
661 | depends on USB_HID |
662 | ---help--- |
663 | - Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. |
664 | + Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. |
665 | |
666 | config HID_PANTHERLORD |
667 | tristate "Pantherlord/GreenAsia game controller" |
668 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
669 | index 52cf7ca..cdfa368 100644 |
670 | --- a/drivers/hid/hid-core.c |
671 | +++ b/drivers/hid/hid-core.c |
672 | @@ -1365,6 +1365,7 @@ static const struct hid_device_id hid_blacklist[] = { |
673 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, |
674 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, |
675 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, |
676 | + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
677 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
678 | { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, |
679 | { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, |
680 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
681 | index f04789d..b4df785 100644 |
682 | --- a/drivers/hid/hid-ids.h |
683 | +++ b/drivers/hid/hid-ids.h |
684 | @@ -445,6 +445,7 @@ |
685 | #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 |
686 | |
687 | #define USB_VENDOR_ID_ORTEK 0x05a4 |
688 | +#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 |
689 | #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 |
690 | |
691 | #define USB_VENDOR_ID_PANJIT 0x134c |
692 | diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c |
693 | index ed732b7..d383a44 100644 |
694 | --- a/drivers/hid/hid-magicmouse.c |
695 | +++ b/drivers/hid/hid-magicmouse.c |
696 | @@ -256,7 +256,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda |
697 | input_report_abs(input, ABS_MT_TRACKING_ID, id); |
698 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); |
699 | input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); |
700 | - input_report_abs(input, ABS_MT_ORIENTATION, orientation); |
701 | + input_report_abs(input, ABS_MT_ORIENTATION, -orientation); |
702 | input_report_abs(input, ABS_MT_POSITION_X, x); |
703 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
704 | |
705 | @@ -395,7 +395,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h |
706 | input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); |
707 | input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); |
708 | input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); |
709 | - input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); |
710 | + input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); |
711 | |
712 | /* Note: Touch Y position from the device is inverted relative |
713 | * to how pointer motion is reported (and relative to how USB |
714 | diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c |
715 | index 2e79716..d021461 100644 |
716 | --- a/drivers/hid/hid-ortek.c |
717 | +++ b/drivers/hid/hid-ortek.c |
718 | @@ -1,5 +1,5 @@ |
719 | /* |
720 | - * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). |
721 | + * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). |
722 | * Fixes LogicalMaximum error in USB report description, see |
723 | * http://bugzilla.kernel.org/show_bug.cgi?id=14787 |
724 | * |
725 | @@ -31,6 +31,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
726 | } |
727 | |
728 | static const struct hid_device_id ortek_devices[] = { |
729 | + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
730 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
731 | { } |
732 | }; |
733 | diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c |
734 | index 75afb3b..2855ef9 100644 |
735 | --- a/drivers/hwmon/f71882fg.c |
736 | +++ b/drivers/hwmon/f71882fg.c |
737 | @@ -2110,7 +2110,6 @@ static int f71882fg_remove(struct platform_device *pdev) |
738 | int nr_fans = (data->type == f71882fg) ? 4 : 3; |
739 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); |
740 | |
741 | - platform_set_drvdata(pdev, NULL); |
742 | if (data->hwmon_dev) |
743 | hwmon_device_unregister(data->hwmon_dev); |
744 | |
745 | @@ -2177,6 +2176,7 @@ static int f71882fg_remove(struct platform_device *pdev) |
746 | } |
747 | } |
748 | |
749 | + platform_set_drvdata(pdev, NULL); |
750 | kfree(data); |
751 | |
752 | return 0; |
753 | diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c |
754 | index a610e78..38a41d2 100644 |
755 | --- a/drivers/hwmon/sht15.c |
756 | +++ b/drivers/hwmon/sht15.c |
757 | @@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) |
758 | |
759 | const int c1 = -4; |
760 | const int c2 = 40500; /* x 10 ^ -6 */ |
761 | - const int c3 = -2800; /* x10 ^ -9 */ |
762 | + const int c3 = -28; /* x 10 ^ -7 */ |
763 | |
764 | RHlinear = c1*1000 |
765 | + c2 * data->val_humid/1000 |
766 | - + (data->val_humid * data->val_humid * c3)/1000000; |
767 | + + (data->val_humid * data->val_humid * c3) / 10000; |
768 | return (temp - 25000) * (10000 + 80 * data->val_humid) |
769 | / 1000000 + RHlinear; |
770 | } |
771 | diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c |
772 | index 64e0903..1d9616b 100644 |
773 | --- a/drivers/infiniband/core/cm.c |
774 | +++ b/drivers/infiniband/core/cm.c |
775 | @@ -2989,6 +2989,7 @@ static int cm_sidr_req_handler(struct cm_work *work) |
776 | goto out; /* No match. */ |
777 | } |
778 | atomic_inc(&cur_cm_id_priv->refcount); |
779 | + atomic_inc(&cm_id_priv->refcount); |
780 | spin_unlock_irq(&cm.lock); |
781 | |
782 | cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; |
783 | diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c |
784 | index 6884da2..e450c5a 100644 |
785 | --- a/drivers/infiniband/core/cma.c |
786 | +++ b/drivers/infiniband/core/cma.c |
787 | @@ -1210,6 +1210,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) |
788 | cm_id->context = conn_id; |
789 | cm_id->cm_handler = cma_ib_handler; |
790 | |
791 | + /* |
792 | + * Protect against the user destroying conn_id from another thread |
793 | + * until we're done accessing it. |
794 | + */ |
795 | + atomic_inc(&conn_id->refcount); |
796 | ret = conn_id->id.event_handler(&conn_id->id, &event); |
797 | if (!ret) { |
798 | /* |
799 | @@ -1222,8 +1227,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) |
800 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
801 | mutex_unlock(&lock); |
802 | mutex_unlock(&conn_id->handler_mutex); |
803 | + cma_deref_id(conn_id); |
804 | goto out; |
805 | } |
806 | + cma_deref_id(conn_id); |
807 | |
808 | /* Destroy the CM ID by returning a non-zero value. */ |
809 | conn_id->cm_id.ib = NULL; |
810 | @@ -1425,17 +1432,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, |
811 | event.param.conn.private_data_len = iw_event->private_data_len; |
812 | event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; |
813 | event.param.conn.responder_resources = attr.max_qp_rd_atom; |
814 | + |
815 | + /* |
816 | + * Protect against the user destroying conn_id from another thread |
817 | + * until we're done accessing it. |
818 | + */ |
819 | + atomic_inc(&conn_id->refcount); |
820 | ret = conn_id->id.event_handler(&conn_id->id, &event); |
821 | if (ret) { |
822 | /* User wants to destroy the CM ID */ |
823 | conn_id->cm_id.iw = NULL; |
824 | cma_exch(conn_id, CMA_DESTROYING); |
825 | mutex_unlock(&conn_id->handler_mutex); |
826 | + cma_deref_id(conn_id); |
827 | rdma_destroy_id(&conn_id->id); |
828 | goto out; |
829 | } |
830 | |
831 | mutex_unlock(&conn_id->handler_mutex); |
832 | + cma_deref_id(conn_id); |
833 | |
834 | out: |
835 | if (dev) |
836 | diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c |
837 | index efef5f9..47c7461 100644 |
838 | --- a/drivers/mmc/core/sdio.c |
839 | +++ b/drivers/mmc/core/sdio.c |
840 | @@ -395,6 +395,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, |
841 | if (err) |
842 | goto remove; |
843 | |
844 | + /* |
845 | + * Update oldcard with the new RCA received from the SDIO |
846 | + * device -- we're doing this so that it's updated in the |
847 | + * "card" struct when oldcard overwrites that later. |
848 | + */ |
849 | + if (oldcard) |
850 | + oldcard->rca = card->rca; |
851 | + |
852 | mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); |
853 | } |
854 | |
855 | diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c |
856 | index 3d9c246..bfd631a 100644 |
857 | --- a/drivers/mmc/host/sdhci-pci.c |
858 | +++ b/drivers/mmc/host/sdhci-pci.c |
859 | @@ -454,6 +454,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { |
860 | }, |
861 | |
862 | { |
863 | + .vendor = PCI_VENDOR_ID_RICOH, |
864 | + .device = 0xe823, |
865 | + .subvendor = PCI_ANY_ID, |
866 | + .subdevice = PCI_ANY_ID, |
867 | + .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, |
868 | + }, |
869 | + |
870 | + { |
871 | .vendor = PCI_VENDOR_ID_ENE, |
872 | .device = PCI_DEVICE_ID_ENE_CB712_SD, |
873 | .subvendor = PCI_ANY_ID, |
874 | diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c |
875 | index ad9268b..56802cd 100644 |
876 | --- a/drivers/mtd/chips/cfi_cmdset_0001.c |
877 | +++ b/drivers/mtd/chips/cfi_cmdset_0001.c |
878 | @@ -1229,10 +1229,32 @@ static int inval_cache_and_wait_for_operation( |
879 | sleep_time = chip_op_time / 2; |
880 | |
881 | for (;;) { |
882 | + if (chip->state != chip_state) { |
883 | + /* Someone's suspended the operation: sleep */ |
884 | + DECLARE_WAITQUEUE(wait, current); |
885 | + set_current_state(TASK_UNINTERRUPTIBLE); |
886 | + add_wait_queue(&chip->wq, &wait); |
887 | + mutex_unlock(&chip->mutex); |
888 | + schedule(); |
889 | + remove_wait_queue(&chip->wq, &wait); |
890 | + mutex_lock(&chip->mutex); |
891 | + continue; |
892 | + } |
893 | + |
894 | status = map_read(map, cmd_adr); |
895 | if (map_word_andequal(map, status, status_OK, status_OK)) |
896 | break; |
897 | |
898 | + if (chip->erase_suspended && chip_state == FL_ERASING) { |
899 | + /* Erase suspend occured while sleep: reset timeout */ |
900 | + timeo = reset_timeo; |
901 | + chip->erase_suspended = 0; |
902 | + } |
903 | + if (chip->write_suspended && chip_state == FL_WRITING) { |
904 | + /* Write suspend occured while sleep: reset timeout */ |
905 | + timeo = reset_timeo; |
906 | + chip->write_suspended = 0; |
907 | + } |
908 | if (!timeo) { |
909 | map_write(map, CMD(0x70), cmd_adr); |
910 | chip->state = FL_STATUS; |
911 | @@ -1256,27 +1278,6 @@ static int inval_cache_and_wait_for_operation( |
912 | timeo--; |
913 | } |
914 | mutex_lock(&chip->mutex); |
915 | - |
916 | - while (chip->state != chip_state) { |
917 | - /* Someone's suspended the operation: sleep */ |
918 | - DECLARE_WAITQUEUE(wait, current); |
919 | - set_current_state(TASK_UNINTERRUPTIBLE); |
920 | - add_wait_queue(&chip->wq, &wait); |
921 | - mutex_unlock(&chip->mutex); |
922 | - schedule(); |
923 | - remove_wait_queue(&chip->wq, &wait); |
924 | - mutex_lock(&chip->mutex); |
925 | - } |
926 | - if (chip->erase_suspended && chip_state == FL_ERASING) { |
927 | - /* Erase suspend occured while sleep: reset timeout */ |
928 | - timeo = reset_timeo; |
929 | - chip->erase_suspended = 0; |
930 | - } |
931 | - if (chip->write_suspended && chip_state == FL_WRITING) { |
932 | - /* Write suspend occured while sleep: reset timeout */ |
933 | - timeo = reset_timeo; |
934 | - chip->write_suspended = 0; |
935 | - } |
936 | } |
937 | |
938 | /* Done and happy. */ |
939 | diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c |
940 | index d72a5fb..4e1be51 100644 |
941 | --- a/drivers/mtd/chips/jedec_probe.c |
942 | +++ b/drivers/mtd/chips/jedec_probe.c |
943 | @@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) |
944 | } |
945 | |
946 | |
947 | -static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) |
948 | +static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index) |
949 | { |
950 | int i,num_erase_regions; |
951 | uint8_t uaddr; |
952 | |
953 | - if (! (jedec_table[index].devtypes & p_cfi->device_type)) { |
954 | + if (!(jedec_table[index].devtypes & cfi->device_type)) { |
955 | DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", |
956 | - jedec_table[index].name, 4 * (1<<p_cfi->device_type)); |
957 | + jedec_table[index].name, 4 * (1<<cfi->device_type)); |
958 | return 0; |
959 | } |
960 | |
961 | @@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) |
962 | |
963 | num_erase_regions = jedec_table[index].nr_regions; |
964 | |
965 | - p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); |
966 | - if (!p_cfi->cfiq) { |
967 | + cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); |
968 | + if (!cfi->cfiq) { |
969 | //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); |
970 | return 0; |
971 | } |
972 | |
973 | - memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); |
974 | + memset(cfi->cfiq, 0, sizeof(struct cfi_ident)); |
975 | |
976 | - p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; |
977 | - p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; |
978 | - p_cfi->cfiq->DevSize = jedec_table[index].dev_size; |
979 | - p_cfi->cfi_mode = CFI_MODE_JEDEC; |
980 | + cfi->cfiq->P_ID = jedec_table[index].cmd_set; |
981 | + cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; |
982 | + cfi->cfiq->DevSize = jedec_table[index].dev_size; |
983 | + cfi->cfi_mode = CFI_MODE_JEDEC; |
984 | + cfi->sector_erase_cmd = CMD(0x30); |
985 | |
986 | for (i=0; i<num_erase_regions; i++){ |
987 | - p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; |
988 | + cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; |
989 | } |
990 | - p_cfi->cmdset_priv = NULL; |
991 | + cfi->cmdset_priv = NULL; |
992 | |
993 | /* This may be redundant for some cases, but it doesn't hurt */ |
994 | - p_cfi->mfr = jedec_table[index].mfr_id; |
995 | - p_cfi->id = jedec_table[index].dev_id; |
996 | + cfi->mfr = jedec_table[index].mfr_id; |
997 | + cfi->id = jedec_table[index].dev_id; |
998 | |
999 | uaddr = jedec_table[index].uaddr; |
1000 | |
1001 | @@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) |
1002 | our brains explode when we see the datasheets talking about address |
1003 | lines numbered from A-1 to A18. The CFI table has unlock addresses |
1004 | in device-words according to the mode the device is connected in */ |
1005 | - p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; |
1006 | - p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; |
1007 | + cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type; |
1008 | + cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type; |
1009 | |
1010 | return 1; /* ok */ |
1011 | } |
1012 | @@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, |
1013 | "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", |
1014 | __func__, cfi->mfr, cfi->id, |
1015 | cfi->addr_unlock1, cfi->addr_unlock2 ); |
1016 | - if (!cfi_jedec_setup(cfi, i)) |
1017 | + if (!cfi_jedec_setup(map, cfi, i)) |
1018 | return 0; |
1019 | goto ok_out; |
1020 | } |
1021 | diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c |
1022 | index cb20c67..e0a2373 100644 |
1023 | --- a/drivers/mtd/mtd_blkdevs.c |
1024 | +++ b/drivers/mtd/mtd_blkdevs.c |
1025 | @@ -413,7 +413,6 @@ error3: |
1026 | error2: |
1027 | list_del(&new->list); |
1028 | error1: |
1029 | - kfree(new); |
1030 | return ret; |
1031 | } |
1032 | |
1033 | diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c |
1034 | index 15682ec..28af71c 100644 |
1035 | --- a/drivers/mtd/nand/omap2.c |
1036 | +++ b/drivers/mtd/nand/omap2.c |
1037 | @@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void) |
1038 | module_init(omap_nand_init); |
1039 | module_exit(omap_nand_exit); |
1040 | |
1041 | -MODULE_ALIAS(DRIVER_NAME); |
1042 | +MODULE_ALIAS("platform:" DRIVER_NAME); |
1043 | MODULE_LICENSE("GPL"); |
1044 | MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); |
1045 | diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c |
1046 | index e789149..ac08750 100644 |
1047 | --- a/drivers/mtd/onenand/generic.c |
1048 | +++ b/drivers/mtd/onenand/generic.c |
1049 | @@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = { |
1050 | .remove = __devexit_p(generic_onenand_remove), |
1051 | }; |
1052 | |
1053 | -MODULE_ALIAS(DRIVER_NAME); |
1054 | +MODULE_ALIAS("platform:" DRIVER_NAME); |
1055 | |
1056 | static int __init generic_onenand_init(void) |
1057 | { |
1058 | diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c |
1059 | index 9f322f1..348ce71 100644 |
1060 | --- a/drivers/mtd/onenand/omap2.c |
1061 | +++ b/drivers/mtd/onenand/omap2.c |
1062 | @@ -815,7 +815,7 @@ static void __exit omap2_onenand_exit(void) |
1063 | module_init(omap2_onenand_init); |
1064 | module_exit(omap2_onenand_exit); |
1065 | |
1066 | -MODULE_ALIAS(DRIVER_NAME); |
1067 | +MODULE_ALIAS("platform:" DRIVER_NAME); |
1068 | MODULE_LICENSE("GPL"); |
1069 | MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); |
1070 | MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); |
1071 | diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c |
1072 | index e3eca13..c7fee09 100644 |
1073 | --- a/drivers/net/ixgbe/ixgbe_common.c |
1074 | +++ b/drivers/net/ixgbe/ixgbe_common.c |
1075 | @@ -1292,6 +1292,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) |
1076 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); |
1077 | |
1078 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
1079 | + |
1080 | + /* clear VMDq pool/queue selection for RAR 0 */ |
1081 | + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); |
1082 | } |
1083 | hw->addr_ctrl.overflow_promisc = 0; |
1084 | |
1085 | diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c |
1086 | index 05efa6a..d506434 100644 |
1087 | --- a/drivers/net/ixgbe/ixgbe_fcoe.c |
1088 | +++ b/drivers/net/ixgbe/ixgbe_fcoe.c |
1089 | @@ -151,7 +151,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, |
1090 | struct scatterlist *sg; |
1091 | unsigned int i, j, dmacount; |
1092 | unsigned int len; |
1093 | - static const unsigned int bufflen = 4096; |
1094 | + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
1095 | unsigned int firstoff = 0; |
1096 | unsigned int lastsize; |
1097 | unsigned int thisoff = 0; |
1098 | @@ -241,6 +241,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, |
1099 | /* only the last buffer may have non-full bufflen */ |
1100 | lastsize = thisoff + thislen; |
1101 | |
1102 | + /* |
1103 | + * lastsize can not be buffer len. |
1104 | + * If it is then adding another buffer with lastsize = 1. |
1105 | + */ |
1106 | + if (lastsize == bufflen) { |
1107 | + if (j >= IXGBE_BUFFCNT_MAX) { |
1108 | + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " |
1109 | + "not enough user buffers. We need an extra " |
1110 | + "buffer because lastsize is bufflen.\n", |
1111 | + xid, i, j, dmacount, (u64)addr); |
1112 | + goto out_noddp_free; |
1113 | + } |
1114 | + |
1115 | + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); |
1116 | + j++; |
1117 | + lastsize = 1; |
1118 | + } |
1119 | + |
1120 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
1121 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
1122 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
1123 | @@ -519,6 +537,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) |
1124 | e_err(drv, "failed to allocated FCoE DDP pool\n"); |
1125 | |
1126 | spin_lock_init(&fcoe->lock); |
1127 | + |
1128 | + /* Extra buffer to be shared by all DDPs for HW work around */ |
1129 | + fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); |
1130 | + if (fcoe->extra_ddp_buffer == NULL) { |
1131 | + e_err(drv, "failed to allocated extra DDP buffer\n"); |
1132 | + goto out_extra_ddp_buffer_alloc; |
1133 | + } |
1134 | + |
1135 | + fcoe->extra_ddp_buffer_dma = |
1136 | + dma_map_single(&adapter->pdev->dev, |
1137 | + fcoe->extra_ddp_buffer, |
1138 | + IXGBE_FCBUFF_MIN, |
1139 | + DMA_FROM_DEVICE); |
1140 | + if (dma_mapping_error(&adapter->pdev->dev, |
1141 | + fcoe->extra_ddp_buffer_dma)) { |
1142 | + e_err(drv, "failed to map extra DDP buffer\n"); |
1143 | + goto out_extra_ddp_buffer_dma; |
1144 | + } |
1145 | } |
1146 | |
1147 | /* Enable L2 eth type filter for FCoE */ |
1148 | @@ -568,6 +604,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) |
1149 | } |
1150 | } |
1151 | #endif |
1152 | + |
1153 | + return; |
1154 | + |
1155 | +out_extra_ddp_buffer_dma: |
1156 | + kfree(fcoe->extra_ddp_buffer); |
1157 | +out_extra_ddp_buffer_alloc: |
1158 | + pci_pool_destroy(fcoe->pool); |
1159 | + fcoe->pool = NULL; |
1160 | } |
1161 | |
1162 | /** |
1163 | @@ -587,6 +631,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) |
1164 | if (fcoe->pool) { |
1165 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
1166 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
1167 | + dma_unmap_single(&adapter->pdev->dev, |
1168 | + fcoe->extra_ddp_buffer_dma, |
1169 | + IXGBE_FCBUFF_MIN, |
1170 | + DMA_FROM_DEVICE); |
1171 | + kfree(fcoe->extra_ddp_buffer); |
1172 | pci_pool_destroy(fcoe->pool); |
1173 | fcoe->pool = NULL; |
1174 | } |
1175 | diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h |
1176 | index 4bc2c55..65cc8fb 100644 |
1177 | --- a/drivers/net/ixgbe/ixgbe_fcoe.h |
1178 | +++ b/drivers/net/ixgbe/ixgbe_fcoe.h |
1179 | @@ -70,6 +70,8 @@ struct ixgbe_fcoe { |
1180 | spinlock_t lock; |
1181 | struct pci_pool *pool; |
1182 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
1183 | + unsigned char *extra_ddp_buffer; |
1184 | + dma_addr_t extra_ddp_buffer_dma; |
1185 | }; |
1186 | |
1187 | #endif /* _IXGBE_FCOE_H */ |
1188 | diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c |
1189 | index 5428153..960a2d6 100644 |
1190 | --- a/drivers/net/ixgbe/ixgbe_sriov.c |
1191 | +++ b/drivers/net/ixgbe/ixgbe_sriov.c |
1192 | @@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, |
1193 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
1194 | } |
1195 | |
1196 | - |
1197 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
1198 | { |
1199 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
1200 | vmolr |= (IXGBE_VMOLR_ROMPE | |
1201 | - IXGBE_VMOLR_ROPE | |
1202 | IXGBE_VMOLR_BAM); |
1203 | if (aupe) |
1204 | vmolr |= IXGBE_VMOLR_AUPE; |
1205 | diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c |
1206 | index 5dcf140..0466f74 100644 |
1207 | --- a/drivers/net/wireless/ath/ath9k/hw.c |
1208 | +++ b/drivers/net/wireless/ath/ath9k/hw.c |
1209 | @@ -516,6 +516,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) |
1210 | if (ah->hw_version.devid == AR5416_AR9100_DEVID) |
1211 | ah->hw_version.macVersion = AR_SREV_VERSION_9100; |
1212 | |
1213 | + /* |
1214 | + * Read back AR_WA into a permanent copy and set bits 14 and 17. |
1215 | + * We need to do this to avoid RMW of this register. We cannot |
1216 | + * read the reg when chip is asleep. |
1217 | + */ |
1218 | + ah->WARegVal = REG_READ(ah, AR_WA); |
1219 | + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | |
1220 | + AR_WA_ASPM_TIMER_BASED_DISABLE); |
1221 | + |
1222 | + ath9k_hw_read_revisions(ah); |
1223 | + |
1224 | if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { |
1225 | ath_print(common, ATH_DBG_FATAL, |
1226 | "Couldn't reset chip\n"); |
1227 | @@ -574,14 +585,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) |
1228 | |
1229 | ath9k_hw_init_mode_regs(ah); |
1230 | |
1231 | - /* |
1232 | - * Read back AR_WA into a permanent copy and set bits 14 and 17. |
1233 | - * We need to do this to avoid RMW of this register. We cannot |
1234 | - * read the reg when chip is asleep. |
1235 | - */ |
1236 | - ah->WARegVal = REG_READ(ah, AR_WA); |
1237 | - ah->WARegVal |= (AR_WA_D3_L1_DISABLE | |
1238 | - AR_WA_ASPM_TIMER_BASED_DISABLE); |
1239 | |
1240 | if (ah->is_pciexpress) |
1241 | ath9k_hw_configpcipowersave(ah, 0, 0); |
1242 | @@ -1089,8 +1092,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) |
1243 | return false; |
1244 | } |
1245 | |
1246 | - ath9k_hw_read_revisions(ah); |
1247 | - |
1248 | return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); |
1249 | } |
1250 | |
1251 | diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c |
1252 | index fdc2ec5..d6bbb57 100644 |
1253 | --- a/drivers/net/wireless/ath/ath9k/recv.c |
1254 | +++ b/drivers/net/wireless/ath/ath9k/recv.c |
1255 | @@ -439,9 +439,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) |
1256 | * mode interface or when in monitor mode. AP mode does not need this |
1257 | * since it receives all in-BSS frames anyway. |
1258 | */ |
1259 | - if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && |
1260 | - (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || |
1261 | - (sc->sc_ah->is_monitoring)) |
1262 | + if (sc->sc_ah->is_monitoring) |
1263 | rfilt |= ATH9K_RX_FILTER_PROM; |
1264 | |
1265 | if (sc->rx.rxfilter & FIF_CONTROL) |
1266 | diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c |
1267 | index d019830..06995b2 100644 |
1268 | --- a/drivers/net/wireless/rt2x00/rt2x00dev.c |
1269 | +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c |
1270 | @@ -486,6 +486,10 @@ void rt2x00lib_rxdone(struct queue_entry *entry) |
1271 | unsigned int header_length; |
1272 | int rate_idx; |
1273 | |
1274 | + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || |
1275 | + !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) |
1276 | + goto submit_entry; |
1277 | + |
1278 | if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) |
1279 | goto submit_entry; |
1280 | |
1281 | @@ -570,9 +574,13 @@ void rt2x00lib_rxdone(struct queue_entry *entry) |
1282 | entry->skb = skb; |
1283 | |
1284 | submit_entry: |
1285 | - rt2x00dev->ops->lib->clear_entry(entry); |
1286 | - rt2x00queue_index_inc(entry->queue, Q_INDEX); |
1287 | + entry->flags = 0; |
1288 | rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); |
1289 | + if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && |
1290 | + test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) { |
1291 | + rt2x00dev->ops->lib->clear_entry(entry); |
1292 | + rt2x00queue_index_inc(entry->queue, Q_INDEX); |
1293 | + } |
1294 | } |
1295 | EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); |
1296 | |
1297 | diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c |
1298 | index b3317df..051758f 100644 |
1299 | --- a/drivers/net/wireless/rt2x00/rt2x00usb.c |
1300 | +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c |
1301 | @@ -226,9 +226,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) |
1302 | * Schedule the delayed work for reading the TX status |
1303 | * from the device. |
1304 | */ |
1305 | - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && |
1306 | - test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) |
1307 | - ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work); |
1308 | + ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work); |
1309 | } |
1310 | |
1311 | static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) |
1312 | @@ -237,6 +235,7 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) |
1313 | struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); |
1314 | struct queue_entry_priv_usb *entry_priv = entry->priv_data; |
1315 | u32 length; |
1316 | + int status; |
1317 | |
1318 | if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) |
1319 | return; |
1320 | @@ -253,7 +252,10 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) |
1321 | entry->skb->data, length, |
1322 | rt2x00usb_interrupt_txdone, entry); |
1323 | |
1324 | - if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) { |
1325 | + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); |
1326 | + if (status) { |
1327 | + if (status == -ENODEV) |
1328 | + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); |
1329 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); |
1330 | rt2x00lib_dmadone(entry); |
1331 | } |
1332 | @@ -424,9 +426,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb) |
1333 | * Schedule the delayed work for reading the RX status |
1334 | * from the device. |
1335 | */ |
1336 | - if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && |
1337 | - test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) |
1338 | - ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); |
1339 | + ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); |
1340 | } |
1341 | |
1342 | /* |
1343 | @@ -454,6 +454,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry) |
1344 | to_usb_device_intf(entry->queue->rt2x00dev->dev); |
1345 | struct queue_entry_priv_usb *entry_priv = entry->priv_data; |
1346 | int pipe; |
1347 | + int status; |
1348 | |
1349 | entry->flags = 0; |
1350 | |
1351 | @@ -464,7 +465,12 @@ void rt2x00usb_clear_entry(struct queue_entry *entry) |
1352 | rt2x00usb_interrupt_rxdone, entry); |
1353 | |
1354 | set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); |
1355 | - if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) { |
1356 | + |
1357 | + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); |
1358 | + if (status) { |
1359 | + if (status == -ENODEV) |
1360 | + clear_bit(DEVICE_STATE_PRESENT, |
1361 | + &entry->queue->rt2x00dev->flags); |
1362 | set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); |
1363 | rt2x00lib_dmadone(entry); |
1364 | } |
1365 | diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c |
1366 | index 1539d2c..6020580 100644 |
1367 | --- a/drivers/pci/pci-sysfs.c |
1368 | +++ b/drivers/pci/pci-sysfs.c |
1369 | @@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) |
1370 | attr->write = write_vpd_attr; |
1371 | retval = sysfs_create_bin_file(&dev->dev.kobj, attr); |
1372 | if (retval) { |
1373 | - kfree(dev->vpd->attr); |
1374 | + kfree(attr); |
1375 | return retval; |
1376 | } |
1377 | dev->vpd->attr = attr; |
1378 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
1379 | index 53a786f..bd80f63 100644 |
1380 | --- a/drivers/pci/quirks.c |
1381 | +++ b/drivers/pci/quirks.c |
1382 | @@ -533,6 +533,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) |
1383 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); |
1384 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); |
1385 | |
1386 | +#define ICH_PMBASE 0x40 |
1387 | +#define ICH_ACPI_CNTL 0x44 |
1388 | +#define ICH4_ACPI_EN 0x10 |
1389 | +#define ICH6_ACPI_EN 0x80 |
1390 | +#define ICH4_GPIOBASE 0x58 |
1391 | +#define ICH4_GPIO_CNTL 0x5c |
1392 | +#define ICH4_GPIO_EN 0x10 |
1393 | +#define ICH6_GPIOBASE 0x48 |
1394 | +#define ICH6_GPIO_CNTL 0x4c |
1395 | +#define ICH6_GPIO_EN 0x10 |
1396 | + |
1397 | /* |
1398 | * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at |
1399 | * 0x40 (128 bytes of ACPI, GPIO & TCO registers) |
1400 | @@ -541,12 +552,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui |
1401 | static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) |
1402 | { |
1403 | u32 region; |
1404 | + u8 enable; |
1405 | |
1406 | - pci_read_config_dword(dev, 0x40, ®ion); |
1407 | - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); |
1408 | + /* |
1409 | + * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict |
1410 | + * with low legacy (and fixed) ports. We don't know the decoding |
1411 | + * priority and can't tell whether the legacy device or the one created |
1412 | + * here is really at that address. This happens on boards with broken |
1413 | + * BIOSes. |
1414 | + */ |
1415 | + |
1416 | + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); |
1417 | + if (enable & ICH4_ACPI_EN) { |
1418 | + pci_read_config_dword(dev, ICH_PMBASE, ®ion); |
1419 | + region &= PCI_BASE_ADDRESS_IO_MASK; |
1420 | + if (region >= PCIBIOS_MIN_IO) |
1421 | + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, |
1422 | + "ICH4 ACPI/GPIO/TCO"); |
1423 | + } |
1424 | |
1425 | - pci_read_config_dword(dev, 0x58, ®ion); |
1426 | - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); |
1427 | + pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); |
1428 | + if (enable & ICH4_GPIO_EN) { |
1429 | + pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); |
1430 | + region &= PCI_BASE_ADDRESS_IO_MASK; |
1431 | + if (region >= PCIBIOS_MIN_IO) |
1432 | + quirk_io_region(dev, region, 64, |
1433 | + PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); |
1434 | + } |
1435 | } |
1436 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); |
1437 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); |
1438 | @@ -562,12 +594,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui |
1439 | static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) |
1440 | { |
1441 | u32 region; |
1442 | + u8 enable; |
1443 | |
1444 | - pci_read_config_dword(dev, 0x40, ®ion); |
1445 | - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); |
1446 | + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); |
1447 | + if (enable & ICH6_ACPI_EN) { |
1448 | + pci_read_config_dword(dev, ICH_PMBASE, ®ion); |
1449 | + region &= PCI_BASE_ADDRESS_IO_MASK; |
1450 | + if (region >= PCIBIOS_MIN_IO) |
1451 | + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, |
1452 | + "ICH6 ACPI/GPIO/TCO"); |
1453 | + } |
1454 | |
1455 | - pci_read_config_dword(dev, 0x48, ®ion); |
1456 | - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); |
1457 | + pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); |
1458 | + if (enable & ICH4_GPIO_EN) { |
1459 | + pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); |
1460 | + region &= PCI_BASE_ADDRESS_IO_MASK; |
1461 | + if (region >= PCIBIOS_MIN_IO) |
1462 | + quirk_io_region(dev, region, 64, |
1463 | + PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); |
1464 | + } |
1465 | } |
1466 | |
1467 | static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) |
1468 | @@ -2618,58 +2663,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, |
1469 | |
1470 | #endif /* CONFIG_PCI_MSI */ |
1471 | |
1472 | -#ifdef CONFIG_PCI_IOV |
1473 | - |
1474 | -/* |
1475 | - * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the |
1476 | - * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the |
1477 | - * old Flash Memory Space. |
1478 | - */ |
1479 | -static void __devinit quirk_i82576_sriov(struct pci_dev *dev) |
1480 | -{ |
1481 | - int pos, flags; |
1482 | - u32 bar, start, size; |
1483 | - |
1484 | - if (PAGE_SIZE > 0x10000) |
1485 | - return; |
1486 | - |
1487 | - flags = pci_resource_flags(dev, 0); |
1488 | - if ((flags & PCI_BASE_ADDRESS_SPACE) != |
1489 | - PCI_BASE_ADDRESS_SPACE_MEMORY || |
1490 | - (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != |
1491 | - PCI_BASE_ADDRESS_MEM_TYPE_32) |
1492 | - return; |
1493 | - |
1494 | - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); |
1495 | - if (!pos) |
1496 | - return; |
1497 | - |
1498 | - pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar); |
1499 | - if (bar & PCI_BASE_ADDRESS_MEM_MASK) |
1500 | - return; |
1501 | - |
1502 | - start = pci_resource_start(dev, 1); |
1503 | - size = pci_resource_len(dev, 1); |
1504 | - if (!start || size != 0x400000 || start & (size - 1)) |
1505 | - return; |
1506 | - |
1507 | - pci_resource_flags(dev, 1) = 0; |
1508 | - pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); |
1509 | - pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start); |
1510 | - pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2); |
1511 | - |
1512 | - dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n"); |
1513 | -} |
1514 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); |
1515 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); |
1516 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); |
1517 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); |
1518 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); |
1519 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); |
1520 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); |
1521 | - |
1522 | -#endif /* CONFIG_PCI_IOV */ |
1523 | - |
1524 | /* Allow manual resource allocation for PCI hotplug bridges |
1525 | * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For |
1526 | * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), |
1527 | diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c |
1528 | index 6b72932..30f2b33 100644 |
1529 | --- a/drivers/scsi/device_handler/scsi_dh_alua.c |
1530 | +++ b/drivers/scsi/device_handler/scsi_dh_alua.c |
1531 | @@ -285,7 +285,8 @@ static void stpg_endio(struct request *req, int error) |
1532 | print_alua_state(h->state)); |
1533 | } |
1534 | done: |
1535 | - blk_put_request(req); |
1536 | + req->end_io_data = NULL; |
1537 | + __blk_put_request(req->q, req); |
1538 | if (h->callback_fn) { |
1539 | h->callback_fn(h->callback_data, err); |
1540 | h->callback_fn = h->callback_data = NULL; |
1541 | diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c |
1542 | index b47d7aa..e2fe165 100644 |
1543 | --- a/drivers/staging/tidspbridge/rmgr/proc.c |
1544 | +++ b/drivers/staging/tidspbridge/rmgr/proc.c |
1545 | @@ -781,12 +781,14 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, |
1546 | (u32)pmpu_addr, |
1547 | ul_size, dir); |
1548 | |
1549 | + mutex_lock(&proc_lock); |
1550 | + |
1551 | /* find requested memory are in cached mapping information */ |
1552 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); |
1553 | if (!map_obj) { |
1554 | pr_err("%s: find_containing_mapping failed\n", __func__); |
1555 | status = -EFAULT; |
1556 | - goto err_out; |
1557 | + goto no_map; |
1558 | } |
1559 | |
1560 | if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { |
1561 | @@ -795,6 +797,8 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, |
1562 | status = -EFAULT; |
1563 | } |
1564 | |
1565 | +no_map: |
1566 | + mutex_unlock(&proc_lock); |
1567 | err_out: |
1568 | |
1569 | return status; |
1570 | @@ -819,21 +823,24 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, |
1571 | (u32)pmpu_addr, |
1572 | ul_size, dir); |
1573 | |
1574 | + mutex_lock(&proc_lock); |
1575 | + |
1576 | /* find requested memory are in cached mapping information */ |
1577 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); |
1578 | if (!map_obj) { |
1579 | pr_err("%s: find_containing_mapping failed\n", __func__); |
1580 | status = -EFAULT; |
1581 | - goto err_out; |
1582 | + goto no_map; |
1583 | } |
1584 | |
1585 | if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { |
1586 | pr_err("%s: InValid address parameters %p %x\n", |
1587 | __func__, pmpu_addr, ul_size); |
1588 | status = -EFAULT; |
1589 | - goto err_out; |
1590 | } |
1591 | |
1592 | +no_map: |
1593 | + mutex_unlock(&proc_lock); |
1594 | err_out: |
1595 | return status; |
1596 | } |
1597 | @@ -1726,9 +1733,8 @@ int proc_un_map(void *hprocessor, void *map_addr, |
1598 | (p_proc_object->hbridge_context, va_align, size_align); |
1599 | } |
1600 | |
1601 | - mutex_unlock(&proc_lock); |
1602 | if (status) |
1603 | - goto func_end; |
1604 | + goto unmap_failed; |
1605 | |
1606 | /* |
1607 | * A successful unmap should be followed by removal of map_obj |
1608 | @@ -1737,6 +1743,9 @@ int proc_un_map(void *hprocessor, void *map_addr, |
1609 | */ |
1610 | remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); |
1611 | |
1612 | +unmap_failed: |
1613 | + mutex_unlock(&proc_lock); |
1614 | + |
1615 | func_end: |
1616 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", |
1617 | __func__, hprocessor, map_addr, status); |
1618 | diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h |
1619 | index 2b87a00..7f06e26 100644 |
1620 | --- a/drivers/staging/winbond/core.h |
1621 | +++ b/drivers/staging/winbond/core.h |
1622 | @@ -3,6 +3,7 @@ |
1623 | |
1624 | #include <linux/wireless.h> |
1625 | #include <linux/types.h> |
1626 | +#include <linux/delay.h> |
1627 | |
1628 | #include "wbhal_s.h" |
1629 | #include "mto.h" |
1630 | diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c |
1631 | index 4de52dc..b1e1880 100644 |
1632 | --- a/drivers/usb/core/hcd-pci.c |
1633 | +++ b/drivers/usb/core/hcd-pci.c |
1634 | @@ -364,8 +364,7 @@ static int check_root_hub_suspended(struct device *dev) |
1635 | struct pci_dev *pci_dev = to_pci_dev(dev); |
1636 | struct usb_hcd *hcd = pci_get_drvdata(pci_dev); |
1637 | |
1638 | - if (!(hcd->state == HC_STATE_SUSPENDED || |
1639 | - hcd->state == HC_STATE_HALT)) { |
1640 | + if (HCD_RH_RUNNING(hcd)) { |
1641 | dev_warn(dev, "Root hub is not suspended\n"); |
1642 | return -EBUSY; |
1643 | } |
1644 | @@ -387,7 +386,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) |
1645 | if (retval) |
1646 | return retval; |
1647 | |
1648 | - if (hcd->driver->pci_suspend) { |
1649 | + if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { |
1650 | /* Optimization: Don't suspend if a root-hub wakeup is |
1651 | * pending and it would cause the HCD to wake up anyway. |
1652 | */ |
1653 | @@ -428,7 +427,7 @@ static int resume_common(struct device *dev, int event) |
1654 | struct usb_hcd *hcd = pci_get_drvdata(pci_dev); |
1655 | int retval; |
1656 | |
1657 | - if (hcd->state != HC_STATE_SUSPENDED) { |
1658 | + if (HCD_RH_RUNNING(hcd)) { |
1659 | dev_dbg(dev, "can't resume, not suspended!\n"); |
1660 | return 0; |
1661 | } |
1662 | @@ -443,7 +442,7 @@ static int resume_common(struct device *dev, int event) |
1663 | |
1664 | clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); |
1665 | |
1666 | - if (hcd->driver->pci_resume) { |
1667 | + if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { |
1668 | if (event != PM_EVENT_AUTO_RESUME) |
1669 | wait_for_companions(pci_dev, hcd); |
1670 | |
1671 | @@ -476,10 +475,10 @@ static int hcd_pci_suspend_noirq(struct device *dev) |
1672 | |
1673 | pci_save_state(pci_dev); |
1674 | |
1675 | - /* If the root hub is HALTed rather than SUSPENDed, |
1676 | + /* If the root hub is dead rather than suspended, |
1677 | * disallow remote wakeup. |
1678 | */ |
1679 | - if (hcd->state == HC_STATE_HALT) |
1680 | + if (HCD_DEAD(hcd)) |
1681 | device_set_wakeup_enable(dev, 0); |
1682 | dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); |
1683 | |
1684 | diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c |
1685 | index 8aa6b51..d2c10d0 100644 |
1686 | --- a/drivers/usb/core/hcd.c |
1687 | +++ b/drivers/usb/core/hcd.c |
1688 | @@ -984,7 +984,7 @@ static int register_root_hub(struct usb_hcd *hcd) |
1689 | spin_unlock_irq (&hcd_root_hub_lock); |
1690 | |
1691 | /* Did the HC die before the root hub was registered? */ |
1692 | - if (hcd->state == HC_STATE_HALT) |
1693 | + if (HCD_DEAD(hcd) || hcd->state == HC_STATE_HALT) |
1694 | usb_hc_died (hcd); /* This time clean up */ |
1695 | } |
1696 | |
1697 | @@ -1090,13 +1090,10 @@ int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) |
1698 | * Check the host controller's state and add the URB to the |
1699 | * endpoint's queue. |
1700 | */ |
1701 | - switch (hcd->state) { |
1702 | - case HC_STATE_RUNNING: |
1703 | - case HC_STATE_RESUMING: |
1704 | + if (HCD_RH_RUNNING(hcd)) { |
1705 | urb->unlinked = 0; |
1706 | list_add_tail(&urb->urb_list, &urb->ep->urb_list); |
1707 | - break; |
1708 | - default: |
1709 | + } else { |
1710 | rc = -ESHUTDOWN; |
1711 | goto done; |
1712 | } |
1713 | @@ -1914,7 +1911,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev) |
1714 | { |
1715 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
1716 | |
1717 | - if (!HC_IS_RUNNING (hcd->state)) |
1718 | + if (!HCD_RH_RUNNING(hcd)) |
1719 | return -ESHUTDOWN; |
1720 | return hcd->driver->get_frame_number (hcd); |
1721 | } |
1722 | @@ -1931,9 +1928,15 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) |
1723 | |
1724 | dev_dbg(&rhdev->dev, "bus %s%s\n", |
1725 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend"); |
1726 | + if (HCD_DEAD(hcd)) { |
1727 | + dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); |
1728 | + return 0; |
1729 | + } |
1730 | + |
1731 | if (!hcd->driver->bus_suspend) { |
1732 | status = -ENOENT; |
1733 | } else { |
1734 | + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1735 | hcd->state = HC_STATE_QUIESCING; |
1736 | status = hcd->driver->bus_suspend(hcd); |
1737 | } |
1738 | @@ -1941,7 +1944,12 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) |
1739 | usb_set_device_state(rhdev, USB_STATE_SUSPENDED); |
1740 | hcd->state = HC_STATE_SUSPENDED; |
1741 | } else { |
1742 | - hcd->state = old_state; |
1743 | + spin_lock_irq(&hcd_root_hub_lock); |
1744 | + if (!HCD_DEAD(hcd)) { |
1745 | + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1746 | + hcd->state = old_state; |
1747 | + } |
1748 | + spin_unlock_irq(&hcd_root_hub_lock); |
1749 | dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", |
1750 | "suspend", status); |
1751 | } |
1752 | @@ -1956,9 +1964,13 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) |
1753 | |
1754 | dev_dbg(&rhdev->dev, "usb %s%s\n", |
1755 | (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); |
1756 | + if (HCD_DEAD(hcd)) { |
1757 | + dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); |
1758 | + return 0; |
1759 | + } |
1760 | if (!hcd->driver->bus_resume) |
1761 | return -ENOENT; |
1762 | - if (hcd->state == HC_STATE_RUNNING) |
1763 | + if (HCD_RH_RUNNING(hcd)) |
1764 | return 0; |
1765 | |
1766 | hcd->state = HC_STATE_RESUMING; |
1767 | @@ -1967,10 +1979,15 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) |
1768 | if (status == 0) { |
1769 | /* TRSMRCY = 10 msec */ |
1770 | msleep(10); |
1771 | - usb_set_device_state(rhdev, rhdev->actconfig |
1772 | - ? USB_STATE_CONFIGURED |
1773 | - : USB_STATE_ADDRESS); |
1774 | - hcd->state = HC_STATE_RUNNING; |
1775 | + spin_lock_irq(&hcd_root_hub_lock); |
1776 | + if (!HCD_DEAD(hcd)) { |
1777 | + usb_set_device_state(rhdev, rhdev->actconfig |
1778 | + ? USB_STATE_CONFIGURED |
1779 | + : USB_STATE_ADDRESS); |
1780 | + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1781 | + hcd->state = HC_STATE_RUNNING; |
1782 | + } |
1783 | + spin_unlock_irq(&hcd_root_hub_lock); |
1784 | } else { |
1785 | hcd->state = old_state; |
1786 | dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", |
1787 | @@ -2081,7 +2098,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) |
1788 | */ |
1789 | local_irq_save(flags); |
1790 | |
1791 | - if (unlikely(hcd->state == HC_STATE_HALT || !HCD_HW_ACCESSIBLE(hcd))) { |
1792 | + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) { |
1793 | rc = IRQ_NONE; |
1794 | } else if (hcd->driver->irq(hcd) == IRQ_NONE) { |
1795 | rc = IRQ_NONE; |
1796 | @@ -2115,6 +2132,8 @@ void usb_hc_died (struct usb_hcd *hcd) |
1797 | dev_err (hcd->self.controller, "HC died; cleaning up\n"); |
1798 | |
1799 | spin_lock_irqsave (&hcd_root_hub_lock, flags); |
1800 | + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1801 | + set_bit(HCD_FLAG_DEAD, &hcd->flags); |
1802 | if (hcd->rh_registered) { |
1803 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
1804 | |
1805 | @@ -2257,6 +2276,12 @@ int usb_add_hcd(struct usb_hcd *hcd, |
1806 | */ |
1807 | device_init_wakeup(&rhdev->dev, 1); |
1808 | |
1809 | + /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is |
1810 | + * registered. But since the controller can die at any time, |
1811 | + * let's initialize the flag before touching the hardware. |
1812 | + */ |
1813 | + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1814 | + |
1815 | /* "reset" is misnamed; its role is now one-time init. the controller |
1816 | * should already have been reset (and boot firmware kicked off etc). |
1817 | */ |
1818 | @@ -2324,6 +2349,7 @@ int usb_add_hcd(struct usb_hcd *hcd, |
1819 | return retval; |
1820 | |
1821 | error_create_attr_group: |
1822 | + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1823 | if (HC_IS_RUNNING(hcd->state)) |
1824 | hcd->state = HC_STATE_QUIESCING; |
1825 | spin_lock_irq(&hcd_root_hub_lock); |
1826 | @@ -2376,6 +2402,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) |
1827 | usb_get_dev(rhdev); |
1828 | sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); |
1829 | |
1830 | + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); |
1831 | if (HC_IS_RUNNING (hcd->state)) |
1832 | hcd->state = HC_STATE_QUIESCING; |
1833 | |
1834 | diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c |
1835 | index c14fc08..ae334b0 100644 |
1836 | --- a/drivers/usb/core/urb.c |
1837 | +++ b/drivers/usb/core/urb.c |
1838 | @@ -366,7 +366,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) |
1839 | if (xfertype == USB_ENDPOINT_XFER_ISOC) { |
1840 | int n, len; |
1841 | |
1842 | - /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */ |
1843 | + /* SuperSpeed isoc endpoints have up to 16 bursts of up to |
1844 | + * 3 packets each |
1845 | + */ |
1846 | + if (dev->speed == USB_SPEED_SUPER) { |
1847 | + int burst = 1 + ep->ss_ep_comp.bMaxBurst; |
1848 | + int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); |
1849 | + max *= burst; |
1850 | + max *= mult; |
1851 | + } |
1852 | + |
1853 | /* "high bandwidth" mode, 1-3 packets/uframe? */ |
1854 | if (dev->speed == USB_SPEED_HIGH) { |
1855 | int mult = 1 + ((max >> 11) & 0x03); |
1856 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c |
1857 | index 8a515f0..72ae77c 100644 |
1858 | --- a/drivers/usb/host/ehci-hub.c |
1859 | +++ b/drivers/usb/host/ehci-hub.c |
1860 | @@ -106,6 +106,27 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) |
1861 | ehci->owned_ports = 0; |
1862 | } |
1863 | |
1864 | +static int ehci_port_change(struct ehci_hcd *ehci) |
1865 | +{ |
1866 | + int i = HCS_N_PORTS(ehci->hcs_params); |
1867 | + |
1868 | + /* First check if the controller indicates a change event */ |
1869 | + |
1870 | + if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) |
1871 | + return 1; |
1872 | + |
1873 | + /* |
1874 | + * Not all controllers appear to update this while going from D3 to D0, |
1875 | + * so check the individual port status registers as well |
1876 | + */ |
1877 | + |
1878 | + while (i--) |
1879 | + if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) |
1880 | + return 1; |
1881 | + |
1882 | + return 0; |
1883 | +} |
1884 | + |
1885 | static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, |
1886 | bool suspending, bool do_wakeup) |
1887 | { |
1888 | @@ -173,7 +194,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, |
1889 | } |
1890 | |
1891 | /* Does the root hub have a port wakeup pending? */ |
1892 | - if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) |
1893 | + if (!suspending && ehci_port_change(ehci)) |
1894 | usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); |
1895 | |
1896 | spin_unlock_irqrestore(&ehci->lock, flags); |
1897 | diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c |
1898 | index bdba8c5..c470cc8 100644 |
1899 | --- a/drivers/usb/host/isp1760-hcd.c |
1900 | +++ b/drivers/usb/host/isp1760-hcd.c |
1901 | @@ -33,6 +33,7 @@ struct isp1760_hcd { |
1902 | struct inter_packet_info atl_ints[32]; |
1903 | struct inter_packet_info int_ints[32]; |
1904 | struct memory_chunk memory_pool[BLOCKS]; |
1905 | + u32 atl_queued; |
1906 | |
1907 | /* periodic schedule support */ |
1908 | #define DEFAULT_I_TDPS 1024 |
1909 | @@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, |
1910 | skip_map &= ~queue_entry; |
1911 | isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); |
1912 | |
1913 | + priv->atl_queued++; |
1914 | + if (priv->atl_queued == 2) |
1915 | + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, |
1916 | + hcd->regs + HC_INTERRUPT_ENABLE); |
1917 | + |
1918 | buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); |
1919 | buffstatus |= ATL_BUFFER; |
1920 | isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); |
1921 | @@ -992,6 +998,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) |
1922 | u32 dw3; |
1923 | |
1924 | status = 0; |
1925 | + priv->atl_queued--; |
1926 | |
1927 | queue_entry = __ffs(done_map); |
1928 | done_map &= ~(1 << queue_entry); |
1929 | @@ -1054,11 +1061,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) |
1930 | * device is not able to send data fast enough. |
1931 | * This happens mostly on slower hardware. |
1932 | */ |
1933 | - printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: " |
1934 | - "%d of %zu done: %08x cur: %08x\n", qtd, |
1935 | - urb, qh, PTD_XFERRED_LENGTH(dw3), |
1936 | - qtd->length, done_map, |
1937 | - (1 << queue_entry)); |
1938 | |
1939 | /* RL counter = ERR counter */ |
1940 | dw3 &= ~(0xf << 19); |
1941 | @@ -1086,6 +1088,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) |
1942 | priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + |
1943 | atl_regs, sizeof(ptd)); |
1944 | |
1945 | + priv->atl_queued++; |
1946 | + if (priv->atl_queued == 2) |
1947 | + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, |
1948 | + usb_hcd->regs + HC_INTERRUPT_ENABLE); |
1949 | + |
1950 | buffstatus = isp1760_readl(usb_hcd->regs + |
1951 | HC_BUFFER_STATUS_REG); |
1952 | buffstatus |= ATL_BUFFER; |
1953 | @@ -1191,6 +1198,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) |
1954 | skip_map = isp1760_readl(usb_hcd->regs + |
1955 | HC_ATL_PTD_SKIPMAP_REG); |
1956 | } |
1957 | + if (priv->atl_queued <= 1) |
1958 | + isp1760_writel(INTERRUPT_ENABLE_MASK, |
1959 | + usb_hcd->regs + HC_INTERRUPT_ENABLE); |
1960 | } |
1961 | |
1962 | static void do_intl_int(struct usb_hcd *usb_hcd) |
1963 | @@ -1770,7 +1780,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) |
1964 | goto leave; |
1965 | |
1966 | isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); |
1967 | - if (imask & HC_ATL_INT) |
1968 | + if (imask & (HC_ATL_INT | HC_SOT_INT)) |
1969 | do_atl_int(usb_hcd); |
1970 | |
1971 | if (imask & HC_INTL_INT) |
1972 | diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h |
1973 | index 6931ef5..612bce5 100644 |
1974 | --- a/drivers/usb/host/isp1760-hcd.h |
1975 | +++ b/drivers/usb/host/isp1760-hcd.h |
1976 | @@ -69,6 +69,7 @@ void deinit_kmem_cache(void); |
1977 | |
1978 | #define HC_INTERRUPT_ENABLE 0x314 |
1979 | #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) |
1980 | +#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) |
1981 | |
1982 | #define HC_ISO_INT (1 << 9) |
1983 | #define HC_ATL_INT (1 << 8) |
1984 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
1985 | index 09bb3c9..6478fff 100644 |
1986 | --- a/drivers/usb/host/xhci-ring.c |
1987 | +++ b/drivers/usb/host/xhci-ring.c |
1988 | @@ -505,15 +505,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
1989 | state->new_cycle_state = ~(state->new_cycle_state) & 0x1; |
1990 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
1991 | |
1992 | + /* |
1993 | + * If there is only one segment in a ring, find_trb_seg()'s while loop |
1994 | + * will not run, and it will return before it has a chance to see if it |
1995 | + * needs to toggle the cycle bit. It can't tell if the stalled transfer |
1996 | + * ended just before the link TRB on a one-segment ring, or if the TD |
1997 | + * wrapped around the top of the ring, because it doesn't have the TD in |
1998 | + * question. Look for the one-segment case where stalled TRB's address |
1999 | + * is greater than the new dequeue pointer address. |
2000 | + */ |
2001 | + if (ep_ring->first_seg == ep_ring->first_seg->next && |
2002 | + state->new_deq_ptr < dev->eps[ep_index].stopped_trb) |
2003 | + state->new_cycle_state ^= 0x1; |
2004 | + xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); |
2005 | + |
2006 | /* Don't update the ring cycle state for the producer (us). */ |
2007 | xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", |
2008 | state->new_deq_seg); |
2009 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); |
2010 | xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", |
2011 | (unsigned long long) addr); |
2012 | - xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); |
2013 | - ep_ring->dequeue = state->new_deq_ptr; |
2014 | - ep_ring->deq_seg = state->new_deq_seg; |
2015 | } |
2016 | |
2017 | static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, |
2018 | @@ -956,9 +967,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, |
2019 | } else { |
2020 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", |
2021 | ep_ctx->deq); |
2022 | + if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, |
2023 | + dev->eps[ep_index].queued_deq_ptr) == |
2024 | + (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { |
2025 | + /* Update the ring's dequeue segment and dequeue pointer |
2026 | + * to reflect the new position. |
2027 | + */ |
2028 | + ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; |
2029 | + ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; |
2030 | + } else { |
2031 | + xhci_warn(xhci, "Mismatch between completed Set TR Deq " |
2032 | + "Ptr command & xHCI internal state.\n"); |
2033 | + xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", |
2034 | + dev->eps[ep_index].queued_deq_seg, |
2035 | + dev->eps[ep_index].queued_deq_ptr); |
2036 | + } |
2037 | } |
2038 | |
2039 | dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; |
2040 | + dev->eps[ep_index].queued_deq_seg = NULL; |
2041 | + dev->eps[ep_index].queued_deq_ptr = NULL; |
2042 | /* Restart any rings with pending URBs */ |
2043 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
2044 | } |
2045 | @@ -3218,6 +3246,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, |
2046 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); |
2047 | u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); |
2048 | u32 type = TRB_TYPE(TRB_SET_DEQ); |
2049 | + struct xhci_virt_ep *ep; |
2050 | |
2051 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); |
2052 | if (addr == 0) { |
2053 | @@ -3226,6 +3255,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, |
2054 | deq_seg, deq_ptr); |
2055 | return 0; |
2056 | } |
2057 | + ep = &xhci->devs[slot_id]->eps[ep_index]; |
2058 | + if ((ep->ep_state & SET_DEQ_PENDING)) { |
2059 | + xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); |
2060 | + xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); |
2061 | + return 0; |
2062 | + } |
2063 | + ep->queued_deq_seg = deq_seg; |
2064 | + ep->queued_deq_ptr = deq_ptr; |
2065 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, |
2066 | upper_32_bits(addr), trb_stream_id, |
2067 | trb_slot_id | trb_ep_index | type, false); |
2068 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
2069 | index 170c367..2ba3f66 100644 |
2070 | --- a/drivers/usb/host/xhci.h |
2071 | +++ b/drivers/usb/host/xhci.h |
2072 | @@ -648,6 +648,9 @@ struct xhci_ep_ctx { |
2073 | #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) |
2074 | #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) |
2075 | |
2076 | +/* deq bitmasks */ |
2077 | +#define EP_CTX_CYCLE_MASK (1 << 0) |
2078 | + |
2079 | |
2080 | /** |
2081 | * struct xhci_input_control_context |
2082 | @@ -750,6 +753,12 @@ struct xhci_virt_ep { |
2083 | struct timer_list stop_cmd_timer; |
2084 | int stop_cmds_pending; |
2085 | struct xhci_hcd *xhci; |
2086 | + /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue |
2087 | + * command. We'll need to update the ring's dequeue segment and dequeue |
2088 | + * pointer after the command completes. |
2089 | + */ |
2090 | + struct xhci_segment *queued_deq_seg; |
2091 | + union xhci_trb *queued_deq_ptr; |
2092 | /* |
2093 | * Sometimes the xHC can not process isochronous endpoint ring quickly |
2094 | * enough, and it will miss some isoc tds on the ring and generate |
2095 | diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c |
2096 | index 7b8815d..14ac87e 100644 |
2097 | --- a/drivers/usb/serial/ch341.c |
2098 | +++ b/drivers/usb/serial/ch341.c |
2099 | @@ -75,6 +75,7 @@ static int debug; |
2100 | static const struct usb_device_id id_table[] = { |
2101 | { USB_DEVICE(0x4348, 0x5523) }, |
2102 | { USB_DEVICE(0x1a86, 0x7523) }, |
2103 | + { USB_DEVICE(0x1a86, 0x5523) }, |
2104 | { }, |
2105 | }; |
2106 | MODULE_DEVICE_TABLE(usb, id_table); |
2107 | diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c |
2108 | index bd5bd85..b382d9a 100644 |
2109 | --- a/drivers/usb/serial/kobil_sct.c |
2110 | +++ b/drivers/usb/serial/kobil_sct.c |
2111 | @@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) |
2112 | } |
2113 | |
2114 | tty = tty_port_tty_get(&port->port); |
2115 | - if (urb->actual_length) { |
2116 | + if (tty && urb->actual_length) { |
2117 | |
2118 | /* BEGIN DEBUG */ |
2119 | /* |
2120 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2121 | index 356c870..14cd1c0 100644 |
2122 | --- a/drivers/usb/serial/option.c |
2123 | +++ b/drivers/usb/serial/option.c |
2124 | @@ -653,7 +653,8 @@ static const struct usb_device_id option_ids[] = { |
2125 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, |
2126 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, |
2127 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, |
2128 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, |
2129 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, |
2130 | + 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, |
2131 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, |
2132 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, |
2133 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, |
2134 | diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c |
2135 | index 546a521..2ff90a9 100644 |
2136 | --- a/drivers/usb/serial/usb-serial.c |
2137 | +++ b/drivers/usb/serial/usb-serial.c |
2138 | @@ -911,9 +911,8 @@ int usb_serial_probe(struct usb_interface *interface, |
2139 | dev_err(&interface->dev, "No free urbs available\n"); |
2140 | goto probe_error; |
2141 | } |
2142 | - buffer_size = serial->type->bulk_in_size; |
2143 | - if (!buffer_size) |
2144 | - buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); |
2145 | + buffer_size = max_t(int, serial->type->bulk_in_size, |
2146 | + le16_to_cpu(endpoint->wMaxPacketSize)); |
2147 | port->bulk_in_size = buffer_size; |
2148 | port->bulk_in_endpointAddress = endpoint->bEndpointAddress; |
2149 | port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); |
2150 | diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
2151 | index 66836d8..c22a3d1 100644 |
2152 | --- a/fs/btrfs/file.c |
2153 | +++ b/fs/btrfs/file.c |
2154 | @@ -69,6 +69,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, |
2155 | |
2156 | /* Flush processor's dcache for this page */ |
2157 | flush_dcache_page(page); |
2158 | + |
2159 | + /* |
2160 | + * if we get a partial write, we can end up with |
2161 | + * partially up to date pages. These add |
2162 | + * a lot of complexity, so make sure they don't |
2163 | + * happen by forcing this copy to be retried. |
2164 | + * |
2165 | + * The rest of the btrfs_file_write code will fall |
2166 | + * back to page at a time copies after we return 0. |
2167 | + */ |
2168 | + if (!PageUptodate(page) && copied < count) |
2169 | + copied = 0; |
2170 | + |
2171 | iov_iter_advance(i, copied); |
2172 | write_bytes -= copied; |
2173 | total_copied += copied; |
2174 | diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c |
2175 | index bce9dce..d1e0d89 100644 |
2176 | --- a/fs/ext3/namei.c |
2177 | +++ b/fs/ext3/namei.c |
2178 | @@ -1549,8 +1549,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, |
2179 | goto cleanup; |
2180 | node2 = (struct dx_node *)(bh2->b_data); |
2181 | entries2 = node2->entries; |
2182 | + memset(&node2->fake, 0, sizeof(struct fake_dirent)); |
2183 | node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize); |
2184 | - node2->fake.inode = 0; |
2185 | BUFFER_TRACE(frame->bh, "get_write_access"); |
2186 | err = ext3_journal_get_write_access(handle, frame->bh); |
2187 | if (err) |
2188 | diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c |
2189 | index b382a1b..33a038d 100644 |
2190 | --- a/fs/nfs/nfs2xdr.c |
2191 | +++ b/fs/nfs/nfs2xdr.c |
2192 | @@ -477,11 +477,13 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se |
2193 | entry->ino = ntohl(*p++); |
2194 | entry->len = ntohl(*p++); |
2195 | |
2196 | - p = xdr_inline_decode(xdr, entry->len + 4); |
2197 | + p = xdr_inline_decode(xdr, entry->len); |
2198 | if (unlikely(!p)) |
2199 | goto out_overflow; |
2200 | entry->name = (const char *) p; |
2201 | - p += XDR_QUADLEN(entry->len); |
2202 | + p = xdr_inline_decode(xdr, 4); |
2203 | + if (unlikely(!p)) |
2204 | + goto out_overflow; |
2205 | entry->prev_cookie = entry->cookie; |
2206 | entry->cookie = ntohl(*p++); |
2207 | |
2208 | diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c |
2209 | index ba91236..dcd934f 100644 |
2210 | --- a/fs/nfs/nfs3xdr.c |
2211 | +++ b/fs/nfs/nfs3xdr.c |
2212 | @@ -614,11 +614,13 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s |
2213 | p = xdr_decode_hyper(p, &entry->ino); |
2214 | entry->len = ntohl(*p++); |
2215 | |
2216 | - p = xdr_inline_decode(xdr, entry->len + 8); |
2217 | + p = xdr_inline_decode(xdr, entry->len); |
2218 | if (unlikely(!p)) |
2219 | goto out_overflow; |
2220 | entry->name = (const char *) p; |
2221 | - p += XDR_QUADLEN(entry->len); |
2222 | + p = xdr_inline_decode(xdr, 8); |
2223 | + if (unlikely(!p)) |
2224 | + goto out_overflow; |
2225 | entry->prev_cookie = entry->cookie; |
2226 | p = xdr_decode_hyper(p, &entry->cookie); |
2227 | |
2228 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
2229 | index c2c7a6b..d851c07 100644 |
2230 | --- a/fs/nfs/nfs4proc.c |
2231 | +++ b/fs/nfs/nfs4proc.c |
2232 | @@ -3226,7 +3226,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen, |
2233 | spages = pages; |
2234 | |
2235 | do { |
2236 | - len = min(PAGE_CACHE_SIZE, buflen); |
2237 | + len = min_t(size_t, PAGE_CACHE_SIZE, buflen); |
2238 | newpage = alloc_page(GFP_KERNEL); |
2239 | |
2240 | if (newpage == NULL) |
2241 | diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c |
2242 | index 903908a..c541093 100644 |
2243 | --- a/fs/nfs/nfsroot.c |
2244 | +++ b/fs/nfs/nfsroot.c |
2245 | @@ -86,11 +86,14 @@ |
2246 | /* Default path we try to mount. "%s" gets replaced by our IP address */ |
2247 | #define NFS_ROOT "/tftpboot/%s" |
2248 | |
2249 | +/* Default NFSROOT mount options. */ |
2250 | +#define NFS_DEF_OPTIONS "udp" |
2251 | + |
2252 | /* Parameters passed from the kernel command line */ |
2253 | static char nfs_root_parms[256] __initdata = ""; |
2254 | |
2255 | /* Text-based mount options passed to super.c */ |
2256 | -static char nfs_root_options[256] __initdata = ""; |
2257 | +static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS; |
2258 | |
2259 | /* Address of NFS server */ |
2260 | static __be32 servaddr __initdata = htonl(INADDR_NONE); |
2261 | @@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src, |
2262 | } |
2263 | |
2264 | static int __init root_nfs_cat(char *dest, const char *src, |
2265 | - const size_t destlen) |
2266 | + const size_t destlen) |
2267 | { |
2268 | + size_t len = strlen(dest); |
2269 | + |
2270 | + if (len && dest[len - 1] != ',') |
2271 | + if (strlcat(dest, ",", destlen) > destlen) |
2272 | + return -1; |
2273 | + |
2274 | if (strlcat(dest, src, destlen) > destlen) |
2275 | return -1; |
2276 | return 0; |
2277 | @@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, |
2278 | if (root_nfs_cat(nfs_root_options, incoming, |
2279 | sizeof(nfs_root_options))) |
2280 | return -1; |
2281 | - |
2282 | - /* |
2283 | - * Possibly prepare for more options to be appended |
2284 | - */ |
2285 | - if (nfs_root_options[0] != '\0' && |
2286 | - nfs_root_options[strlen(nfs_root_options)] != ',') |
2287 | - if (root_nfs_cat(nfs_root_options, ",", |
2288 | - sizeof(nfs_root_options))) |
2289 | - return -1; |
2290 | - |
2291 | return 0; |
2292 | } |
2293 | |
2294 | @@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, |
2295 | */ |
2296 | static int __init root_nfs_data(char *cmdline) |
2297 | { |
2298 | - char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; |
2299 | + char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; |
2300 | int len, retval = -1; |
2301 | char *tmp = NULL; |
2302 | const size_t tmplen = sizeof(nfs_export_path); |
2303 | @@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline) |
2304 | * Append mandatory options for nfsroot so they override |
2305 | * what has come before |
2306 | */ |
2307 | - snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", |
2308 | + snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4", |
2309 | &servaddr); |
2310 | - if (root_nfs_cat(nfs_root_options, addr_option, |
2311 | + if (root_nfs_cat(nfs_root_options, mand_options, |
2312 | sizeof(nfs_root_options))) |
2313 | goto out_optionstoolong; |
2314 | |
2315 | diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c |
2316 | index 48cec7c..764b86a 100644 |
2317 | --- a/fs/partitions/osf.c |
2318 | +++ b/fs/partitions/osf.c |
2319 | @@ -10,10 +10,13 @@ |
2320 | #include "check.h" |
2321 | #include "osf.h" |
2322 | |
2323 | +#define MAX_OSF_PARTITIONS 18 |
2324 | + |
2325 | int osf_partition(struct parsed_partitions *state) |
2326 | { |
2327 | int i; |
2328 | int slot = 1; |
2329 | + unsigned int npartitions; |
2330 | Sector sect; |
2331 | unsigned char *data; |
2332 | struct disklabel { |
2333 | @@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state) |
2334 | u8 p_fstype; |
2335 | u8 p_frag; |
2336 | __le16 p_cpg; |
2337 | - } d_partitions[8]; |
2338 | + } d_partitions[MAX_OSF_PARTITIONS]; |
2339 | } * label; |
2340 | struct d_partition * partition; |
2341 | |
2342 | @@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state) |
2343 | put_dev_sector(sect); |
2344 | return 0; |
2345 | } |
2346 | - for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { |
2347 | + npartitions = le16_to_cpu(label->d_npartitions); |
2348 | + if (npartitions > MAX_OSF_PARTITIONS) { |
2349 | + put_dev_sector(sect); |
2350 | + return 0; |
2351 | + } |
2352 | + for (i = 0 ; i < npartitions; i++, partition++) { |
2353 | if (slot == state->limit) |
2354 | break; |
2355 | if (le32_to_cpu(partition->p_size)) |
2356 | diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h |
2357 | index dcd6a7c..ca29e03 100644 |
2358 | --- a/include/linux/ftrace.h |
2359 | +++ b/include/linux/ftrace.h |
2360 | @@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void); |
2361 | |
2362 | extern void ftrace_graph_init_task(struct task_struct *t); |
2363 | extern void ftrace_graph_exit_task(struct task_struct *t); |
2364 | +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); |
2365 | |
2366 | static inline int task_curr_ret_stack(struct task_struct *t) |
2367 | { |
2368 | @@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void) |
2369 | |
2370 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
2371 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } |
2372 | +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } |
2373 | |
2374 | static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
2375 | trace_func_graph_ent_t entryfunc) |
2376 | diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h |
2377 | index f917bbb..b8eb2b5 100644 |
2378 | --- a/include/linux/usb/ch9.h |
2379 | +++ b/include/linux/usb/ch9.h |
2380 | @@ -575,6 +575,8 @@ struct usb_ss_ep_comp_descriptor { |
2381 | #define USB_DT_SS_EP_COMP_SIZE 6 |
2382 | /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ |
2383 | #define USB_SS_MAX_STREAMS(p) (1 << (p & 0x1f)) |
2384 | +/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ |
2385 | +#define USB_SS_MULT(p) (1 + ((p) & 0x3)) |
2386 | |
2387 | /*-------------------------------------------------------------------------*/ |
2388 | |
2389 | diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h |
2390 | index 6c37d78..91ad20f 100644 |
2391 | --- a/include/linux/usb/hcd.h |
2392 | +++ b/include/linux/usb/hcd.h |
2393 | @@ -99,6 +99,8 @@ struct usb_hcd { |
2394 | #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ |
2395 | #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ |
2396 | #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ |
2397 | +#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ |
2398 | +#define HCD_FLAG_DEAD 6 /* controller has died? */ |
2399 | |
2400 | /* The flags can be tested using these macros; they are likely to |
2401 | * be slightly faster than test_bit(). |
2402 | @@ -108,6 +110,8 @@ struct usb_hcd { |
2403 | #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) |
2404 | #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) |
2405 | #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) |
2406 | +#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) |
2407 | +#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) |
2408 | |
2409 | /* Flags that get set only during HCD registration or removal. */ |
2410 | unsigned rh_registered:1;/* is root hub registered? */ |
2411 | diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h |
2412 | index c904913..45f3b9d 100644 |
2413 | --- a/include/linux/usb/serial.h |
2414 | +++ b/include/linux/usb/serial.h |
2415 | @@ -191,7 +191,8 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) |
2416 | * @id_table: pointer to a list of usb_device_id structures that define all |
2417 | * of the devices this structure can support. |
2418 | * @num_ports: the number of different ports this device will have. |
2419 | - * @bulk_in_size: bytes to allocate for bulk-in buffer (0 = end-point size) |
2420 | + * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer |
2421 | + * (0 = end-point size) |
2422 | * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) |
2423 | * @calc_num_ports: pointer to a function to determine how many ports this |
2424 | * device has dynamically. It will be called after the probe() |
2425 | diff --git a/kernel/perf_event.c b/kernel/perf_event.c |
2426 | index 785c66a..ee489d0 100644 |
2427 | --- a/kernel/perf_event.c |
2428 | +++ b/kernel/perf_event.c |
2429 | @@ -4414,7 +4414,7 @@ static int perf_exclude_event(struct perf_event *event, |
2430 | struct pt_regs *regs) |
2431 | { |
2432 | if (event->hw.state & PERF_HES_STOPPED) |
2433 | - return 0; |
2434 | + return 1; |
2435 | |
2436 | if (regs) { |
2437 | if (event->attr.exclude_user && user_mode(regs)) |
2438 | @@ -4770,6 +4770,8 @@ static int perf_tp_event_match(struct perf_event *event, |
2439 | struct perf_sample_data *data, |
2440 | struct pt_regs *regs) |
2441 | { |
2442 | + if (event->hw.state & PERF_HES_STOPPED) |
2443 | + return 0; |
2444 | /* |
2445 | * All tracepoints are from kernel-space. |
2446 | */ |
2447 | diff --git a/kernel/sched.c b/kernel/sched.c |
2448 | index 2f912b7..5e0a919 100644 |
2449 | --- a/kernel/sched.c |
2450 | +++ b/kernel/sched.c |
2451 | @@ -5706,7 +5706,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) |
2452 | * The idle tasks have their own, simple scheduling class: |
2453 | */ |
2454 | idle->sched_class = &idle_sched_class; |
2455 | - ftrace_graph_init_task(idle); |
2456 | + ftrace_graph_init_idle_task(idle, cpu); |
2457 | } |
2458 | |
2459 | /* |
2460 | diff --git a/kernel/smp.c b/kernel/smp.c |
2461 | index 8448f8f..a210d13 100644 |
2462 | --- a/kernel/smp.c |
2463 | +++ b/kernel/smp.c |
2464 | @@ -440,7 +440,7 @@ void smp_call_function_many(const struct cpumask *mask, |
2465 | { |
2466 | struct call_function_data *data; |
2467 | unsigned long flags; |
2468 | - int cpu, next_cpu, this_cpu = smp_processor_id(); |
2469 | + int refs, cpu, next_cpu, this_cpu = smp_processor_id(); |
2470 | |
2471 | /* |
2472 | * Can deadlock when called with interrupts disabled. |
2473 | @@ -451,7 +451,7 @@ void smp_call_function_many(const struct cpumask *mask, |
2474 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() |
2475 | && !oops_in_progress); |
2476 | |
2477 | - /* So, what's a CPU they want? Ignoring this one. */ |
2478 | + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ |
2479 | cpu = cpumask_first_and(mask, cpu_online_mask); |
2480 | if (cpu == this_cpu) |
2481 | cpu = cpumask_next_and(cpu, mask, cpu_online_mask); |
2482 | @@ -473,22 +473,49 @@ void smp_call_function_many(const struct cpumask *mask, |
2483 | |
2484 | data = &__get_cpu_var(cfd_data); |
2485 | csd_lock(&data->csd); |
2486 | + |
2487 | + /* This BUG_ON verifies our reuse assertions and can be removed */ |
2488 | BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); |
2489 | |
2490 | + /* |
2491 | + * The global call function queue list add and delete are protected |
2492 | + * by a lock, but the list is traversed without any lock, relying |
2493 | + * on the rcu list add and delete to allow safe concurrent traversal. |
2494 | + * We reuse the call function data without waiting for any grace |
2495 | + * period after some other cpu removes it from the global queue. |
2496 | + * This means a cpu might find our data block as it is being |
2497 | + * filled out. |
2498 | + * |
2499 | + * We hold off the interrupt handler on the other cpu by |
2500 | + * ordering our writes to the cpu mask vs our setting of the |
2501 | + * refs counter. We assert only the cpu owning the data block |
2502 | + * will set a bit in cpumask, and each bit will only be cleared |
2503 | + * by the subject cpu. Each cpu must first find its bit is |
2504 | + * set and then check that refs is set indicating the element is |
2505 | + * ready to be processed, otherwise it must skip the entry. |
2506 | + * |
2507 | + * On the previous iteration refs was set to 0 by another cpu. |
2508 | + * To avoid the use of transitivity, set the counter to 0 here |
2509 | + * so the wmb will pair with the rmb in the interrupt handler. |
2510 | + */ |
2511 | + atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ |
2512 | + |
2513 | data->csd.func = func; |
2514 | data->csd.info = info; |
2515 | - cpumask_and(data->cpumask, mask, cpu_online_mask); |
2516 | - cpumask_clear_cpu(this_cpu, data->cpumask); |
2517 | |
2518 | - /* |
2519 | - * To ensure the interrupt handler gets an complete view |
2520 | - * we order the cpumask and refs writes and order the read |
2521 | - * of them in the interrupt handler. In addition we may |
2522 | - * only clear our own cpu bit from the mask. |
2523 | - */ |
2524 | + /* Ensure 0 refs is visible before mask. Also orders func and info */ |
2525 | smp_wmb(); |
2526 | |
2527 | - atomic_set(&data->refs, cpumask_weight(data->cpumask)); |
2528 | + /* We rely on the "and" being processed before the store */ |
2529 | + cpumask_and(data->cpumask, mask, cpu_online_mask); |
2530 | + cpumask_clear_cpu(this_cpu, data->cpumask); |
2531 | + refs = cpumask_weight(data->cpumask); |
2532 | + |
2533 | + /* Some callers race with other cpus changing the passed mask */ |
2534 | + if (unlikely(!refs)) { |
2535 | + csd_unlock(&data->csd); |
2536 | + return; |
2537 | + } |
2538 | |
2539 | raw_spin_lock_irqsave(&call_function.lock, flags); |
2540 | /* |
2541 | @@ -497,6 +524,12 @@ void smp_call_function_many(const struct cpumask *mask, |
2542 | * will not miss any other list entries: |
2543 | */ |
2544 | list_add_rcu(&data->csd.list, &call_function.queue); |
2545 | + /* |
2546 | + * We rely on the wmb() in list_add_rcu to complete our writes |
2547 | + * to the cpumask before this write to refs, which indicates |
2548 | + * data is on the list and is ready to be processed. |
2549 | + */ |
2550 | + atomic_set(&data->refs, refs); |
2551 | raw_spin_unlock_irqrestore(&call_function.lock, flags); |
2552 | |
2553 | /* |
2554 | diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
2555 | index f3dadae..888b611 100644 |
2556 | --- a/kernel/trace/ftrace.c |
2557 | +++ b/kernel/trace/ftrace.c |
2558 | @@ -3328,7 +3328,7 @@ static int start_graph_tracing(void) |
2559 | /* The cpu_boot init_task->ret_stack will never be freed */ |
2560 | for_each_online_cpu(cpu) { |
2561 | if (!idle_task(cpu)->ret_stack) |
2562 | - ftrace_graph_init_task(idle_task(cpu)); |
2563 | + ftrace_graph_init_idle_task(idle_task(cpu), cpu); |
2564 | } |
2565 | |
2566 | do { |
2567 | @@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void) |
2568 | mutex_unlock(&ftrace_lock); |
2569 | } |
2570 | |
2571 | +static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); |
2572 | + |
2573 | +static void |
2574 | +graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) |
2575 | +{ |
2576 | + atomic_set(&t->tracing_graph_pause, 0); |
2577 | + atomic_set(&t->trace_overrun, 0); |
2578 | + t->ftrace_timestamp = 0; |
2579 | + /* make curr_ret_stack visable before we add the ret_stack */ |
2580 | + smp_wmb(); |
2581 | + t->ret_stack = ret_stack; |
2582 | +} |
2583 | + |
2584 | +/* |
2585 | + * Allocate a return stack for the idle task. May be the first |
2586 | + * time through, or it may be done by CPU hotplug online. |
2587 | + */ |
2588 | +void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) |
2589 | +{ |
2590 | + t->curr_ret_stack = -1; |
2591 | + /* |
2592 | + * The idle task has no parent, it either has its own |
2593 | + * stack or no stack at all. |
2594 | + */ |
2595 | + if (t->ret_stack) |
2596 | + WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); |
2597 | + |
2598 | + if (ftrace_graph_active) { |
2599 | + struct ftrace_ret_stack *ret_stack; |
2600 | + |
2601 | + ret_stack = per_cpu(idle_ret_stack, cpu); |
2602 | + if (!ret_stack) { |
2603 | + ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH |
2604 | + * sizeof(struct ftrace_ret_stack), |
2605 | + GFP_KERNEL); |
2606 | + if (!ret_stack) |
2607 | + return; |
2608 | + per_cpu(idle_ret_stack, cpu) = ret_stack; |
2609 | + } |
2610 | + graph_init_task(t, ret_stack); |
2611 | + } |
2612 | +} |
2613 | + |
2614 | /* Allocate a return stack for newly created task */ |
2615 | void ftrace_graph_init_task(struct task_struct *t) |
2616 | { |
2617 | @@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t) |
2618 | GFP_KERNEL); |
2619 | if (!ret_stack) |
2620 | return; |
2621 | - atomic_set(&t->tracing_graph_pause, 0); |
2622 | - atomic_set(&t->trace_overrun, 0); |
2623 | - t->ftrace_timestamp = 0; |
2624 | - /* make curr_ret_stack visable before we add the ret_stack */ |
2625 | - smp_wmb(); |
2626 | - t->ret_stack = ret_stack; |
2627 | + graph_init_task(t, ret_stack); |
2628 | } |
2629 | } |
2630 | |
2631 | diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c |
2632 | index 37f8adb..63f60fc 100644 |
2633 | --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c |
2634 | +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c |
2635 | @@ -97,7 +97,7 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
2636 | |
2637 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); |
2638 | if (ret) |
2639 | - return ret; |
2640 | + return 0; |
2641 | |
2642 | ret = seq_printf(s, "secctx=%s ", secctx); |
2643 | |
2644 | diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c |
2645 | index 742a6dc..bce11a5 100644 |
2646 | --- a/net/netfilter/nf_conntrack_netlink.c |
2647 | +++ b/net/netfilter/nf_conntrack_netlink.c |
2648 | @@ -254,7 +254,7 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) |
2649 | |
2650 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); |
2651 | if (ret) |
2652 | - return ret; |
2653 | + return 0; |
2654 | |
2655 | ret = -1; |
2656 | nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED); |
2657 | @@ -453,16 +453,22 @@ ctnetlink_counters_size(const struct nf_conn *ct) |
2658 | ; |
2659 | } |
2660 | |
2661 | -#ifdef CONFIG_NF_CONNTRACK_SECMARK |
2662 | -static int ctnetlink_nlmsg_secctx_size(const struct nf_conn *ct) |
2663 | +static inline int |
2664 | +ctnetlink_secctx_size(const struct nf_conn *ct) |
2665 | { |
2666 | - int len; |
2667 | +#ifdef CONFIG_NF_CONNTRACK_SECMARK |
2668 | + int len, ret; |
2669 | |
2670 | - security_secid_to_secctx(ct->secmark, NULL, &len); |
2671 | + ret = security_secid_to_secctx(ct->secmark, NULL, &len); |
2672 | + if (ret) |
2673 | + return 0; |
2674 | |
2675 | - return sizeof(char) * len; |
2676 | -} |
2677 | + return nla_total_size(0) /* CTA_SECCTX */ |
2678 | + + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */ |
2679 | +#else |
2680 | + return 0; |
2681 | #endif |
2682 | +} |
2683 | |
2684 | static inline size_t |
2685 | ctnetlink_nlmsg_size(const struct nf_conn *ct) |
2686 | @@ -479,10 +485,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct) |
2687 | + nla_total_size(0) /* CTA_PROTOINFO */ |
2688 | + nla_total_size(0) /* CTA_HELP */ |
2689 | + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ |
2690 | -#ifdef CONFIG_NF_CONNTRACK_SECMARK |
2691 | - + nla_total_size(0) /* CTA_SECCTX */ |
2692 | - + nla_total_size(ctnetlink_nlmsg_secctx_size(ct)) /* CTA_SECCTX_NAME */ |
2693 | -#endif |
2694 | + + ctnetlink_secctx_size(ct) |
2695 | #ifdef CONFIG_NF_NAT_NEEDED |
2696 | + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ |
2697 | + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ |
2698 | diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c |
2699 | index 0fb6570..b4d7f0f 100644 |
2700 | --- a/net/netfilter/nf_conntrack_standalone.c |
2701 | +++ b/net/netfilter/nf_conntrack_standalone.c |
2702 | @@ -118,7 +118,7 @@ static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) |
2703 | |
2704 | ret = security_secid_to_secctx(ct->secmark, &secctx, &len); |
2705 | if (ret) |
2706 | - return ret; |
2707 | + return 0; |
2708 | |
2709 | ret = seq_printf(s, "secctx=%s ", secctx); |
2710 | |
2711 | diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c |
2712 | index 92ce94f..503fbbb 100644 |
2713 | --- a/net/sunrpc/clnt.c |
2714 | +++ b/net/sunrpc/clnt.c |
2715 | @@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) |
2716 | if (!(rovr->tk_flags & RPC_TASK_KILLED)) { |
2717 | rovr->tk_flags |= RPC_TASK_KILLED; |
2718 | rpc_exit(rovr, -EIO); |
2719 | - rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); |
2720 | + if (RPC_IS_QUEUED(rovr)) |
2721 | + rpc_wake_up_queued_task(rovr->tk_waitqueue, |
2722 | + rovr); |
2723 | } |
2724 | } |
2725 | spin_unlock(&clnt->cl_lock); |
2726 | diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c |
2727 | index 243fc09..168fb81 100644 |
2728 | --- a/net/sunrpc/sched.c |
2729 | +++ b/net/sunrpc/sched.c |
2730 | @@ -623,14 +623,12 @@ static void __rpc_execute(struct rpc_task *task) |
2731 | save_callback = task->tk_callback; |
2732 | task->tk_callback = NULL; |
2733 | save_callback(task); |
2734 | - } |
2735 | - |
2736 | - /* |
2737 | - * Perform the next FSM step. |
2738 | - * tk_action may be NULL when the task has been killed |
2739 | - * by someone else. |
2740 | - */ |
2741 | - if (!RPC_IS_QUEUED(task)) { |
2742 | + } else { |
2743 | + /* |
2744 | + * Perform the next FSM step. |
2745 | + * tk_action may be NULL when the task has been killed |
2746 | + * by someone else. |
2747 | + */ |
2748 | if (task->tk_action == NULL) |
2749 | break; |
2750 | task->tk_action(task); |
2751 | diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
2752 | index dfcab5a..3ad452b 100644 |
2753 | --- a/net/sunrpc/xprtsock.c |
2754 | +++ b/net/sunrpc/xprtsock.c |
2755 | @@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, |
2756 | } |
2757 | xs_reclassify_socket(family, sock); |
2758 | |
2759 | - if (xs_bind(transport, sock)) { |
2760 | + err = xs_bind(transport, sock); |
2761 | + if (err) { |
2762 | sock_release(sock); |
2763 | goto out; |
2764 | } |
2765 | diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c |
2766 | index 9d32f18..cb09f1f 100644 |
2767 | --- a/security/tomoyo/file.c |
2768 | +++ b/security/tomoyo/file.c |
2769 | @@ -927,7 +927,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, |
2770 | struct path *path, const int flag) |
2771 | { |
2772 | const u8 acc_mode = ACC_MODE(flag); |
2773 | - int error = -ENOMEM; |
2774 | + int error = 0; |
2775 | struct tomoyo_path_info buf; |
2776 | struct tomoyo_request_info r; |
2777 | int idx; |
2778 | @@ -938,9 +938,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, |
2779 | buf.name = NULL; |
2780 | r.mode = TOMOYO_CONFIG_DISABLED; |
2781 | idx = tomoyo_read_lock(); |
2782 | - if (!tomoyo_get_realpath(&buf, path)) |
2783 | - goto out; |
2784 | - error = 0; |
2785 | /* |
2786 | * If the filename is specified by "deny_rewrite" keyword, |
2787 | * we need to check "allow_rewrite" permission when the filename is not |
2788 | diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c |
2789 | index 12b44b0..a0da775 100644 |
2790 | --- a/sound/drivers/aloop.c |
2791 | +++ b/sound/drivers/aloop.c |
2792 | @@ -482,8 +482,9 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) |
2793 | cable->streams[SNDRV_PCM_STREAM_CAPTURE]; |
2794 | unsigned long delta_play = 0, delta_capt = 0; |
2795 | unsigned int running; |
2796 | + unsigned long flags; |
2797 | |
2798 | - spin_lock(&cable->lock); |
2799 | + spin_lock_irqsave(&cable->lock, flags); |
2800 | running = cable->running ^ cable->pause; |
2801 | if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { |
2802 | delta_play = jiffies - dpcm_play->last_jiffies; |
2803 | @@ -495,10 +496,8 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) |
2804 | dpcm_capt->last_jiffies += delta_capt; |
2805 | } |
2806 | |
2807 | - if (delta_play == 0 && delta_capt == 0) { |
2808 | - spin_unlock(&cable->lock); |
2809 | - return running; |
2810 | - } |
2811 | + if (delta_play == 0 && delta_capt == 0) |
2812 | + goto unlock; |
2813 | |
2814 | if (delta_play > delta_capt) { |
2815 | loopback_bytepos_update(dpcm_play, delta_play - delta_capt, |
2816 | @@ -510,14 +509,14 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) |
2817 | delta_capt = delta_play; |
2818 | } |
2819 | |
2820 | - if (delta_play == 0 && delta_capt == 0) { |
2821 | - spin_unlock(&cable->lock); |
2822 | - return running; |
2823 | - } |
2824 | + if (delta_play == 0 && delta_capt == 0) |
2825 | + goto unlock; |
2826 | + |
2827 | /* note delta_capt == delta_play at this moment */ |
2828 | loopback_bytepos_update(dpcm_capt, delta_capt, BYTEPOS_UPDATE_COPY); |
2829 | loopback_bytepos_update(dpcm_play, delta_play, BYTEPOS_UPDATE_POSONLY); |
2830 | - spin_unlock(&cable->lock); |
2831 | + unlock: |
2832 | + spin_unlock_irqrestore(&cable->lock, flags); |
2833 | return running; |
2834 | } |
2835 | |
2836 | diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c |
2837 | index 22dbd91..448dd01 100644 |
2838 | --- a/sound/pci/asihpi/hpioctl.c |
2839 | +++ b/sound/pci/asihpi/hpioctl.c |
2840 | @@ -155,6 +155,11 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
2841 | goto out; |
2842 | } |
2843 | |
2844 | + if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { |
2845 | + err = -EINVAL; |
2846 | + goto out; |
2847 | + } |
2848 | + |
2849 | pa = &adapters[hm->h.adapter_index]; |
2850 | hr->h.size = 0; |
2851 | if (hm->h.object == HPI_OBJ_SUBSYSTEM) { |
2852 | diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c |
2853 | index 1bff80c..b932154 100644 |
2854 | --- a/sound/pci/ctxfi/ctatc.c |
2855 | +++ b/sound/pci/ctxfi/ctatc.c |
2856 | @@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) |
2857 | mutex_lock(&atc->atc_mutex); |
2858 | dao->ops->get_spos(dao, &status); |
2859 | if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { |
2860 | - status &= ((~IEC958_AES3_CON_FS) << 24); |
2861 | + status &= ~(IEC958_AES3_CON_FS << 24); |
2862 | status |= (iec958_con_fs << 24); |
2863 | dao->ops->set_spos(dao, status); |
2864 | dao->ops->commit_write(dao); |
2865 | diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c |
2866 | index af56eb9..47d9ea9 100644 |
2867 | --- a/sound/pci/ctxfi/ctdaio.c |
2868 | +++ b/sound/pci/ctxfi/ctdaio.c |
2869 | @@ -176,6 +176,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input) |
2870 | if (!entry) |
2871 | return -ENOMEM; |
2872 | |
2873 | + dao->ops->clear_left_input(dao); |
2874 | /* Program master and conjugate resources */ |
2875 | input->ops->master(input); |
2876 | daio->rscl.ops->master(&daio->rscl); |
2877 | @@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) |
2878 | if (!entry) |
2879 | return -ENOMEM; |
2880 | |
2881 | + dao->ops->clear_right_input(dao); |
2882 | /* Program master and conjugate resources */ |
2883 | input->ops->master(input); |
2884 | daio->rscr.ops->master(&daio->rscr); |
2885 | diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c |
2886 | index 15c1e72..c3519ff 100644 |
2887 | --- a/sound/pci/ctxfi/ctmixer.c |
2888 | +++ b/sound/pci/ctxfi/ctmixer.c |
2889 | @@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, |
2890 | return 0; |
2891 | } |
2892 | |
2893 | -static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, |
2894 | - struct snd_ctl_elem_value *ucontrol) |
2895 | -{ |
2896 | - unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; |
2897 | - |
2898 | - ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; |
2899 | - ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; |
2900 | - ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; |
2901 | - ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; |
2902 | - |
2903 | - return 0; |
2904 | -} |
2905 | - |
2906 | static int ct_spdif_get(struct snd_kcontrol *kcontrol, |
2907 | struct snd_ctl_elem_value *ucontrol) |
2908 | { |
2909 | @@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, |
2910 | unsigned int status; |
2911 | |
2912 | atc->spdif_out_get_status(atc, &status); |
2913 | + |
2914 | + if (status == 0) |
2915 | + status = SNDRV_PCM_DEFAULT_CON_SPDIF; |
2916 | + |
2917 | ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; |
2918 | ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; |
2919 | ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; |
2920 | @@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { |
2921 | .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), |
2922 | .count = 1, |
2923 | .info = ct_spdif_info, |
2924 | - .get = ct_spdif_default_get, |
2925 | + .get = ct_spdif_get, |
2926 | .put = ct_spdif_put, |
2927 | .private_value = MIXER_IEC958_DEFAULT |
2928 | }; |
2929 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
2930 | index e61c87c..6e44994 100644 |
2931 | --- a/sound/pci/hda/patch_realtek.c |
2932 | +++ b/sound/pci/hda/patch_realtek.c |
2933 | @@ -393,6 +393,7 @@ struct alc_spec { |
2934 | /* other flags */ |
2935 | unsigned int no_analog :1; /* digital I/O only */ |
2936 | unsigned int dual_adc_switch:1; /* switch ADCs (for ALC275) */ |
2937 | + unsigned int single_input_src:1; |
2938 | int init_amp; |
2939 | int codec_variant; /* flag for other variants */ |
2940 | |
2941 | @@ -3798,6 +3799,8 @@ static struct hda_amp_list alc880_lg_loopbacks[] = { |
2942 | * Common callbacks |
2943 | */ |
2944 | |
2945 | +static void alc_init_special_input_src(struct hda_codec *codec); |
2946 | + |
2947 | static int alc_init(struct hda_codec *codec) |
2948 | { |
2949 | struct alc_spec *spec = codec->spec; |
2950 | @@ -3808,6 +3811,7 @@ static int alc_init(struct hda_codec *codec) |
2951 | |
2952 | for (i = 0; i < spec->num_init_verbs; i++) |
2953 | snd_hda_sequence_write(codec, spec->init_verbs[i]); |
2954 | + alc_init_special_input_src(codec); |
2955 | |
2956 | if (spec->init_hook) |
2957 | spec->init_hook(codec); |
2958 | @@ -5441,6 +5445,7 @@ static void fixup_single_adc(struct hda_codec *codec) |
2959 | spec->capsrc_nids += i; |
2960 | spec->adc_nids += i; |
2961 | spec->num_adc_nids = 1; |
2962 | + spec->single_input_src = 1; |
2963 | } |
2964 | } |
2965 | |
2966 | @@ -5452,6 +5457,16 @@ static void fixup_dual_adc_switch(struct hda_codec *codec) |
2967 | init_capsrc_for_pin(codec, spec->int_mic.pin); |
2968 | } |
2969 | |
2970 | +/* initialize some special cases for input sources */ |
2971 | +static void alc_init_special_input_src(struct hda_codec *codec) |
2972 | +{ |
2973 | + struct alc_spec *spec = codec->spec; |
2974 | + if (spec->dual_adc_switch) |
2975 | + fixup_dual_adc_switch(codec); |
2976 | + else if (spec->single_input_src) |
2977 | + init_capsrc_for_pin(codec, spec->autocfg.inputs[0].pin); |
2978 | +} |
2979 | + |
2980 | static void set_capture_mixer(struct hda_codec *codec) |
2981 | { |
2982 | struct alc_spec *spec = codec->spec; |
2983 | @@ -5467,7 +5482,7 @@ static void set_capture_mixer(struct hda_codec *codec) |
2984 | int mux = 0; |
2985 | int num_adcs = spec->num_adc_nids; |
2986 | if (spec->dual_adc_switch) |
2987 | - fixup_dual_adc_switch(codec); |
2988 | + num_adcs = 1; |
2989 | else if (spec->auto_mic) |
2990 | fixup_automic_adc(codec); |
2991 | else if (spec->input_mux) { |
2992 | @@ -5476,8 +5491,6 @@ static void set_capture_mixer(struct hda_codec *codec) |
2993 | else if (spec->input_mux->num_items == 1) |
2994 | fixup_single_adc(codec); |
2995 | } |
2996 | - if (spec->dual_adc_switch) |
2997 | - num_adcs = 1; |
2998 | spec->cap_mixer = caps[mux][num_adcs - 1]; |
2999 | } |
3000 | } |
3001 | @@ -10736,23 +10749,28 @@ static void alc882_auto_init_hp_out(struct hda_codec *codec) |
3002 | hda_nid_t pin, dac; |
3003 | int i; |
3004 | |
3005 | - for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { |
3006 | - pin = spec->autocfg.hp_pins[i]; |
3007 | - if (!pin) |
3008 | - break; |
3009 | - dac = spec->multiout.hp_nid; |
3010 | - if (!dac) |
3011 | - dac = spec->multiout.dac_nids[0]; /* to front */ |
3012 | - alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); |
3013 | + if (spec->autocfg.line_out_type != AUTO_PIN_HP_OUT) { |
3014 | + for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { |
3015 | + pin = spec->autocfg.hp_pins[i]; |
3016 | + if (!pin) |
3017 | + break; |
3018 | + dac = spec->multiout.hp_nid; |
3019 | + if (!dac) |
3020 | + dac = spec->multiout.dac_nids[0]; /* to front */ |
3021 | + alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); |
3022 | + } |
3023 | } |
3024 | - for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { |
3025 | - pin = spec->autocfg.speaker_pins[i]; |
3026 | - if (!pin) |
3027 | - break; |
3028 | - dac = spec->multiout.extra_out_nid[0]; |
3029 | - if (!dac) |
3030 | - dac = spec->multiout.dac_nids[0]; /* to front */ |
3031 | - alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); |
3032 | + |
3033 | + if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT) { |
3034 | + for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { |
3035 | + pin = spec->autocfg.speaker_pins[i]; |
3036 | + if (!pin) |
3037 | + break; |
3038 | + dac = spec->multiout.extra_out_nid[0]; |
3039 | + if (!dac) |
3040 | + dac = spec->multiout.dac_nids[0]; /* to front */ |
3041 | + alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); |
3042 | + } |
3043 | } |
3044 | } |
3045 | |
3046 | diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c |
3047 | index fc16beb..797a16c 100644 |
3048 | --- a/sound/pci/hda/patch_sigmatel.c |
3049 | +++ b/sound/pci/hda/patch_sigmatel.c |
3050 | @@ -749,7 +749,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e |
3051 | struct sigmatel_spec *spec = codec->spec; |
3052 | unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); |
3053 | const struct hda_input_mux *imux = spec->input_mux; |
3054 | - unsigned int idx, prev_idx; |
3055 | + unsigned int idx, prev_idx, didx; |
3056 | |
3057 | idx = ucontrol->value.enumerated.item[0]; |
3058 | if (idx >= imux->num_items) |
3059 | @@ -761,7 +761,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e |
3060 | snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, |
3061 | AC_VERB_SET_CONNECT_SEL, |
3062 | imux->items[idx].index); |
3063 | - if (prev_idx >= spec->num_analog_muxes) { |
3064 | + if (prev_idx >= spec->num_analog_muxes && |
3065 | + spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { |
3066 | imux = spec->dinput_mux; |
3067 | /* 0 = analog */ |
3068 | snd_hda_codec_write_cache(codec, |
3069 | @@ -771,9 +772,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e |
3070 | } |
3071 | } else { |
3072 | imux = spec->dinput_mux; |
3073 | + /* first dimux item is hardcoded to select analog imux, |
3074 | + * so lets skip it |
3075 | + */ |
3076 | + didx = idx - spec->num_analog_muxes + 1; |
3077 | snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, |
3078 | AC_VERB_SET_CONNECT_SEL, |
3079 | - imux->items[idx - 1].index); |
3080 | + imux->items[didx].index); |
3081 | } |
3082 | spec->cur_mux[adc_idx] = idx; |
3083 | return 1; |
3084 | diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c |
3085 | index 13b979a..a008aeb 100644 |
3086 | --- a/sound/soc/codecs/wm8978.c |
3087 | +++ b/sound/soc/codecs/wm8978.c |
3088 | @@ -147,18 +147,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { |
3089 | SOC_SINGLE("DAC Playback Limiter Threshold", |
3090 | WM8978_DAC_LIMITER_2, 4, 7, 0), |
3091 | SOC_SINGLE("DAC Playback Limiter Boost", |
3092 | - WM8978_DAC_LIMITER_2, 0, 15, 0), |
3093 | + WM8978_DAC_LIMITER_2, 0, 12, 0), |
3094 | |
3095 | SOC_ENUM("ALC Enable Switch", alc1), |
3096 | SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0), |
3097 | SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0), |
3098 | |
3099 | - SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0), |
3100 | + SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0), |
3101 | SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0), |
3102 | |
3103 | SOC_ENUM("ALC Capture Mode", alc3), |
3104 | - SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0), |
3105 | - SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0), |
3106 | + SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0), |
3107 | + SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0), |
3108 | |
3109 | SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0), |
3110 | SOC_SINGLE("ALC Capture Noise Gate Threshold", |
3111 | @@ -213,8 +213,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { |
3112 | WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1), |
3113 | |
3114 | /* DAC / ADC oversampling */ |
3115 | - SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0), |
3116 | - SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0), |
3117 | + SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, |
3118 | + 5, 1, 0), |
3119 | + SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, |
3120 | + 5, 1, 0), |
3121 | }; |
3122 | |
3123 | /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */ |
3124 | diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN |
3125 | index 97d7656..26d4d3f 100755 |
3126 | --- a/tools/perf/util/PERF-VERSION-GEN |
3127 | +++ b/tools/perf/util/PERF-VERSION-GEN |
3128 | @@ -23,10 +23,10 @@ if test -d ../../.git -o -f ../../.git && |
3129 | then |
3130 | VN=$(echo "$VN" | sed -e 's/-/./g'); |
3131 | else |
3132 | - eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '` |
3133 | - eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '` |
3134 | - eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '` |
3135 | - eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '` |
3136 | + eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ') |
3137 | + eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') |
3138 | + eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ') |
3139 | + eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ') |
3140 | |
3141 | VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}" |
3142 | fi |