Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.33-r4/0104-2.6.33.5-all-fixes.patch

Revision 1286
Thu Feb 17 15:05:15 2011 UTC by niro
File size: 52581 bytes
2.6.33-alx-r4: enabled usbserial generic module for accu-chek II devices
1 diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
2 index 0d07513..bf241be 100644
3 --- a/Documentation/filesystems/proc.txt
4 +++ b/Documentation/filesystems/proc.txt
5 @@ -308,7 +308,7 @@ address perms offset dev inode pathname
6 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
7 0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
8 a7cb1000-a7cb2000 ---p 00000000 00:00 0
9 -a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4]
10 +a7cb2000-a7eb2000 rw-p 00000000 00:00 0
11 a7eb2000-a7eb3000 ---p 00000000 00:00 0
12 a7eb3000-a7ed5000 rw-p 00000000 00:00 0
13 a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
14 @@ -344,7 +344,6 @@ is not associated with a file:
15 [stack] = the stack of the main process
16 [vdso] = the "virtual dynamic shared object",
17 the kernel system call handler
18 - [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
19
20 or if empty, the mapping is anonymous.
21
22 diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
23 index 9f4c9d4..bd100fc 100644
24 --- a/arch/powerpc/include/asm/hw_irq.h
25 +++ b/arch/powerpc/include/asm/hw_irq.h
26 @@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags)
27 */
28 struct irq_chip;
29
30 -#ifdef CONFIG_PERF_EVENTS
31 -
32 -#ifdef CONFIG_PPC64
33 -static inline unsigned long test_perf_event_pending(void)
34 -{
35 - unsigned long x;
36 -
37 - asm volatile("lbz %0,%1(13)"
38 - : "=r" (x)
39 - : "i" (offsetof(struct paca_struct, perf_event_pending)));
40 - return x;
41 -}
42 -
43 -static inline void set_perf_event_pending(void)
44 -{
45 - asm volatile("stb %0,%1(13)" : :
46 - "r" (1),
47 - "i" (offsetof(struct paca_struct, perf_event_pending)));
48 -}
49 -
50 -static inline void clear_perf_event_pending(void)
51 -{
52 - asm volatile("stb %0,%1(13)" : :
53 - "r" (0),
54 - "i" (offsetof(struct paca_struct, perf_event_pending)));
55 -}
56 -#endif /* CONFIG_PPC64 */
57 -
58 -#else /* CONFIG_PERF_EVENTS */
59 -
60 -static inline unsigned long test_perf_event_pending(void)
61 -{
62 - return 0;
63 -}
64 -
65 -static inline void clear_perf_event_pending(void) {}
66 -#endif /* CONFIG_PERF_EVENTS */
67 -
68 #endif /* __KERNEL__ */
69 #endif /* _ASM_POWERPC_HW_IRQ_H */
70 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
71 index a6c2b63..11d0668 100644
72 --- a/arch/powerpc/kernel/asm-offsets.c
73 +++ b/arch/powerpc/kernel/asm-offsets.c
74 @@ -133,7 +133,6 @@ int main(void)
75 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
76 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
77 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
78 - DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
79 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
80 #ifdef CONFIG_PPC_MM_SLICES
81 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
82 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
83 index bdcb557..afbf400 100644
84 --- a/arch/powerpc/kernel/entry_64.S
85 +++ b/arch/powerpc/kernel/entry_64.S
86 @@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
87 2:
88 TRACE_AND_RESTORE_IRQ(r5);
89
90 -#ifdef CONFIG_PERF_EVENTS
91 - /* check paca->perf_event_pending if we're enabling ints */
92 - lbz r3,PACAPERFPEND(r13)
93 - and. r3,r3,r5
94 - beq 27f
95 - bl .perf_event_do_pending
96 -27:
97 -#endif /* CONFIG_PERF_EVENTS */
98 -
99 /* extract EE bit and use it to restore paca->hard_enabled */
100 ld r3,_MSR(r1)
101 rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
102 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
103 index 9040330..dee83b8 100644
104 --- a/arch/powerpc/kernel/irq.c
105 +++ b/arch/powerpc/kernel/irq.c
106 @@ -53,7 +53,6 @@
107 #include <linux/bootmem.h>
108 #include <linux/pci.h>
109 #include <linux/debugfs.h>
110 -#include <linux/perf_event.h>
111
112 #include <asm/uaccess.h>
113 #include <asm/system.h>
114 @@ -143,11 +142,6 @@ notrace void raw_local_irq_restore(unsigned long en)
115 }
116 #endif /* CONFIG_PPC_STD_MMU_64 */
117
118 - if (test_perf_event_pending()) {
119 - clear_perf_event_pending();
120 - perf_event_do_pending();
121 - }
122 -
123 /*
124 * if (get_paca()->hard_enabled) return;
125 * But again we need to take care that gcc gets hard_enabled directly
126 diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
127 index 6c6093d..6f174e7 100644
128 --- a/arch/powerpc/kernel/time.c
129 +++ b/arch/powerpc/kernel/time.c
130 @@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
131 }
132 #endif /* CONFIG_PPC_ISERIES */
133
134 -#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
135 -DEFINE_PER_CPU(u8, perf_event_pending);
136 +#ifdef CONFIG_PERF_EVENTS
137
138 -void set_perf_event_pending(void)
139 +/*
140 + * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
141 + */
142 +#ifdef CONFIG_PPC64
143 +static inline unsigned long test_perf_event_pending(void)
144 {
145 - get_cpu_var(perf_event_pending) = 1;
146 - set_dec(1);
147 - put_cpu_var(perf_event_pending);
148 + unsigned long x;
149 +
150 + asm volatile("lbz %0,%1(13)"
151 + : "=r" (x)
152 + : "i" (offsetof(struct paca_struct, perf_event_pending)));
153 + return x;
154 }
155
156 +static inline void set_perf_event_pending_flag(void)
157 +{
158 + asm volatile("stb %0,%1(13)" : :
159 + "r" (1),
160 + "i" (offsetof(struct paca_struct, perf_event_pending)));
161 +}
162 +
163 +static inline void clear_perf_event_pending(void)
164 +{
165 + asm volatile("stb %0,%1(13)" : :
166 + "r" (0),
167 + "i" (offsetof(struct paca_struct, perf_event_pending)));
168 +}
169 +
170 +#else /* 32-bit */
171 +
172 +DEFINE_PER_CPU(u8, perf_event_pending);
173 +
174 +#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
175 #define test_perf_event_pending() __get_cpu_var(perf_event_pending)
176 #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
177
178 -#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
179 +#endif /* 32 vs 64 bit */
180 +
181 +void set_perf_event_pending(void)
182 +{
183 + preempt_disable();
184 + set_perf_event_pending_flag();
185 + set_dec(1);
186 + preempt_enable();
187 +}
188 +
189 +#else /* CONFIG_PERF_EVENTS */
190
191 #define test_perf_event_pending() 0
192 #define clear_perf_event_pending()
193
194 -#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
195 +#endif /* CONFIG_PERF_EVENTS */
196
197 /*
198 * For iSeries shared processors, we have to let the hypervisor
199 @@ -580,10 +615,6 @@ void timer_interrupt(struct pt_regs * regs)
200 set_dec(DECREMENTER_MAX);
201
202 #ifdef CONFIG_PPC32
203 - if (test_perf_event_pending()) {
204 - clear_perf_event_pending();
205 - perf_event_do_pending();
206 - }
207 if (atomic_read(&ppc_n_lost_interrupts) != 0)
208 do_IRQ(regs);
209 #endif
210 @@ -602,6 +633,11 @@ void timer_interrupt(struct pt_regs * regs)
211
212 calculate_steal_time();
213
214 + if (test_perf_event_pending()) {
215 + clear_perf_event_pending();
216 + perf_event_do_pending();
217 + }
218 +
219 #ifdef CONFIG_PPC_ISERIES
220 if (firmware_has_feature(FW_FEATURE_ISERIES))
221 get_lppaca()->int_dword.fields.decr_int = 0;
222 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
223 index 7cf4642..11e94de 100644
224 --- a/arch/s390/kernel/ptrace.c
225 +++ b/arch/s390/kernel/ptrace.c
226 @@ -640,7 +640,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
227
228 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
229 {
230 - long ret;
231 + long ret = 0;
232
233 /* Do the secure computing check first. */
234 secure_computing(regs->gprs[2]);
235 @@ -649,7 +649,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
236 * The sysc_tracesys code in entry.S stored the system
237 * call number to gprs[2].
238 */
239 - ret = regs->gprs[2];
240 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
241 (tracehook_report_syscall_entry(regs) ||
242 regs->gprs[2] >= NR_syscalls)) {
243 @@ -671,7 +670,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
244 regs->gprs[2], regs->orig_gpr2,
245 regs->gprs[3], regs->gprs[4],
246 regs->gprs[5]);
247 - return ret;
248 + return ret ?: regs->gprs[2];
249 }
250
251 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
252 diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
253 index f70e600..af00bd1 100644
254 --- a/arch/x86/include/asm/k8.h
255 +++ b/arch/x86/include/asm/k8.h
256 @@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
257 extern int k8_scan_nodes(void);
258
259 #ifdef CONFIG_K8_NB
260 +extern int num_k8_northbridges;
261 +
262 static inline struct pci_dev *node_to_k8_nb_misc(int node)
263 {
264 return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
265 }
266 +
267 #else
268 +#define num_k8_northbridges 0
269 +
270 static inline struct pci_dev *node_to_k8_nb_misc(int node)
271 {
272 return NULL;
273 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
274 index d440123..581924b 100644
275 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
276 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
277 @@ -338,6 +338,10 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
278 (boot_cpu_data.x86_mask < 0x1)))
279 return;
280
281 + /* not in virtualized environments */
282 + if (num_k8_northbridges == 0)
283 + return;
284 +
285 this_leaf->can_disable = true;
286 this_leaf->l3_indices = amd_calc_l3_indices();
287 }
288 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
289 index 999c8a6..0571b72 100644
290 --- a/arch/x86/kernel/process.c
291 +++ b/arch/x86/kernel/process.c
292 @@ -539,11 +539,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
293 * check OSVW bit for CPUs that are not affected
294 * by erratum #400
295 */
296 - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
297 - if (val >= 2) {
298 - rdmsrl(MSR_AMD64_OSVW_STATUS, val);
299 - if (!(val & BIT(1)))
300 - goto no_c1e_idle;
301 + if (cpu_has(c, X86_FEATURE_OSVW)) {
302 + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
303 + if (val >= 2) {
304 + rdmsrl(MSR_AMD64_OSVW_STATUS, val);
305 + if (!(val & BIT(1)))
306 + goto no_c1e_idle;
307 + }
308 }
309 return 1;
310 }
311 diff --git a/crypto/authenc.c b/crypto/authenc.c
312 index 4d6f49a..0d54de9 100644
313 --- a/crypto/authenc.c
314 +++ b/crypto/authenc.c
315 @@ -46,6 +46,12 @@ struct authenc_request_ctx {
316 char tail[];
317 };
318
319 +static void authenc_request_complete(struct aead_request *req, int err)
320 +{
321 + if (err != -EINPROGRESS)
322 + aead_request_complete(req, err);
323 +}
324 +
325 static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
326 unsigned int keylen)
327 {
328 @@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
329 crypto_aead_authsize(authenc), 1);
330
331 out:
332 - aead_request_complete(req, err);
333 + authenc_request_complete(req, err);
334 }
335
336 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
337 @@ -208,7 +214,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
338 err = crypto_ablkcipher_decrypt(abreq);
339
340 out:
341 - aead_request_complete(req, err);
342 + authenc_request_complete(req, err);
343 }
344
345 static void authenc_verify_ahash_done(struct crypto_async_request *areq,
346 @@ -245,7 +251,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
347 err = crypto_ablkcipher_decrypt(abreq);
348
349 out:
350 - aead_request_complete(req, err);
351 + authenc_request_complete(req, err);
352 }
353
354 static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
355 @@ -379,7 +385,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
356 err = crypto_authenc_genicv(areq, iv, 0);
357 }
358
359 - aead_request_complete(areq, err);
360 + authenc_request_complete(areq, err);
361 }
362
363 static int crypto_authenc_encrypt(struct aead_request *req)
364 @@ -418,7 +424,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
365 err = crypto_authenc_genicv(areq, greq->giv, 0);
366 }
367
368 - aead_request_complete(areq, err);
369 + authenc_request_complete(areq, err);
370 }
371
372 static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
373 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
374 index 7c85265..9ed9292 100644
375 --- a/drivers/acpi/sleep.c
376 +++ b/drivers/acpi/sleep.c
377 @@ -475,101 +475,13 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
378 },
379 {
380 .callback = init_set_sci_en_on_resume,
381 - .ident = "Lenovo ThinkPad X201",
382 + .ident = "Lenovo ThinkPad X201[s]",
383 .matches = {
384 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
385 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
386 },
387 },
388 {
389 - .callback = init_set_sci_en_on_resume,
390 - .ident = "Lenovo ThinkPad X201",
391 - .matches = {
392 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
393 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
394 - },
395 - },
396 - {
397 - .callback = init_set_sci_en_on_resume,
398 - .ident = "Lenovo ThinkPad T410",
399 - .matches = {
400 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
401 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
402 - },
403 - },
404 - {
405 - .callback = init_set_sci_en_on_resume,
406 - .ident = "Lenovo ThinkPad T510",
407 - .matches = {
408 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
409 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
410 - },
411 - },
412 - {
413 - .callback = init_set_sci_en_on_resume,
414 - .ident = "Lenovo ThinkPad W510",
415 - .matches = {
416 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
417 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
418 - },
419 - },
420 - {
421 - .callback = init_set_sci_en_on_resume,
422 - .ident = "Lenovo ThinkPad X201",
423 - .matches = {
424 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
425 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
426 - },
427 - },
428 - {
429 - .callback = init_set_sci_en_on_resume,
430 - .ident = "Lenovo ThinkPad X201",
431 - .matches = {
432 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
433 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
434 - },
435 - },
436 - {
437 - .callback = init_set_sci_en_on_resume,
438 - .ident = "Lenovo ThinkPad T410",
439 - .matches = {
440 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
441 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
442 - },
443 - },
444 - {
445 - .callback = init_set_sci_en_on_resume,
446 - .ident = "Lenovo ThinkPad T510",
447 - .matches = {
448 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
449 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
450 - },
451 - },
452 - {
453 - .callback = init_set_sci_en_on_resume,
454 - .ident = "Lenovo ThinkPad W510",
455 - .matches = {
456 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
457 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
458 - },
459 - },
460 - {
461 - .callback = init_set_sci_en_on_resume,
462 - .ident = "Lenovo ThinkPad X201",
463 - .matches = {
464 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
465 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
466 - },
467 - },
468 - {
469 - .callback = init_set_sci_en_on_resume,
470 - .ident = "Lenovo ThinkPad X201",
471 - .matches = {
472 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
473 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
474 - },
475 - },
476 - {
477 .callback = init_old_suspend_ordering,
478 .ident = "Panasonic CF51-2L",
479 .matches = {
480 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
481 index 76253cf..9af6766 100644
482 --- a/drivers/char/tty_io.c
483 +++ b/drivers/char/tty_io.c
484 @@ -1875,6 +1875,7 @@ got_driver:
485 */
486 if (filp->f_op == &hung_up_tty_fops)
487 filp->f_op = &tty_fops;
488 + unlock_kernel();
489 goto retry_open;
490 }
491 unlock_kernel();
492 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
493 index cf4cb3e..4746bfe 100644
494 --- a/drivers/gpu/drm/i915/i915_drv.c
495 +++ b/drivers/gpu/drm/i915/i915_drv.c
496 @@ -79,14 +79,14 @@ const static struct intel_device_info intel_i915g_info = {
497 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
498 };
499 const static struct intel_device_info intel_i915gm_info = {
500 - .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
501 + .is_i9xx = 1, .is_mobile = 1,
502 .cursor_needs_physical = 1,
503 };
504 const static struct intel_device_info intel_i945g_info = {
505 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
506 };
507 const static struct intel_device_info intel_i945gm_info = {
508 - .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
509 + .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
510 .has_hotplug = 1, .cursor_needs_physical = 1,
511 };
512
513 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
514 index 16ce3ba..0b33757 100644
515 --- a/drivers/gpu/drm/i915/i915_drv.h
516 +++ b/drivers/gpu/drm/i915/i915_drv.h
517 @@ -206,11 +206,14 @@ typedef struct drm_i915_private {
518
519 drm_dma_handle_t *status_page_dmah;
520 void *hw_status_page;
521 + void *seqno_page;
522 dma_addr_t dma_status_page;
523 uint32_t counter;
524 unsigned int status_gfx_addr;
525 + unsigned int seqno_gfx_addr;
526 drm_local_map_t hws_map;
527 struct drm_gem_object *hws_obj;
528 + struct drm_gem_object *seqno_obj;
529 struct drm_gem_object *pwrctx;
530
531 struct resource mch_res;
532 @@ -1090,6 +1093,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
533
534 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
535 IS_GEN6(dev))
536 +#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
537
538 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
539
540 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
541 index 6458400..c00c978 100644
542 --- a/drivers/gpu/drm/i915/i915_gem.c
543 +++ b/drivers/gpu/drm/i915/i915_gem.c
544 @@ -1559,6 +1559,13 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
545 i915_verify_inactive(dev, __FILE__, __LINE__);
546 }
547
548 +#define PIPE_CONTROL_FLUSH(addr) \
549 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
550 + PIPE_CONTROL_DEPTH_STALL); \
551 + OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
552 + OUT_RING(0); \
553 + OUT_RING(0); \
554 +
555 /**
556 * Creates a new sequence number, emitting a write of it to the status page
557 * plus an interrupt, which will trigger i915_user_interrupt_handler.
558 @@ -1593,13 +1600,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
559 if (dev_priv->mm.next_gem_seqno == 0)
560 dev_priv->mm.next_gem_seqno++;
561
562 - BEGIN_LP_RING(4);
563 - OUT_RING(MI_STORE_DWORD_INDEX);
564 - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
565 - OUT_RING(seqno);
566 + if (HAS_PIPE_CONTROL(dev)) {
567 + u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
568
569 - OUT_RING(MI_USER_INTERRUPT);
570 - ADVANCE_LP_RING();
571 + /*
572 + * Workaround qword write incoherence by flushing the
573 + * PIPE_NOTIFY buffers out to memory before requesting
574 + * an interrupt.
575 + */
576 + BEGIN_LP_RING(32);
577 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
578 + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
579 + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
580 + OUT_RING(seqno);
581 + OUT_RING(0);
582 + PIPE_CONTROL_FLUSH(scratch_addr);
583 + scratch_addr += 128; /* write to separate cachelines */
584 + PIPE_CONTROL_FLUSH(scratch_addr);
585 + scratch_addr += 128;
586 + PIPE_CONTROL_FLUSH(scratch_addr);
587 + scratch_addr += 128;
588 + PIPE_CONTROL_FLUSH(scratch_addr);
589 + scratch_addr += 128;
590 + PIPE_CONTROL_FLUSH(scratch_addr);
591 + scratch_addr += 128;
592 + PIPE_CONTROL_FLUSH(scratch_addr);
593 + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
594 + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
595 + PIPE_CONTROL_NOTIFY);
596 + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
597 + OUT_RING(seqno);
598 + OUT_RING(0);
599 + ADVANCE_LP_RING();
600 + } else {
601 + BEGIN_LP_RING(4);
602 + OUT_RING(MI_STORE_DWORD_INDEX);
603 + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
604 + OUT_RING(seqno);
605 +
606 + OUT_RING(MI_USER_INTERRUPT);
607 + ADVANCE_LP_RING();
608 + }
609
610 DRM_DEBUG_DRIVER("%d\n", seqno);
611
612 @@ -1744,7 +1785,10 @@ i915_get_gem_seqno(struct drm_device *dev)
613 {
614 drm_i915_private_t *dev_priv = dev->dev_private;
615
616 - return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
617 + if (HAS_PIPE_CONTROL(dev))
618 + return ((volatile u32 *)(dev_priv->seqno_page))[0];
619 + else
620 + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
621 }
622
623 /**
624 @@ -4576,6 +4620,49 @@ i915_gem_idle(struct drm_device *dev)
625 return 0;
626 }
627
628 +/*
629 + * 965+ support PIPE_CONTROL commands, which provide finer grained control
630 + * over cache flushing.
631 + */
632 +static int
633 +i915_gem_init_pipe_control(struct drm_device *dev)
634 +{
635 + drm_i915_private_t *dev_priv = dev->dev_private;
636 + struct drm_gem_object *obj;
637 + struct drm_i915_gem_object *obj_priv;
638 + int ret;
639 +
640 + obj = drm_gem_object_alloc(dev, 4096);
641 + if (obj == NULL) {
642 + DRM_ERROR("Failed to allocate seqno page\n");
643 + ret = -ENOMEM;
644 + goto err;
645 + }
646 + obj_priv = obj->driver_private;
647 + obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
648 +
649 + ret = i915_gem_object_pin(obj, 4096);
650 + if (ret)
651 + goto err_unref;
652 +
653 + dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
654 + dev_priv->seqno_page = kmap(obj_priv->pages[0]);
655 + if (dev_priv->seqno_page == NULL)
656 + goto err_unpin;
657 +
658 + dev_priv->seqno_obj = obj;
659 + memset(dev_priv->seqno_page, 0, PAGE_SIZE);
660 +
661 + return 0;
662 +
663 +err_unpin:
664 + i915_gem_object_unpin(obj);
665 +err_unref:
666 + drm_gem_object_unreference(obj);
667 +err:
668 + return ret;
669 +}
670 +
671 static int
672 i915_gem_init_hws(struct drm_device *dev)
673 {
674 @@ -4593,7 +4680,8 @@ i915_gem_init_hws(struct drm_device *dev)
675 obj = drm_gem_object_alloc(dev, 4096);
676 if (obj == NULL) {
677 DRM_ERROR("Failed to allocate status page\n");
678 - return -ENOMEM;
679 + ret = -ENOMEM;
680 + goto err;
681 }
682 obj_priv = obj->driver_private;
683 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
684 @@ -4601,7 +4689,7 @@ i915_gem_init_hws(struct drm_device *dev)
685 ret = i915_gem_object_pin(obj, 4096);
686 if (ret != 0) {
687 drm_gem_object_unreference(obj);
688 - return ret;
689 + goto err_unref;
690 }
691
692 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
693 @@ -4610,10 +4698,16 @@ i915_gem_init_hws(struct drm_device *dev)
694 if (dev_priv->hw_status_page == NULL) {
695 DRM_ERROR("Failed to map status page.\n");
696 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
697 - i915_gem_object_unpin(obj);
698 - drm_gem_object_unreference(obj);
699 - return -EINVAL;
700 + ret = -EINVAL;
701 + goto err_unpin;
702 }
703 +
704 + if (HAS_PIPE_CONTROL(dev)) {
705 + ret = i915_gem_init_pipe_control(dev);
706 + if (ret)
707 + goto err_unpin;
708 + }
709 +
710 dev_priv->hws_obj = obj;
711 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
712 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
713 @@ -4621,6 +4715,30 @@ i915_gem_init_hws(struct drm_device *dev)
714 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
715
716 return 0;
717 +
718 +err_unpin:
719 + i915_gem_object_unpin(obj);
720 +err_unref:
721 + drm_gem_object_unreference(obj);
722 +err:
723 + return 0;
724 +}
725 +
726 +static void
727 +i915_gem_cleanup_pipe_control(struct drm_device *dev)
728 +{
729 + drm_i915_private_t *dev_priv = dev->dev_private;
730 + struct drm_gem_object *obj;
731 + struct drm_i915_gem_object *obj_priv;
732 +
733 + obj = dev_priv->seqno_obj;
734 + obj_priv = obj->driver_private;
735 + kunmap(obj_priv->pages[0]);
736 + i915_gem_object_unpin(obj);
737 + drm_gem_object_unreference(obj);
738 + dev_priv->seqno_obj = NULL;
739 +
740 + dev_priv->seqno_page = NULL;
741 }
742
743 static void
744 @@ -4644,6 +4762,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
745 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
746 dev_priv->hw_status_page = NULL;
747
748 + if (HAS_PIPE_CONTROL(dev))
749 + i915_gem_cleanup_pipe_control(dev);
750 +
751 /* Write high address into HWS_PGA when disabling. */
752 I915_WRITE(HWS_PGA, 0x1ffff000);
753 }
754 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
755 index 032f667..d6466d5 100644
756 --- a/drivers/gpu/drm/i915/i915_irq.c
757 +++ b/drivers/gpu/drm/i915/i915_irq.c
758 @@ -297,7 +297,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
759 READ_BREADCRUMB(dev_priv);
760 }
761
762 - if (gt_iir & GT_USER_INTERRUPT) {
763 + if (gt_iir & GT_PIPE_NOTIFY) {
764 u32 seqno = i915_get_gem_seqno(dev);
765 dev_priv->mm.irq_gem_seqno = seqno;
766 trace_i915_gem_request_complete(dev, seqno);
767 @@ -738,7 +738,7 @@ void i915_user_irq_get(struct drm_device *dev)
768 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
769 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
770 if (HAS_PCH_SPLIT(dev))
771 - ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
772 + ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
773 else
774 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
775 }
776 @@ -754,7 +754,7 @@ void i915_user_irq_put(struct drm_device *dev)
777 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
778 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
779 if (HAS_PCH_SPLIT(dev))
780 - ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
781 + ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
782 else
783 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
784 }
785 @@ -1034,7 +1034,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
786 /* enable kind of interrupts always enabled */
787 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
788 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
789 - u32 render_mask = GT_USER_INTERRUPT;
790 + u32 render_mask = GT_PIPE_NOTIFY;
791 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
792 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
793
794 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
795 index fd95bdf..30a2322 100644
796 --- a/drivers/gpu/drm/i915/i915_reg.h
797 +++ b/drivers/gpu/drm/i915/i915_reg.h
798 @@ -210,6 +210,16 @@
799 #define ASYNC_FLIP (1<<22)
800 #define DISPLAY_PLANE_A (0<<20)
801 #define DISPLAY_PLANE_B (1<<20)
802 +#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
803 +#define PIPE_CONTROL_QW_WRITE (1<<14)
804 +#define PIPE_CONTROL_DEPTH_STALL (1<<13)
805 +#define PIPE_CONTROL_WC_FLUSH (1<<12)
806 +#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
807 +#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
808 +#define PIPE_CONTROL_ISP_DIS (1<<9)
809 +#define PIPE_CONTROL_NOTIFY (1<<8)
810 +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
811 +#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
812
813 /*
814 * Fence registers
815 @@ -2111,6 +2121,7 @@
816 #define DEIER 0x4400c
817
818 /* GT interrupt */
819 +#define GT_PIPE_NOTIFY (1 << 4)
820 #define GT_SYNC_STATUS (1 << 2)
821 #define GT_USER_INTERRUPT (1 << 0)
822
823 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
824 index 4b2458d..3f00902 100644
825 --- a/drivers/gpu/drm/i915/intel_display.c
826 +++ b/drivers/gpu/drm/i915/intel_display.c
827 @@ -4683,7 +4683,7 @@ static void intel_init_display(struct drm_device *dev)
828 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
829 dev_priv->display.enable_fbc = g4x_enable_fbc;
830 dev_priv->display.disable_fbc = g4x_disable_fbc;
831 - } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
832 + } else if (IS_I965GM(dev)) {
833 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
834 dev_priv->display.enable_fbc = i8xx_enable_fbc;
835 dev_priv->display.disable_fbc = i8xx_disable_fbc;
836 diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
837 index be475e8..f16d60f 100644
838 --- a/drivers/hwmon/hp_accel.c
839 +++ b/drivers/hwmon/hp_accel.c
840 @@ -324,8 +324,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
841 lis3lv02d_joystick_disable();
842 lis3lv02d_poweroff(&lis3_dev);
843
844 - flush_work(&hpled_led.work);
845 led_classdev_unregister(&hpled_led.led_classdev);
846 + flush_work(&hpled_led.work);
847
848 return lis3lv02d_remove_fs(&lis3_dev);
849 }
850 diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
851 index 8072128..ee337df 100644
852 --- a/drivers/mmc/host/atmel-mci.c
853 +++ b/drivers/mmc/host/atmel-mci.c
854 @@ -568,9 +568,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
855 {
856 struct mmc_data *data = host->data;
857
858 - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
859 - ((data->flags & MMC_DATA_WRITE)
860 - ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
861 + if (data)
862 + dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
863 + ((data->flags & MMC_DATA_WRITE)
864 + ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
865 }
866
867 static void atmci_stop_dma(struct atmel_mci *host)
868 @@ -1098,8 +1099,8 @@ static void atmci_command_complete(struct atmel_mci *host,
869 "command error: status=0x%08x\n", status);
870
871 if (cmd->data) {
872 - host->data = NULL;
873 atmci_stop_dma(host);
874 + host->data = NULL;
875 mci_writel(host, IDR, MCI_NOTBUSY
876 | MCI_TXRDY | MCI_RXRDY
877 | ATMCI_DATA_ERROR_FLAGS);
878 @@ -1292,6 +1293,7 @@ static void atmci_tasklet_func(unsigned long priv)
879 } else {
880 data->bytes_xfered = data->blocks * data->blksz;
881 data->error = 0;
882 + mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
883 }
884
885 if (!data->stop) {
886 @@ -1750,13 +1752,13 @@ static int __init atmci_probe(struct platform_device *pdev)
887 ret = -ENODEV;
888 if (pdata->slot[0].bus_width) {
889 ret = atmci_init_slot(host, &pdata->slot[0],
890 - MCI_SDCSEL_SLOT_A, 0);
891 + 0, MCI_SDCSEL_SLOT_A);
892 if (!ret)
893 nr_slots++;
894 }
895 if (pdata->slot[1].bus_width) {
896 ret = atmci_init_slot(host, &pdata->slot[1],
897 - MCI_SDCSEL_SLOT_B, 1);
898 + 1, MCI_SDCSEL_SLOT_B);
899 if (!ret)
900 nr_slots++;
901 }
902 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
903 index c3ce920..8b7c267 100644
904 --- a/drivers/net/wireless/ath/ath9k/xmit.c
905 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
906 @@ -2244,7 +2244,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
907 if (ATH_TXQ_SETUP(sc, i)) {
908 txq = &sc->tx.txq[i];
909
910 - spin_lock_bh(&txq->axq_lock);
911 + spin_lock(&txq->axq_lock);
912
913 list_for_each_entry_safe(ac,
914 ac_tmp, &txq->axq_acq, list) {
915 @@ -2265,7 +2265,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
916 }
917 }
918
919 - spin_unlock_bh(&txq->axq_lock);
920 + spin_unlock(&txq->axq_lock);
921 }
922 }
923 }
924 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
925 index 3b4c5a4..82c1d2e 100644
926 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
927 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
928 @@ -581,6 +581,11 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
929
930 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
931
932 + /* make sure all queue are not stopped */
933 + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
934 + for (i = 0; i < 4; i++)
935 + atomic_set(&priv->queue_stop_count[i], 0);
936 +
937 /* reset to 0 to enable all the queue first */
938 priv->txq_ctx_active_msk = 0;
939 /* Map each Tx/cmd queue to its corresponding fifo */
940 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
941 index c610e5f..f7d41c7 100644
942 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
943 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
944 @@ -657,6 +657,11 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
945
946 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
947
948 + /* make sure all queue are not stopped */
949 + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
950 + for (i = 0; i < 4; i++)
951 + atomic_set(&priv->queue_stop_count[i], 0);
952 +
953 /* reset to 0 to enable all the queue first */
954 priv->txq_ctx_active_msk = 0;
955 /* map qos queues to fifos one-to-one */
956 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
957 index b93e491..3534d86 100644
958 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
959 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
960 @@ -298,10 +298,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
961 struct iwl_lq_sta *lq_data, u8 tid,
962 struct ieee80211_sta *sta)
963 {
964 + int ret;
965 +
966 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
967 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
968 sta->addr, tid);
969 - ieee80211_start_tx_ba_session(sta, tid);
970 + ret = ieee80211_start_tx_ba_session(sta, tid);
971 + if (ret == -EAGAIN) {
972 + /*
973 + * driver and mac80211 is out of sync
974 + * this might be cause by reloading firmware
975 + * stop the tx ba session here
976 + */
977 + IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
978 + tid);
979 + ret = ieee80211_stop_tx_ba_session(sta, tid,
980 + WLAN_BACK_INITIATOR);
981 + }
982 }
983 }
984
985 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
986 index 88470fb..e0ce039 100644
987 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
988 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
989 @@ -821,8 +821,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
990 hdr->seq_ctrl |= cpu_to_le16(seq_number);
991 seq_number += 0x10;
992 /* aggregation is on for this <sta,tid> */
993 - if (info->flags & IEEE80211_TX_CTL_AMPDU)
994 + if (info->flags & IEEE80211_TX_CTL_AMPDU &&
995 + priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
996 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
997 + }
998 }
999
1000 txq = &priv->txq[txq_id];
1001 @@ -1347,7 +1349,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1002 {
1003 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1004 struct iwl_tid_data *tid_data;
1005 - int ret, write_ptr, read_ptr;
1006 + int write_ptr, read_ptr;
1007 unsigned long flags;
1008
1009 if (!ra) {
1010 @@ -1399,13 +1401,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1011 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1012
1013 spin_lock_irqsave(&priv->lock, flags);
1014 - ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1015 + /*
1016 + * the only reason this call can fail is queue number out of range,
1017 + * which can happen if uCode is reloaded and all the station
1018 + * information are lost. if it is outside the range, there is no need
1019 + * to deactivate the uCode queue, just return "success" to allow
1020 + * mac80211 to clean up it own data.
1021 + */
1022 + priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1023 tx_fifo_id);
1024 spin_unlock_irqrestore(&priv->lock, flags);
1025
1026 - if (ret)
1027 - return ret;
1028 -
1029 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1030
1031 return 0;
1032 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1033 index 5905936..e4bd795 100644
1034 --- a/drivers/s390/block/dasd.c
1035 +++ b/drivers/s390/block/dasd.c
1036 @@ -35,6 +35,9 @@
1037 */
1038 #define DASD_CHANQ_MAX_SIZE 4
1039
1040 +#define DASD_SLEEPON_START_TAG (void *) 1
1041 +#define DASD_SLEEPON_END_TAG (void *) 2
1042 +
1043 /*
1044 * SECTION: exported variables of dasd.c
1045 */
1046 @@ -1460,7 +1463,10 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1047 */
1048 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1049 {
1050 - wake_up((wait_queue_head_t *) data);
1051 + spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1052 + cqr->callback_data = DASD_SLEEPON_END_TAG;
1053 + spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1054 + wake_up(&generic_waitq);
1055 }
1056
1057 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1058 @@ -1470,10 +1476,7 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1059
1060 device = cqr->startdev;
1061 spin_lock_irq(get_ccwdev_lock(device->cdev));
1062 - rc = ((cqr->status == DASD_CQR_DONE ||
1063 - cqr->status == DASD_CQR_NEED_ERP ||
1064 - cqr->status == DASD_CQR_TERMINATED) &&
1065 - list_empty(&cqr->devlist));
1066 + rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
1067 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1068 return rc;
1069 }
1070 @@ -1561,7 +1564,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1071 wait_event(generic_waitq, !(device->stopped));
1072
1073 cqr->callback = dasd_wakeup_cb;
1074 - cqr->callback_data = (void *) &generic_waitq;
1075 + cqr->callback_data = DASD_SLEEPON_START_TAG;
1076 dasd_add_request_tail(cqr);
1077 if (interruptible) {
1078 rc = wait_event_interruptible(
1079 @@ -1640,7 +1643,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1080 }
1081
1082 cqr->callback = dasd_wakeup_cb;
1083 - cqr->callback_data = (void *) &generic_waitq;
1084 + cqr->callback_data = DASD_SLEEPON_START_TAG;
1085 cqr->status = DASD_CQR_QUEUED;
1086 list_add(&cqr->devlist, &device->ccw_queue);
1087
1088 diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
1089 index d00fcf8..fd6b135 100644
1090 --- a/drivers/serial/imx.c
1091 +++ b/drivers/serial/imx.c
1092 @@ -119,7 +119,8 @@
1093 #define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
1094 #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
1095 #define UCR3_BPEN (1<<0) /* Preset registers enable */
1096 -#define UCR4_CTSTL_32 (32<<10) /* CTS trigger level (32 chars) */
1097 +#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
1098 +#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
1099 #define UCR4_INVR (1<<9) /* Inverted infrared reception */
1100 #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
1101 #define UCR4_WKEN (1<<7) /* Wake interrupt enable */
1102 @@ -590,6 +591,9 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
1103 return 0;
1104 }
1105
1106 +/* half the RX buffer size */
1107 +#define CTSTL 16
1108 +
1109 static int imx_startup(struct uart_port *port)
1110 {
1111 struct imx_port *sport = (struct imx_port *)port;
1112 @@ -606,6 +610,10 @@ static int imx_startup(struct uart_port *port)
1113 if (USE_IRDA(sport))
1114 temp |= UCR4_IRSC;
1115
1116 + /* set the trigger level for CTS */
1117 + temp &= ~(UCR4_CTSTL_MASK<< UCR4_CTSTL_SHF);
1118 + temp |= CTSTL<< UCR4_CTSTL_SHF;
1119 +
1120 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1121
1122 if (USE_IRDA(sport)) {
1123 diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
1124 index 2549c53..6c8b6b6 100644
1125 --- a/drivers/video/bfin-t350mcqb-fb.c
1126 +++ b/drivers/video/bfin-t350mcqb-fb.c
1127 @@ -515,9 +515,9 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
1128 fbinfo->fbops = &bfin_t350mcqb_fb_ops;
1129 fbinfo->flags = FBINFO_FLAG_DEFAULT;
1130
1131 - info->fb_buffer =
1132 - dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
1133 - GFP_KERNEL);
1134 + info->fb_buffer = dma_alloc_coherent(NULL, fbinfo->fix.smem_len +
1135 + ACTIVE_VIDEO_MEM_OFFSET,
1136 + &info->dma_handle, GFP_KERNEL);
1137
1138 if (NULL == info->fb_buffer) {
1139 printk(KERN_ERR DRIVER_NAME
1140 @@ -587,8 +587,8 @@ out7:
1141 out6:
1142 fb_dealloc_cmap(&fbinfo->cmap);
1143 out4:
1144 - dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
1145 - info->dma_handle);
1146 + dma_free_coherent(NULL, fbinfo->fix.smem_len + ACTIVE_VIDEO_MEM_OFFSET,
1147 + info->fb_buffer, info->dma_handle);
1148 out3:
1149 framebuffer_release(fbinfo);
1150 out2:
1151 @@ -611,8 +611,9 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
1152 free_irq(info->irq, info);
1153
1154 if (info->fb_buffer != NULL)
1155 - dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
1156 - info->dma_handle);
1157 + dma_free_coherent(NULL, fbinfo->fix.smem_len +
1158 + ACTIVE_VIDEO_MEM_OFFSET, info->fb_buffer,
1159 + info->dma_handle);
1160
1161 fb_dealloc_cmap(&fbinfo->cmap);
1162
1163 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1164 index 645a179..2c6ee6a 100644
1165 --- a/fs/btrfs/ioctl.c
1166 +++ b/fs/btrfs/ioctl.c
1167 @@ -964,12 +964,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1168 ret = -EBADF;
1169 goto out_drop_write;
1170 }
1171 +
1172 src = src_file->f_dentry->d_inode;
1173
1174 ret = -EINVAL;
1175 if (src == inode)
1176 goto out_fput;
1177
1178 + /* the src must be open for reading */
1179 + if (!(src_file->f_mode & FMODE_READ))
1180 + goto out_fput;
1181 +
1182 ret = -EISDIR;
1183 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
1184 goto out_fput;
1185 diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
1186 index b5808cd..039b501 100644
1187 --- a/fs/cachefiles/security.c
1188 +++ b/fs/cachefiles/security.c
1189 @@ -77,6 +77,8 @@ static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
1190 /*
1191 * check the security details of the on-disk cache
1192 * - must be called with security override in force
1193 + * - must return with a security override in force - even in the case of an
1194 + * error
1195 */
1196 int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
1197 struct dentry *root,
1198 @@ -99,6 +101,8 @@ int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
1199 * which create files */
1200 ret = set_create_files_as(new, root->d_inode);
1201 if (ret < 0) {
1202 + abort_creds(new);
1203 + cachefiles_begin_secure(cache, _saved_cred);
1204 _leave(" = %d [cfa]", ret);
1205 return ret;
1206 }
1207 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1208 index ed751bb..2568889 100644
1209 --- a/fs/cifs/cifsglob.h
1210 +++ b/fs/cifs/cifsglob.h
1211 @@ -500,6 +500,7 @@ struct dfs_info3_param {
1212 #define CIFS_FATTR_DFS_REFERRAL 0x1
1213 #define CIFS_FATTR_DELETE_PENDING 0x2
1214 #define CIFS_FATTR_NEED_REVAL 0x4
1215 +#define CIFS_FATTR_INO_COLLISION 0x8
1216
1217 struct cifs_fattr {
1218 u32 cf_flags;
1219 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1220 index e3fda97..7ec8555 100644
1221 --- a/fs/cifs/inode.c
1222 +++ b/fs/cifs/inode.c
1223 @@ -610,6 +610,16 @@ cifs_find_inode(struct inode *inode, void *opaque)
1224 if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
1225 return 0;
1226
1227 + /*
1228 + * uh oh -- it's a directory. We can't use it since hardlinked dirs are
1229 + * verboten. Disable serverino and return it as if it were found, the
1230 + * caller can discard it, generate a uniqueid and retry the find
1231 + */
1232 + if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
1233 + fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
1234 + cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
1235 + }
1236 +
1237 return 1;
1238 }
1239
1240 @@ -629,15 +639,22 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
1241 unsigned long hash;
1242 struct inode *inode;
1243
1244 +retry_iget5_locked:
1245 cFYI(1, ("looking for uniqueid=%llu", fattr->cf_uniqueid));
1246
1247 /* hash down to 32-bits on 32-bit arch */
1248 hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
1249
1250 inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
1251 -
1252 - /* we have fattrs in hand, update the inode */
1253 if (inode) {
1254 + /* was there a problematic inode number collision? */
1255 + if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
1256 + iput(inode);
1257 + fattr->cf_uniqueid = iunique(sb, ROOT_I);
1258 + fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
1259 + goto retry_iget5_locked;
1260 + }
1261 +
1262 cifs_fattr_to_inode(inode, fattr);
1263 if (sb->s_flags & MS_NOATIME)
1264 inode->i_flags |= S_NOATIME | S_NOCMTIME;
1265 diff --git a/fs/compat.c b/fs/compat.c
1266 index 00d90c2..514b623 100644
1267 --- a/fs/compat.c
1268 +++ b/fs/compat.c
1269 @@ -1530,8 +1530,6 @@ int compat_do_execve(char * filename,
1270 if (retval < 0)
1271 goto out;
1272
1273 - current->stack_start = current->mm->start_stack;
1274 -
1275 /* execve succeeded */
1276 current->fs->in_exec = 0;
1277 current->in_execve = 0;
1278 diff --git a/fs/exec.c b/fs/exec.c
1279 index 9071360..332f781 100644
1280 --- a/fs/exec.c
1281 +++ b/fs/exec.c
1282 @@ -1386,8 +1386,6 @@ int do_execve(char * filename,
1283 if (retval < 0)
1284 goto out;
1285
1286 - current->stack_start = current->mm->start_stack;
1287 -
1288 /* execve succeeded */
1289 current->fs->in_exec = 0;
1290 current->in_execve = 0;
1291 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
1292 index 8173fae..4d3ddcc 100644
1293 --- a/fs/nilfs2/super.c
1294 +++ b/fs/nilfs2/super.c
1295 @@ -746,6 +746,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
1296 sb->s_export_op = &nilfs_export_ops;
1297 sb->s_root = NULL;
1298 sb->s_time_gran = 1;
1299 + sb->s_bdi = nilfs->ns_bdi;
1300
1301 err = load_nilfs(nilfs, sbi);
1302 if (err)
1303 diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
1304 index 1afb0a1..e27960c 100644
1305 --- a/fs/notify/inotify/inotify_fsnotify.c
1306 +++ b/fs/notify/inotify/inotify_fsnotify.c
1307 @@ -28,6 +28,7 @@
1308 #include <linux/path.h> /* struct path */
1309 #include <linux/slab.h> /* kmem_* */
1310 #include <linux/types.h>
1311 +#include <linux/sched.h>
1312
1313 #include "inotify.h"
1314
1315 @@ -146,6 +147,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
1316 idr_for_each(&group->inotify_data.idr, idr_callback, group);
1317 idr_remove_all(&group->inotify_data.idr);
1318 idr_destroy(&group->inotify_data.idr);
1319 + free_uid(group->inotify_data.user);
1320 }
1321
1322 void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
1323 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
1324 index a94e8bd..75aa15a 100644
1325 --- a/fs/notify/inotify/inotify_user.c
1326 +++ b/fs/notify/inotify/inotify_user.c
1327 @@ -550,21 +550,24 @@ retry:
1328 if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
1329 goto out_err;
1330
1331 + /* we are putting the mark on the idr, take a reference */
1332 + fsnotify_get_mark(&tmp_ientry->fsn_entry);
1333 +
1334 spin_lock(&group->inotify_data.idr_lock);
1335 ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
1336 group->inotify_data.last_wd+1,
1337 &tmp_ientry->wd);
1338 spin_unlock(&group->inotify_data.idr_lock);
1339 if (ret) {
1340 + /* we didn't get on the idr, drop the idr reference */
1341 + fsnotify_put_mark(&tmp_ientry->fsn_entry);
1342 +
1343 /* idr was out of memory allocate and try again */
1344 if (ret == -EAGAIN)
1345 goto retry;
1346 goto out_err;
1347 }
1348
1349 - /* we put the mark on the idr, take a reference */
1350 - fsnotify_get_mark(&tmp_ientry->fsn_entry);
1351 -
1352 /* we are on the idr, now get on the inode */
1353 ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
1354 if (ret) {
1355 diff --git a/fs/proc/array.c b/fs/proc/array.c
1356 index 13b5d07..69eb4c4 100644
1357 --- a/fs/proc/array.c
1358 +++ b/fs/proc/array.c
1359 @@ -82,7 +82,6 @@
1360 #include <linux/pid_namespace.h>
1361 #include <linux/ptrace.h>
1362 #include <linux/tracehook.h>
1363 -#include <linux/swapops.h>
1364
1365 #include <asm/pgtable.h>
1366 #include <asm/processor.h>
1367 @@ -494,7 +493,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
1368 rsslim,
1369 mm ? mm->start_code : 0,
1370 mm ? mm->end_code : 0,
1371 - (permitted && mm) ? task->stack_start : 0,
1372 + (permitted && mm) ? mm->start_stack : 0,
1373 esp,
1374 eip,
1375 /* The signal information here is obsolete.
1376 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1377 index f277c4a..9df34a5 100644
1378 --- a/fs/proc/task_mmu.c
1379 +++ b/fs/proc/task_mmu.c
1380 @@ -243,25 +243,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
1381 } else if (vma->vm_start <= mm->start_stack &&
1382 vma->vm_end >= mm->start_stack) {
1383 name = "[stack]";
1384 - } else {
1385 - unsigned long stack_start;
1386 - struct proc_maps_private *pmp;
1387 -
1388 - pmp = m->private;
1389 - stack_start = pmp->task->stack_start;
1390 -
1391 - if (vma->vm_start <= stack_start &&
1392 - vma->vm_end >= stack_start) {
1393 - pad_len_spaces(m, len);
1394 - seq_printf(m,
1395 - "[threadstack:%08lx]",
1396 -#ifdef CONFIG_STACK_GROWSUP
1397 - vma->vm_end - stack_start
1398 -#else
1399 - stack_start - vma->vm_start
1400 -#endif
1401 - );
1402 - }
1403 }
1404 } else {
1405 name = "[vdso]";
1406 diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
1407 index e694263..6920695 100644
1408 --- a/include/asm-generic/dma-mapping-common.h
1409 +++ b/include/asm-generic/dma-mapping-common.h
1410 @@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
1411 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
1412
1413 } else
1414 - dma_sync_single_for_cpu(dev, addr, size, dir);
1415 + dma_sync_single_for_cpu(dev, addr + offset, size, dir);
1416 }
1417
1418 static inline void dma_sync_single_range_for_device(struct device *dev,
1419 @@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
1420 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
1421
1422 } else
1423 - dma_sync_single_for_device(dev, addr, size, dir);
1424 + dma_sync_single_for_device(dev, addr + offset, size, dir);
1425 }
1426
1427 static inline void
1428 diff --git a/include/linux/sched.h b/include/linux/sched.h
1429 index 1f5fa53..db821a4 100644
1430 --- a/include/linux/sched.h
1431 +++ b/include/linux/sched.h
1432 @@ -1560,7 +1560,6 @@ struct task_struct {
1433 /* bitmask of trace recursion */
1434 unsigned long trace_recursion;
1435 #endif /* CONFIG_TRACING */
1436 - unsigned long stack_start;
1437 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1438 struct memcg_batch_info {
1439 int do_batch; /* incremented when batch uncharge started */
1440 diff --git a/kernel/fork.c b/kernel/fork.c
1441 index f88bd98..0ea67a3 100644
1442 --- a/kernel/fork.c
1443 +++ b/kernel/fork.c
1444 @@ -1134,8 +1134,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1445
1446 p->bts = NULL;
1447
1448 - p->stack_start = stack_start;
1449 -
1450 /* Perform scheduler related setup. Assign this task to a CPU. */
1451 sched_fork(p, clone_flags);
1452
1453 diff --git a/kernel/profile.c b/kernel/profile.c
1454 index a55d3a3..dfadc5b 100644
1455 --- a/kernel/profile.c
1456 +++ b/kernel/profile.c
1457 @@ -127,8 +127,10 @@ int __ref profile_init(void)
1458 return 0;
1459
1460 prof_buffer = vmalloc(buffer_bytes);
1461 - if (prof_buffer)
1462 + if (prof_buffer) {
1463 + memset(prof_buffer, 0, buffer_bytes);
1464 return 0;
1465 + }
1466
1467 free_cpumask_var(prof_cpu_mask);
1468 return -ENOMEM;
1469 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1470 index fd9ba95..e8d9544 100644
1471 --- a/mm/hugetlb.c
1472 +++ b/mm/hugetlb.c
1473 @@ -1039,7 +1039,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1474 page = alloc_buddy_huge_page(h, vma, addr);
1475 if (!page) {
1476 hugetlb_put_quota(inode->i_mapping, chg);
1477 - return ERR_PTR(-VM_FAULT_OOM);
1478 + return ERR_PTR(-VM_FAULT_SIGBUS);
1479 }
1480 }
1481
1482 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1483 index 16190ca..955f0b2 100644
1484 --- a/net/ipv4/udp.c
1485 +++ b/net/ipv4/udp.c
1486 @@ -1527,6 +1527,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1487
1488 uh = udp_hdr(skb);
1489 ulen = ntohs(uh->len);
1490 + saddr = ip_hdr(skb)->saddr;
1491 + daddr = ip_hdr(skb)->daddr;
1492 +
1493 if (ulen > skb->len)
1494 goto short_packet;
1495
1496 @@ -1540,9 +1543,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1497 if (udp4_csum_init(skb, uh, proto))
1498 goto csum_error;
1499
1500 - saddr = ip_hdr(skb)->saddr;
1501 - daddr = ip_hdr(skb)->daddr;
1502 -
1503 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1504 return __udp4_lib_mcast_deliver(net, skb, uh,
1505 saddr, daddr, udptable);
1506 diff --git a/security/min_addr.c b/security/min_addr.c
1507 index e86f297..f728728 100644
1508 --- a/security/min_addr.c
1509 +++ b/security/min_addr.c
1510 @@ -33,7 +33,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
1511 {
1512 int ret;
1513
1514 - if (!capable(CAP_SYS_RAWIO))
1515 + if (write && !capable(CAP_SYS_RAWIO))
1516 return -EPERM;
1517
1518 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
1519 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1520 index fd831bd..a747871 100644
1521 --- a/sound/pci/hda/hda_intel.c
1522 +++ b/sound/pci/hda/hda_intel.c
1523 @@ -2718,6 +2718,7 @@ static struct pci_device_id azx_ids[] = {
1524 { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
1525 /* PCH */
1526 { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
1527 + { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
1528 /* CPT */
1529 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
1530 /* SCH */
1531 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1532 index 1a97c81..a978645 100644
1533 --- a/sound/pci/hda/patch_conexant.c
1534 +++ b/sound/pci/hda/patch_conexant.c
1535 @@ -1176,9 +1176,10 @@ static int patch_cxt5045(struct hda_codec *codec)
1536 case 0x103c:
1537 case 0x1631:
1538 case 0x1734:
1539 - /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
1540 - * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
1541 - * (originally it has 0x2b steps with 0dB offset 0x14)
1542 + case 0x17aa:
1543 + /* HP, Packard Bell, Fujitsu-Siemens & Lenovo laptops have
1544 + * really bad sound over 0dB on NID 0x17. Fix max PCM level to
1545 + * 0 dB (originally it has 0x2b steps with 0dB offset 0x14)
1546 */
1547 snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
1548 (0x14 << AC_AMPCAP_OFFSET_SHIFT) |
1549 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1550 index b486daa..abfc558 100644
1551 --- a/sound/pci/hda/patch_realtek.c
1552 +++ b/sound/pci/hda/patch_realtek.c
1553 @@ -17348,7 +17348,6 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
1554 ALC662_3ST_6ch_DIG),
1555 SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
1556 ALC663_ASUS_H13),
1557 - SND_PCI_QUIRK(0x8086, 0xd604, "Intel mobo", ALC662_3ST_2ch_DIG),
1558 {}
1559 };
1560
1561 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
1562 index ac2d528..cb474c0 100644
1563 --- a/sound/pci/hda/patch_sigmatel.c
1564 +++ b/sound/pci/hda/patch_sigmatel.c
1565 @@ -1539,11 +1539,9 @@ static unsigned int alienware_m17x_pin_configs[13] = {
1566 0x904601b0,
1567 };
1568
1569 -static unsigned int intel_dg45id_pin_configs[14] = {
1570 +static unsigned int intel_dg45id_pin_configs[13] = {
1571 0x02214230, 0x02A19240, 0x01013214, 0x01014210,
1572 - 0x01A19250, 0x01011212, 0x01016211, 0x40f000f0,
1573 - 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x014510A0,
1574 - 0x074510B0, 0x40f000f0
1575 + 0x01A19250, 0x01011212, 0x01016211
1576 };
1577
1578 static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
1579 diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
1580 index 3e1c20a..726fd4b 100644
1581 --- a/sound/pci/ice1712/maya44.c
1582 +++ b/sound/pci/ice1712/maya44.c
1583 @@ -347,7 +347,7 @@ static int maya_gpio_sw_put(struct snd_kcontrol *kcontrol,
1584
1585 /* known working input slots (0-4) */
1586 #define MAYA_LINE_IN 1 /* in-2 */
1587 -#define MAYA_MIC_IN 4 /* in-5 */
1588 +#define MAYA_MIC_IN 3 /* in-4 */
1589
1590 static void wm8776_select_input(struct snd_maya44 *chip, int idx, int line)
1591 {
1592 @@ -393,8 +393,8 @@ static int maya_rec_src_put(struct snd_kcontrol *kcontrol,
1593 int changed;
1594
1595 mutex_lock(&chip->mutex);
1596 - changed = maya_set_gpio_bits(chip->ice, GPIO_MIC_RELAY,
1597 - sel ? GPIO_MIC_RELAY : 0);
1598 + changed = maya_set_gpio_bits(chip->ice, 1 << GPIO_MIC_RELAY,
1599 + sel ? (1 << GPIO_MIC_RELAY) : 0);
1600 wm8776_select_input(chip, 0, sel ? MAYA_MIC_IN : MAYA_LINE_IN);
1601 mutex_unlock(&chip->mutex);
1602 return changed;
1603 diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
1604 index 16c226b..7c4986b 100644
1605 --- a/sound/pci/oxygen/xonar_cs43xx.c
1606 +++ b/sound/pci/oxygen/xonar_cs43xx.c
1607 @@ -56,6 +56,7 @@
1608 #include <sound/pcm_params.h>
1609 #include <sound/tlv.h>
1610 #include "xonar.h"
1611 +#include "cm9780.h"
1612 #include "cs4398.h"
1613 #include "cs4362a.h"
1614
1615 @@ -172,6 +173,8 @@ static void xonar_d1_init(struct oxygen *chip)
1616 oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
1617 GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
1618
1619 + oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC);
1620 +
1621 xonar_init_cs53x1(chip);
1622 xonar_enable_output(chip);
1623