Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.33-r4/0103-2.6.33.4-all-fixes.patch



Revision 1286
Thu Feb 17 15:05:15 2011 UTC by niro
File size: 132383 bytes
2.6.33-alx-r4: enabled usbserial generic module for accu-chek II devices
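
The changelog entry above describes a kernel configuration change (the generic USB serial driver, CONFIG_USB_SERIAL_GENERIC, enabled as a module) rather than any hunk in the patch below. As a rough sketch of how such a build is typically used — the IDs here are placeholders, not the real accu-chek ones — the generic driver can be bound to an otherwise unsupported USB device through the usbserial module parameters:

    # Hypothetical example: substitute the vendor/product IDs reported
    # by lsusb for the actual device.
    modprobe usbserial vendor=0x1234 product=0x5678
    # On success the device appears as a USB serial port, e.g. /dev/ttyUSB0.
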
1 diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
2 index 811743c..5f2ba8d 100644
3 --- a/arch/arm/mach-pxa/include/mach/colibri.h
4 +++ b/arch/arm/mach-pxa/include/mach/colibri.h
5 @@ -2,6 +2,7 @@
6 #define _COLIBRI_H_
7
8 #include <net/ax88796.h>
9 +#include <mach/mfp.h>
10
11 /*
12 * common settings for all modules
13 diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
14 index 7950ef4..743385d 100644
15 --- a/arch/mips/include/asm/mach-sibyte/war.h
16 +++ b/arch/mips/include/asm/mach-sibyte/war.h
17 @@ -16,7 +16,11 @@
18 #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
19 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
20
21 -#define BCM1250_M3_WAR 1
22 +#ifndef __ASSEMBLY__
23 +extern int sb1250_m3_workaround_needed(void);
24 +#endif
25 +
26 +#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
27 #define SIBYTE_1956_WAR 1
28
29 #else
30 diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
31 index 0444da1..92da315 100644
32 --- a/arch/mips/sibyte/sb1250/setup.c
33 +++ b/arch/mips/sibyte/sb1250/setup.c
34 @@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
35 return ret;
36 }
37
38 +int sb1250_m3_workaround_needed(void)
39 +{
40 + switch (soc_type) {
41 + case K_SYS_SOC_TYPE_BCM1250:
42 + case K_SYS_SOC_TYPE_BCM1250_ALT:
43 + case K_SYS_SOC_TYPE_BCM1250_ALT2:
44 + case K_SYS_SOC_TYPE_BCM1125:
45 + case K_SYS_SOC_TYPE_BCM1125H:
46 + return soc_pass < K_SYS_REVISION_BCM1250_C0;
47 +
48 + default:
49 + return 0;
50 + }
51 +}
52 +
53 static int __init setup_bcm112x(void)
54 {
55 int ret = 0;
56 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
57 index 9258074..567cd57 100644
58 --- a/arch/powerpc/kernel/head_64.S
59 +++ b/arch/powerpc/kernel/head_64.S
60 @@ -615,6 +615,17 @@ _GLOBAL(start_secondary_prolog)
61 std r3,0(r1) /* Zero the stack frame pointer */
62 bl .start_secondary
63 b .
64 +/*
65 + * Reset stack pointer and call start_secondary
66 + * to continue with online operation when woken up
67 + * from cede in cpu offline.
68 + */
69 +_GLOBAL(start_secondary_resume)
70 + ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
71 + li r3,0
72 + std r3,0(r1) /* Zero the stack frame pointer */
73 + bl .start_secondary
74 + b .
75 #endif
76
77 /*
78 diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
79 index c539472..1ce9dd5 100644
80 --- a/arch/powerpc/mm/fsl_booke_mmu.c
81 +++ b/arch/powerpc/mm/fsl_booke_mmu.c
82 @@ -155,15 +155,10 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
83 if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
84 TLBCAM[index].MAS7 = (u64)phys >> 32;
85
86 -#ifndef CONFIG_KGDB /* want user access for breakpoints */
87 if (flags & _PAGE_USER) {
88 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
89 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
90 }
91 -#else
92 - TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
93 - TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
94 -#endif
95
96 tlbcam_addrs[index].start = virt;
97 tlbcam_addrs[index].limit = virt + size - 1;
98 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
99 index 6ea4698..b842378 100644
100 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
101 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
102 @@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void)
103 if (!get_lppaca()->shared_proc)
104 get_lppaca()->donate_dedicated_cpu = 1;
105
106 - printk(KERN_INFO
107 - "cpu %u (hwid %u) ceding for offline with hint %d\n",
108 - cpu, hwcpu, cede_latency_hint);
109 while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
110 extended_cede_processor(cede_latency_hint);
111 - printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
112 - cpu, hwcpu);
113 - printk(KERN_INFO
114 - "Decrementer value = %x Timebase value = %llx\n",
115 - get_dec(), get_tb());
116 }
117
118 - printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
119 - cpu, hwcpu);
120 -
121 if (!get_lppaca()->shared_proc)
122 get_lppaca()->donate_dedicated_cpu = 0;
123 get_lppaca()->idle = 0;
124 - }
125
126 - if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
127 - unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
128 + if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
129 + unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
130
131 - /*
132 - * NOTE: Calling start_secondary() here for now to
133 - * start new context.
134 - * However, need to do it cleanly by resetting the
135 - * stack pointer.
136 - */
137 - start_secondary();
138 + /*
139 + * Call to start_secondary_resume() will not return.
140 + * Kernel stack will be reset and start_secondary()
141 + * will be called to continue the online operation.
142 + */
143 + start_secondary_resume();
144 + }
145 + }
146
147 - } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
148 + /* Requested state is CPU_STATE_OFFLINE at this point */
149 + WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
150
151 - set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
152 - unregister_slb_shadow(hard_smp_processor_id(),
153 - __pa(get_slb_shadow()));
154 - rtas_stop_self();
155 - }
156 + set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
157 + unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
158 + rtas_stop_self();
159
160 /* Should never get here... */
161 BUG();
162 diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
163 index 202d869..75a6f48 100644
164 --- a/arch/powerpc/platforms/pseries/offline_states.h
165 +++ b/arch/powerpc/platforms/pseries/offline_states.h
166 @@ -35,4 +35,5 @@ static inline void set_default_offline_state(int cpu)
167
168 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
169 extern int start_secondary(void);
170 +extern void start_secondary_resume(void);
171 #endif
172 diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
173 index 8b49bf9..bfa1ea4 100644
174 --- a/arch/sparc/include/asm/irqflags_64.h
175 +++ b/arch/sparc/include/asm/irqflags_64.h
176 @@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
177 */
178 static inline unsigned long __raw_local_irq_save(void)
179 {
180 - unsigned long flags = __raw_local_save_flags();
181 -
182 - raw_local_irq_disable();
183 + unsigned long flags, tmp;
184 +
185 + /* Disable interrupts to PIL_NORMAL_MAX unless we already
186 + * are using PIL_NMI, in which case PIL_NMI is retained.
187 + *
188 + * The only values we ever program into the %pil are 0,
189 + * PIL_NORMAL_MAX and PIL_NMI.
190 + *
191 + * Since PIL_NMI is the largest %pil value and all bits are
192 + * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
193 + * actually is.
194 + */
195 + __asm__ __volatile__(
196 + "rdpr %%pil, %0\n\t"
197 + "or %0, %2, %1\n\t"
198 + "wrpr %1, 0x0, %%pil"
199 + : "=r" (flags), "=r" (tmp)
200 + : "i" (PIL_NORMAL_MAX)
201 + : "memory"
202 + );
203
204 return flags;
205 }
206 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
207 index 39be9f2..3df02de 100644
208 --- a/arch/sparc/include/asm/thread_info_64.h
209 +++ b/arch/sparc/include/asm/thread_info_64.h
210 @@ -121,7 +121,7 @@ struct thread_info {
211 #define THREAD_SHIFT PAGE_SHIFT
212 #endif /* PAGE_SHIFT == 13 */
213
214 -#define PREEMPT_ACTIVE 0x4000000
215 +#define PREEMPT_ACTIVE 0x10000000
216
217 /*
218 * macros/functions for gaining access to the thread information structure
219 diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
220 index b775658..8a00058 100644
221 --- a/arch/sparc/kernel/pci_common.c
222 +++ b/arch/sparc/kernel/pci_common.c
223 @@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
224 struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
225
226 if (!rp) {
227 - prom_printf("Cannot allocate IOMMU resource.\n");
228 - prom_halt();
229 + pr_info("%s: Cannot allocate IOMMU resource.\n",
230 + pbm->name);
231 + return;
232 }
233 rp->name = "IOMMU";
234 rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
235 rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
236 rp->flags = IORESOURCE_BUSY;
237 - request_resource(&pbm->mem_space, rp);
238 + if (request_resource(&pbm->mem_space, rp)) {
239 + pr_info("%s: Unable to request IOMMU resource.\n",
240 + pbm->name);
241 + kfree(rp);
242 + }
243 }
244 }
245
246 diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
247 index fd3cee4..c720f0c 100644
248 --- a/arch/sparc/kernel/rtrap_64.S
249 +++ b/arch/sparc/kernel/rtrap_64.S
250 @@ -172,7 +172,17 @@ rtrap_xcall:
251 nop
252 call trace_hardirqs_on
253 nop
254 - wrpr %l4, %pil
255 + /* Do not actually set the %pil here. We will do that
256 + * below after we clear PSTATE_IE in the %pstate register.
257 + * If we re-enable interrupts here, we can recurse down
258 + * the hardirq stack potentially endlessly, causing a
259 + * stack overflow.
260 + *
261 + * It is tempting to put this test and trace_hardirqs_on
262 + * call at the 'rt_continue' label, but that will not work
263 + * as that path hits unconditionally and we do not want to
264 + * execute this in NMI return paths, for example.
265 + */
266 #endif
267 rtrap_no_irq_enable:
268 andcc %l1, TSTATE_PRIV, %l3
269 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
270 index 10f7bb9..22cd475 100644
271 --- a/arch/sparc/kernel/traps_64.c
272 +++ b/arch/sparc/kernel/traps_64.c
273 @@ -2202,27 +2202,6 @@ void dump_stack(void)
274
275 EXPORT_SYMBOL(dump_stack);
276
277 -static inline int is_kernel_stack(struct task_struct *task,
278 - struct reg_window *rw)
279 -{
280 - unsigned long rw_addr = (unsigned long) rw;
281 - unsigned long thread_base, thread_end;
282 -
283 - if (rw_addr < PAGE_OFFSET) {
284 - if (task != &init_task)
285 - return 0;
286 - }
287 -
288 - thread_base = (unsigned long) task_stack_page(task);
289 - thread_end = thread_base + sizeof(union thread_union);
290 - if (rw_addr >= thread_base &&
291 - rw_addr < thread_end &&
292 - !(rw_addr & 0x7UL))
293 - return 1;
294 -
295 - return 0;
296 -}
297 -
298 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
299 {
300 unsigned long fp = rw->ins[6];
301 @@ -2251,6 +2230,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
302 show_regs(regs);
303 add_taint(TAINT_DIE);
304 if (regs->tstate & TSTATE_PRIV) {
305 + struct thread_info *tp = current_thread_info();
306 struct reg_window *rw = (struct reg_window *)
307 (regs->u_regs[UREG_FP] + STACK_BIAS);
308
309 @@ -2258,8 +2238,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
310 * find some badly aligned kernel stack.
311 */
312 while (rw &&
313 - count++ < 30&&
314 - is_kernel_stack(current, rw)) {
315 + count++ < 30 &&
316 + kstack_valid(tp, (unsigned long) rw)) {
317 printk("Caller[%016lx]: %pS\n", rw->ins[7],
318 (void *) rw->ins[7]);
319
320 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
321 index 378ca82..95a8e9a 100644
322 --- a/arch/sparc/kernel/unaligned_64.c
323 +++ b/arch/sparc/kernel/unaligned_64.c
324 @@ -49,7 +49,7 @@ static inline enum direction decode_direction(unsigned int insn)
325 }
326
327 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
328 -static inline int decode_access_size(unsigned int insn)
329 +static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
330 {
331 unsigned int tmp;
332
333 @@ -65,7 +65,7 @@ static inline int decode_access_size(unsigned int insn)
334 return 2;
335 else {
336 printk("Impossible unaligned trap. insn=%08x\n", insn);
337 - die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
338 + die_if_kernel("Byte sized unaligned access?!?!", regs);
339
340 /* GCC should never warn that control reaches the end
341 * of this function without returning a value because
342 @@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
343 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
344 {
345 enum direction dir = decode_direction(insn);
346 - int size = decode_access_size(insn);
347 + int size = decode_access_size(regs, insn);
348 int orig_asi, asi;
349
350 current_thread_info()->kern_una_regs = regs;
351 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
352 index eb40925..ddb52b8 100644
353 --- a/arch/x86/Kconfig
354 +++ b/arch/x86/Kconfig
355 @@ -627,7 +627,7 @@ config GART_IOMMU
356 bool "GART IOMMU support" if EMBEDDED
357 default y
358 select SWIOTLB
359 - depends on X86_64 && PCI
360 + depends on X86_64 && PCI && K8_NB
361 ---help---
362 Support for full DMA access of devices with 32bit memory access only
363 on systems with more than 3GB. This is usually needed for USB,
364 @@ -2026,7 +2026,7 @@ endif # X86_32
365
366 config K8_NB
367 def_bool y
368 - depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
369 + depends on CPU_SUP_AMD && PCI
370
371 source "drivers/pcmcia/Kconfig"
372
373 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
374 index be37059..b35c160 100644
375 --- a/arch/x86/kernel/apic/io_apic.c
376 +++ b/arch/x86/kernel/apic/io_apic.c
377 @@ -2539,6 +2539,9 @@ void irq_force_complete_move(int irq)
378 struct irq_desc *desc = irq_to_desc(irq);
379 struct irq_cfg *cfg = desc->chip_data;
380
381 + if (!cfg)
382 + return;
383 +
384 __irq_complete_move(&desc, cfg->vector);
385 }
386 #else
387 diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
388 index 6e44519..3b5ea38 100644
389 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
390 +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
391 @@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
392 powernow_table[i].index = index;
393
394 /* Frequency may be rounded for these */
395 - if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
396 + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
397 + || boot_cpu_data.x86 == 0x11) {
398 powernow_table[i].frequency =
399 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
400 } else
401 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
402 index 7e1cca1..1366c7c 100644
403 --- a/arch/x86/kernel/cpu/intel.c
404 +++ b/arch/x86/kernel/cpu/intel.c
405 @@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
406 (c->x86 == 0x6 && c->x86_model >= 0x0e))
407 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
408
409 + /*
410 + * Atom erratum AAE44/AAF40/AAG38/AAH41:
411 + *
412 + * A race condition between speculative fetches and invalidating
413 + * a large page. This is worked around in microcode, but we
414 + * need the microcode to have already been loaded... so if it is
415 + * not, recommend a BIOS update and disable large pages.
416 + */
417 + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
418 + u32 ucode, junk;
419 +
420 + wrmsr(MSR_IA32_UCODE_REV, 0, 0);
421 + sync_core();
422 + rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
423 +
424 + if (ucode < 0x20e) {
425 + printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
426 + clear_cpu_cap(c, X86_FEATURE_PSE);
427 + }
428 + }
429 +
430 #ifdef CONFIG_X86_64
431 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
432 #else
433 diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
434 index cbc4332..9b89546 100644
435 --- a/arch/x86/kernel/k8.c
436 +++ b/arch/x86/kernel/k8.c
437 @@ -121,3 +121,17 @@ void k8_flush_garts(void)
438 }
439 EXPORT_SYMBOL_GPL(k8_flush_garts);
440
441 +static __init int init_k8_nbs(void)
442 +{
443 + int err = 0;
444 +
445 + err = cache_k8_northbridges();
446 +
447 + if (err < 0)
448 + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
449 +
450 + return err;
451 +}
452 +
453 +/* This has to go after the PCI subsystem */
454 +fs_initcall(init_k8_nbs);
455 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
456 index 4f41b29..0ae24d9 100644
457 --- a/arch/x86/kernel/pci-gart_64.c
458 +++ b/arch/x86/kernel/pci-gart_64.c
459 @@ -738,7 +738,7 @@ int __init gart_iommu_init(void)
460 unsigned long scratch;
461 long i;
462
463 - if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
464 + if (num_k8_northbridges == 0)
465 return 0;
466
467 #ifndef CONFIG_AGP_AMD64
468 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
469 index 126f0b4..11d0702 100644
470 --- a/arch/x86/kernel/process_64.c
471 +++ b/arch/x86/kernel/process_64.c
472 @@ -282,12 +282,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
473
474 set_tsk_thread_flag(p, TIF_FORK);
475
476 - p->thread.fs = me->thread.fs;
477 - p->thread.gs = me->thread.gs;
478 p->thread.io_bitmap_ptr = NULL;
479
480 savesegment(gs, p->thread.gsindex);
481 + p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
482 savesegment(fs, p->thread.fsindex);
483 + p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
484 savesegment(es, p->thread.es);
485 savesegment(ds, p->thread.ds);
486
487 diff --git a/block/blk-timeout.c b/block/blk-timeout.c
488 index 1ba7e0a..4f0c06c 100644
489 --- a/block/blk-timeout.c
490 +++ b/block/blk-timeout.c
491 @@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
492 struct request_queue *q = (struct request_queue *) data;
493 unsigned long flags, next = 0;
494 struct request *rq, *tmp;
495 + int next_set = 0;
496
497 spin_lock_irqsave(q->queue_lock, flags);
498
499 @@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
500 if (blk_mark_rq_complete(rq))
501 continue;
502 blk_rq_timed_out(rq);
503 - } else if (!next || time_after(next, rq->deadline))
504 + } else if (!next_set || time_after(next, rq->deadline)) {
505 next = rq->deadline;
506 + next_set = 1;
507 + }
508 }
509
510 - /*
511 - * next can never be 0 here with the list non-empty, since we always
512 - * bump ->deadline to 1 so we can detect if the timer was ever added
513 - * or not. See comment in blk_add_timer()
514 - */
515 - if (next)
516 + if (next_set)
517 mod_timer(&q->timeout, round_jiffies_up(next));
518
519 spin_unlock_irqrestore(q->queue_lock, flags);
520 diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
521 index 943f2ab..ce038d8 100644
522 --- a/crypto/async_tx/async_raid6_recov.c
523 +++ b/crypto/async_tx/async_raid6_recov.c
524 @@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
525 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
526 struct page **blocks, struct async_submit_ctl *submit)
527 {
528 + void *scribble = submit->scribble;
529 int non_zero_srcs, i;
530
531 BUG_ON(faila == failb);
532 @@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
533
534 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
535
536 - /* we need to preserve the contents of 'blocks' for the async
537 - * case, so punt to synchronous if a scribble buffer is not available
538 + /* if a dma resource is not available or a scribble buffer is not
539 + * available punt to the synchronous path. In the 'dma not
540 + * available' case be sure to use the scribble buffer to
541 + * preserve the content of 'blocks' as the caller intended.
542 */
543 - if (!submit->scribble) {
544 - void **ptrs = (void **) blocks;
545 + if (!async_dma_find_channel(DMA_PQ) || !scribble) {
546 + void **ptrs = scribble ? scribble : (void **) blocks;
547
548 async_tx_quiesce(&submit->depend_tx);
549 for (i = 0; i < disks; i++)
550 @@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
551
552 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
553
554 - /* we need to preserve the contents of 'blocks' for the async
555 - * case, so punt to synchronous if a scribble buffer is not available
556 + /* if a dma resource is not available or a scribble buffer is not
557 + * available punt to the synchronous path. In the 'dma not
558 + * available' case be sure to use the scribble buffer to
559 + * preserve the content of 'blocks' as the caller intended.
560 */
561 - if (!scribble) {
562 - void **ptrs = (void **) blocks;
563 + if (!async_dma_find_channel(DMA_PQ) || !scribble) {
564 + void **ptrs = scribble ? scribble : (void **) blocks;
565
566 async_tx_quiesce(&submit->depend_tx);
567 for (i = 0; i < disks; i++)
568 diff --git a/drivers/Makefile b/drivers/Makefile
569 index 6ee53c7..8b0b948 100644
570 --- a/drivers/Makefile
571 +++ b/drivers/Makefile
572 @@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/
573 obj-$(CONFIG_PNP) += pnp/
574 obj-$(CONFIG_ARM_AMBA) += amba/
575
576 +obj-$(CONFIG_VIRTIO) += virtio/
577 obj-$(CONFIG_XEN) += xen/
578
579 # regulators early, since some subsystems rely on them to initialize
580 @@ -106,7 +107,6 @@ obj-$(CONFIG_HID) += hid/
581 obj-$(CONFIG_PPC_PS3) += ps3/
582 obj-$(CONFIG_OF) += of/
583 obj-$(CONFIG_SSB) += ssb/
584 -obj-$(CONFIG_VIRTIO) += virtio/
585 obj-$(CONFIG_VLYNQ) += vlynq/
586 obj-$(CONFIG_STAGING) += staging/
587 obj-y += platform/
588 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
589 index dc4ffad..e02d93c 100644
590 --- a/drivers/acpi/power_meter.c
591 +++ b/drivers/acpi/power_meter.c
592 @@ -34,7 +34,7 @@
593 #define ACPI_POWER_METER_NAME "power_meter"
594 ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
595 #define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
596 -#define ACPI_POWER_METER_CLASS "power_meter_resource"
597 +#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
598
599 #define NUM_SENSORS 17
600
601 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
602 index 79d33d9..7c85265 100644
603 --- a/drivers/acpi/sleep.c
604 +++ b/drivers/acpi/sleep.c
605 @@ -450,6 +450,126 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
606 },
607 },
608 {
609 + .callback = init_set_sci_en_on_resume,
610 + .ident = "Lenovo ThinkPad T410",
611 + .matches = {
612 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
613 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
614 + },
615 + },
616 + {
617 + .callback = init_set_sci_en_on_resume,
618 + .ident = "Lenovo ThinkPad T510",
619 + .matches = {
620 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
621 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
622 + },
623 + },
624 + {
625 + .callback = init_set_sci_en_on_resume,
626 + .ident = "Lenovo ThinkPad W510",
627 + .matches = {
628 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
629 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
630 + },
631 + },
632 + {
633 + .callback = init_set_sci_en_on_resume,
634 + .ident = "Lenovo ThinkPad X201",
635 + .matches = {
636 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
637 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
638 + },
639 + },
640 + {
641 + .callback = init_set_sci_en_on_resume,
642 + .ident = "Lenovo ThinkPad X201",
643 + .matches = {
644 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
645 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
646 + },
647 + },
648 + {
649 + .callback = init_set_sci_en_on_resume,
650 + .ident = "Lenovo ThinkPad T410",
651 + .matches = {
652 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
653 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
654 + },
655 + },
656 + {
657 + .callback = init_set_sci_en_on_resume,
658 + .ident = "Lenovo ThinkPad T510",
659 + .matches = {
660 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
661 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
662 + },
663 + },
664 + {
665 + .callback = init_set_sci_en_on_resume,
666 + .ident = "Lenovo ThinkPad W510",
667 + .matches = {
668 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
669 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
670 + },
671 + },
672 + {
673 + .callback = init_set_sci_en_on_resume,
674 + .ident = "Lenovo ThinkPad X201",
675 + .matches = {
676 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
677 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
678 + },
679 + },
680 + {
681 + .callback = init_set_sci_en_on_resume,
682 + .ident = "Lenovo ThinkPad X201",
683 + .matches = {
684 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
685 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
686 + },
687 + },
688 + {
689 + .callback = init_set_sci_en_on_resume,
690 + .ident = "Lenovo ThinkPad T410",
691 + .matches = {
692 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
693 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
694 + },
695 + },
696 + {
697 + .callback = init_set_sci_en_on_resume,
698 + .ident = "Lenovo ThinkPad T510",
699 + .matches = {
700 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
701 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
702 + },
703 + },
704 + {
705 + .callback = init_set_sci_en_on_resume,
706 + .ident = "Lenovo ThinkPad W510",
707 + .matches = {
708 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
709 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
710 + },
711 + },
712 + {
713 + .callback = init_set_sci_en_on_resume,
714 + .ident = "Lenovo ThinkPad X201",
715 + .matches = {
716 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
717 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
718 + },
719 + },
720 + {
721 + .callback = init_set_sci_en_on_resume,
722 + .ident = "Lenovo ThinkPad X201",
723 + .matches = {
724 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
725 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
726 + },
727 + },
728 + {
729 .callback = init_old_suspend_ordering,
730 .ident = "Panasonic CF51-2L",
731 .matches = {
732 @@ -458,6 +578,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
733 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
734 },
735 },
736 + {
737 + .callback = init_set_sci_en_on_resume,
738 + .ident = "Dell Studio 1558",
739 + .matches = {
740 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
741 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
742 + },
743 + },
744 + {
745 + .callback = init_set_sci_en_on_resume,
746 + .ident = "Dell Studio 1557",
747 + .matches = {
748 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
749 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
750 + },
751 + },
752 + {
753 + .callback = init_set_sci_en_on_resume,
754 + .ident = "Dell Studio 1555",
755 + .matches = {
756 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
757 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
758 + },
759 + },
760 {},
761 };
762 #endif /* CONFIG_SUSPEND */
763 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
764 index 9f6cfac..228740f 100644
765 --- a/drivers/ata/libata-eh.c
766 +++ b/drivers/ata/libata-eh.c
767 @@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
768 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
769 {
770 struct ata_port *ap = qc->ap;
771 + struct request_queue *q = qc->scsicmd->device->request_queue;
772 + unsigned long flags;
773
774 WARN_ON(!ap->ops->error_handler);
775
776 @@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
777 * Note that ATA_QCFLAG_FAILED is unconditionally set after
778 * this function completes.
779 */
780 + spin_lock_irqsave(q->queue_lock, flags);
781 blk_abort_request(qc->scsicmd->request);
782 + spin_unlock_irqrestore(q->queue_lock, flags);
783 }
784
785 /**
786 @@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
787 }
788
789 /* okay, this error is ours */
790 + memset(&tf, 0, sizeof(tf));
791 rc = ata_eh_read_log_10h(dev, &tag, &tf);
792 if (rc) {
793 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
794 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
795 index bd02505..d7d77d4 100644
796 --- a/drivers/base/memory.c
797 +++ b/drivers/base/memory.c
798 @@ -311,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
799 static ssize_t
800 print_block_size(struct class *class, char *buf)
801 {
802 - return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
803 + return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
804 }
805
806 static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
807 diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
808 index 2fb3a48..4b66c69 100644
809 --- a/drivers/char/agp/Kconfig
810 +++ b/drivers/char/agp/Kconfig
811 @@ -57,7 +57,7 @@ config AGP_AMD
812
813 config AGP_AMD64
814 tristate "AMD Opteron/Athlon64 on-CPU GART support"
815 - depends on AGP && X86
816 + depends on AGP && X86 && K8_NB
817 help
818 This option gives you AGP support for the GLX component of
819 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
820 diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
821 index 73655ae..f8e57c6 100644
822 --- a/drivers/cpuidle/governors/menu.c
823 +++ b/drivers/cpuidle/governors/menu.c
824 @@ -101,7 +101,6 @@ struct menu_device {
825
826 unsigned int expected_us;
827 u64 predicted_us;
828 - unsigned int measured_us;
829 unsigned int exit_us;
830 unsigned int bucket;
831 u64 correction_factor[BUCKETS];
832 @@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev)
833 int i;
834 int multiplier;
835
836 - data->last_state_idx = 0;
837 - data->exit_us = 0;
838 -
839 if (data->needs_update) {
840 menu_update(dev);
841 data->needs_update = 0;
842 }
843
844 + data->last_state_idx = 0;
845 + data->exit_us = 0;
846 +
847 /* Special case when user has set very strict latency requirement */
848 if (unlikely(latency_req == 0))
849 return 0;
850 @@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev)
851 new_factor = data->correction_factor[data->bucket]
852 * (DECAY - 1) / DECAY;
853
854 - if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
855 + if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
856 new_factor += RESOLUTION * measured_us / data->expected_us;
857 else
858 /*
859 diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
860 index f5b6d9f..97e64bc 100644
861 --- a/drivers/edac/edac_mce_amd.c
862 +++ b/drivers/edac/edac_mce_amd.c
863 @@ -294,7 +294,6 @@ wrong_ls_mce:
864 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
865 {
866 u32 ec = ERROR_CODE(regs->nbsl);
867 - u32 xec = EXT_ERROR_CODE(regs->nbsl);
868
869 if (!handle_errors)
870 return;
871 @@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
872 pr_cont("\n");
873 }
874
875 - pr_emerg("%s.\n", EXT_ERR_MSG(xec));
876 + pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
877
878 if (BUS_ERROR(ec) && nb_bus_decoder)
879 nb_bus_decoder(node_id, regs);
880 @@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
881 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
882
883 /* do the two bits[14:13] together */
884 - ecc = m->status & (3ULL << 45);
885 + ecc = (m->status >> 45) & 0x3;
886 if (ecc)
887 pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
888
889 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
890 index a894ade..1372796 100644
891 --- a/drivers/gpu/drm/i915/i915_debugfs.c
892 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
893 @@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
894 struct drm_device *dev = node->minor->dev;
895 drm_i915_private_t *dev_priv = dev->dev_private;
896
897 - if (!IS_IRONLAKE(dev)) {
898 + if (!HAS_PCH_SPLIT(dev)) {
899 seq_printf(m, "Interrupt enable: %08x\n",
900 I915_READ(IER));
901 seq_printf(m, "Interrupt identity: %08x\n",
902 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
903 index 2307f98..d642efd 100644
904 --- a/drivers/gpu/drm/i915/i915_dma.c
905 +++ b/drivers/gpu/drm/i915/i915_dma.c
906 @@ -978,15 +978,21 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
907 * Some of the preallocated space is taken by the GTT
908 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
909 */
910 - if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
911 + if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
912 overhead = 4096;
913 else
914 overhead = (*aperture_size / 1024) + 4096;
915
916 switch (tmp & INTEL_GMCH_GMS_MASK) {
917 case INTEL_855_GMCH_GMS_DISABLED:
918 - DRM_ERROR("video memory is disabled\n");
919 - return -1;
920 + /* XXX: This is what my A1 silicon has. */
921 + if (IS_GEN6(dev)) {
922 + stolen = 64 * 1024 * 1024;
923 + } else {
924 + DRM_ERROR("video memory is disabled\n");
925 + return -1;
926 + }
927 + break;
928 case INTEL_855_GMCH_GMS_STOLEN_1M:
929 stolen = 1 * 1024 * 1024;
930 break;
931 @@ -1064,7 +1070,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
932 int gtt_offset, gtt_size;
933
934 if (IS_I965G(dev)) {
935 - if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
936 + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
937 gtt_offset = 2*1024*1024;
938 gtt_size = 2*1024*1024;
939 } else {
940 @@ -1445,7 +1451,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
941
942 dev->driver->get_vblank_counter = i915_get_vblank_counter;
943 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
944 - if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
945 + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
946 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
947 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
948 }
949 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
950 index b99b6a8..16ce3ba 100644
951 --- a/drivers/gpu/drm/i915/i915_drv.h
952 +++ b/drivers/gpu/drm/i915/i915_drv.h
953 @@ -1026,7 +1026,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
954 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
955 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
956 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
957 -#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
958 +#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
959 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
960 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
961 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
962 @@ -1045,8 +1045,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
963 #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
964 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
965
966 +#define IS_GEN3(dev) (IS_I915G(dev) || \
967 + IS_I915GM(dev) || \
968 + IS_I945G(dev) || \
969 + IS_I945GM(dev) || \
970 + IS_G33(dev) || \
971 + IS_PINEVIEW(dev))
972 +#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
973 + (dev)->pci_device == 0x2982 || \
974 + (dev)->pci_device == 0x2992 || \
975 + (dev)->pci_device == 0x29A2 || \
976 + (dev)->pci_device == 0x2A02 || \
977 + (dev)->pci_device == 0x2A12 || \
978 + (dev)->pci_device == 0x2E02 || \
979 + (dev)->pci_device == 0x2E12 || \
980 + (dev)->pci_device == 0x2E22 || \
981 + (dev)->pci_device == 0x2E32 || \
982 + (dev)->pci_device == 0x2A42 || \
983 + (dev)->pci_device == 0x2E42)
984 +
985 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
986
987 +#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
988 +
989 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
990 * rows, which changed the alignment requirements and fence programming.
991 */
992 @@ -1067,6 +1088,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
993 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
994 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
995
996 +#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
997 + IS_GEN6(dev))
998 +
999 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
1000
1001 #endif
1002 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1003 index fd099a1..6458400 100644
1004 --- a/drivers/gpu/drm/i915/i915_gem.c
1005 +++ b/drivers/gpu/drm/i915/i915_gem.c
1006 @@ -1819,7 +1819,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1007 return -EIO;
1008
1009 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1010 - if (IS_IRONLAKE(dev))
1011 + if (HAS_PCH_SPLIT(dev))
1012 ier = I915_READ(DEIER) | I915_READ(GTIER);
1013 else
1014 ier = I915_READ(IER);
1015 @@ -2316,6 +2316,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1016 pitch_val = obj_priv->stride / tile_width;
1017 pitch_val = ffs(pitch_val) - 1;
1018
1019 + if (obj_priv->tiling_mode == I915_TILING_Y &&
1020 + HAS_128_BYTE_Y_TILING(dev))
1021 + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
1022 + else
1023 + WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
1024 +
1025 val = obj_priv->gtt_offset;
1026 if (obj_priv->tiling_mode == I915_TILING_Y)
1027 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1028 diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
1029 index df278b2..040e80c 100644
1030 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
1031 +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
1032 @@ -209,7 +209,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
1033 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
1034 bool need_disable;
1035
1036 - if (IS_IRONLAKE(dev)) {
1037 + if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1038 /* On Ironlake whatever DRAM config, GPU always do
1039 * same swizzling setup.
1040 */
1041 @@ -357,21 +357,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
1042 * reg, so dont bother to check the size */
1043 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
1044 return false;
1045 - } else if (IS_I9XX(dev)) {
1046 - uint32_t pitch_val = ffs(stride / tile_width) - 1;
1047 -
1048 - /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
1049 - * instead of 4 (2KB) on 945s.
1050 - */
1051 - if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
1052 - size > (I830_FENCE_MAX_SIZE_VAL << 20))
1053 + } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
1054 + if (stride > 8192)
1055 return false;
1056 - } else {
1057 - uint32_t pitch_val = ffs(stride / tile_width) - 1;
1058
1059 - if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
1060 - size > (I830_FENCE_MAX_SIZE_VAL << 19))
1061 - return false;
1062 + if (IS_GEN3(dev)) {
1063 + if (size > I830_FENCE_MAX_SIZE_VAL << 20)
1064 + return false;
1065 + } else {
1066 + if (size > I830_FENCE_MAX_SIZE_VAL << 19)
1067 + return false;
1068 + }
1069 }
1070
1071 /* 965+ just needs multiples of tile width */
1072 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1073 index a17d6bd..032f667 100644
1074 --- a/drivers/gpu/drm/i915/i915_irq.c
1075 +++ b/drivers/gpu/drm/i915/i915_irq.c
1076 @@ -576,7 +576,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1077
1078 atomic_inc(&dev_priv->irq_received);
1079
1080 - if (IS_IRONLAKE(dev))
1081 + if (HAS_PCH_SPLIT(dev))
1082 return ironlake_irq_handler(dev);
1083
1084 iir = I915_READ(IIR);
1085 @@ -737,7 +737,7 @@ void i915_user_irq_get(struct drm_device *dev)
1086
1087 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1088 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1089 - if (IS_IRONLAKE(dev))
1090 + if (HAS_PCH_SPLIT(dev))
1091 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
1092 else
1093 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1094 @@ -753,7 +753,7 @@ void i915_user_irq_put(struct drm_device *dev)
1095 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1096 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1097 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1098 - if (IS_IRONLAKE(dev))
1099 + if (HAS_PCH_SPLIT(dev))
1100 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
1101 else
1102 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1103 @@ -861,7 +861,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1104 return -EINVAL;
1105
1106 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1107 - if (IS_IRONLAKE(dev))
1108 + if (HAS_PCH_SPLIT(dev))
1109 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1110 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1111 else if (IS_I965G(dev))
1112 @@ -883,7 +883,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1113 unsigned long irqflags;
1114
1115 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1116 - if (IS_IRONLAKE(dev))
1117 + if (HAS_PCH_SPLIT(dev))
1118 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1119 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1120 else
1121 @@ -897,7 +897,7 @@ void i915_enable_interrupt (struct drm_device *dev)
1122 {
1123 struct drm_i915_private *dev_priv = dev->dev_private;
1124
1125 - if (!IS_IRONLAKE(dev))
1126 + if (!HAS_PCH_SPLIT(dev))
1127 opregion_enable_asle(dev);
1128 dev_priv->irq_enabled = 1;
1129 }
1130 @@ -1076,7 +1076,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1131 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1132 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1133
1134 - if (IS_IRONLAKE(dev)) {
1135 + if (HAS_PCH_SPLIT(dev)) {
1136 ironlake_irq_preinstall(dev);
1137 return;
1138 }
1139 @@ -1108,7 +1108,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1140
1141 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1142
1143 - if (IS_IRONLAKE(dev))
1144 + if (HAS_PCH_SPLIT(dev))
1145 return ironlake_irq_postinstall(dev);
1146
1147 /* Unmask the interrupts that we always want on. */
1148 @@ -1196,7 +1196,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1149
1150 dev_priv->vblank_pipe = 0;
1151
1152 - if (IS_IRONLAKE(dev)) {
1153 + if (HAS_PCH_SPLIT(dev)) {
1154 ironlake_irq_uninstall(dev);
1155 return;
1156 }
1157 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1158 index ab1bd2d..fd95bdf 100644
1159 --- a/drivers/gpu/drm/i915/i915_reg.h
1160 +++ b/drivers/gpu/drm/i915/i915_reg.h
1161 @@ -221,7 +221,7 @@
1162 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
1163 #define I830_FENCE_PITCH_SHIFT 4
1164 #define I830_FENCE_REG_VALID (1<<0)
1165 -#define I915_FENCE_MAX_PITCH_VAL 0x10
1166 +#define I915_FENCE_MAX_PITCH_VAL 4
1167 #define I830_FENCE_MAX_PITCH_VAL 6
1168 #define I830_FENCE_MAX_SIZE_VAL (1<<8)
1169
1170 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1171 index 15fbc1b..70c9d4b 100644
1172 --- a/drivers/gpu/drm/i915/intel_bios.c
1173 +++ b/drivers/gpu/drm/i915/intel_bios.c
1174 @@ -247,6 +247,7 @@ static void
1175 parse_general_features(struct drm_i915_private *dev_priv,
1176 struct bdb_header *bdb)
1177 {
1178 + struct drm_device *dev = dev_priv->dev;
1179 struct bdb_general_features *general;
1180
1181 /* Set sensible defaults in case we can't find the general block */
1182 @@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
1183 if (IS_I85X(dev_priv->dev))
1184 dev_priv->lvds_ssc_freq =
1185 general->ssc_freq ? 66 : 48;
1186 - else if (IS_IRONLAKE(dev_priv->dev))
1187 + else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
1188 dev_priv->lvds_ssc_freq =
1189 general->ssc_freq ? 100 : 120;
1190 else
1191 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1192 index 79dd402..fccf074 100644
1193 --- a/drivers/gpu/drm/i915/intel_crt.c
1194 +++ b/drivers/gpu/drm/i915/intel_crt.c
1195 @@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
1196 struct drm_i915_private *dev_priv = dev->dev_private;
1197 u32 temp, reg;
1198
1199 - if (IS_IRONLAKE(dev))
1200 + if (HAS_PCH_SPLIT(dev))
1201 reg = PCH_ADPA;
1202 else
1203 reg = ADPA;
1204 @@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1205 else
1206 dpll_md_reg = DPLL_B_MD;
1207
1208 - if (IS_IRONLAKE(dev))
1209 + if (HAS_PCH_SPLIT(dev))
1210 adpa_reg = PCH_ADPA;
1211 else
1212 adpa_reg = ADPA;
1213 @@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1214 * Disable separate mode multiplier used when cloning SDVO to CRT
1215 * XXX this needs to be adjusted when we really are cloning
1216 */
1217 - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
1218 + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
1219 dpll_md = I915_READ(dpll_md_reg);
1220 I915_WRITE(dpll_md_reg,
1221 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
1222 @@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
1223
1224 if (intel_crtc->pipe == 0) {
1225 adpa |= ADPA_PIPE_A_SELECT;
1226 - if (!IS_IRONLAKE(dev))
1227 + if (!HAS_PCH_SPLIT(dev))
1228 I915_WRITE(BCLRPAT_A, 0);
1229 } else {
1230 adpa |= ADPA_PIPE_B_SELECT;
1231 - if (!IS_IRONLAKE(dev))
1232 + if (!HAS_PCH_SPLIT(dev))
1233 I915_WRITE(BCLRPAT_B, 0);
1234 }
1235
1236 @@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
1237 u32 hotplug_en;
1238 int i, tries = 0;
1239
1240 - if (IS_IRONLAKE(dev))
1241 + if (HAS_PCH_SPLIT(dev))
1242 return intel_ironlake_crt_detect_hotplug(connector);
1243
1244 /*
1245 @@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
1246 &intel_output->enc);
1247
1248 /* Set up the DDC bus. */
1249 - if (IS_IRONLAKE(dev))
1250 + if (HAS_PCH_SPLIT(dev))
1251 i2c_reg = PCH_GPIOA;
1252 else {
1253 i2c_reg = GPIOA;
1254 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1255 index b27202d..4b2458d 100644
1256 --- a/drivers/gpu/drm/i915/intel_display.c
1257 +++ b/drivers/gpu/drm/i915/intel_display.c
1258 @@ -232,7 +232,7 @@ struct intel_limit {
1259 #define G4X_P2_DISPLAY_PORT_FAST 10
1260 #define G4X_P2_DISPLAY_PORT_LIMIT 0
1261
1262 -/* Ironlake */
1263 +/* Ironlake / Sandybridge */
1264 /* as we calculate clock using (register_value + 2) for
1265 N/M1/M2, so here the range value for them is (actual_value-2).
1266 */
1267 @@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
1268 struct drm_device *dev = crtc->dev;
1269 const intel_limit_t *limit;
1270
1271 - if (IS_IRONLAKE(dev))
1272 + if (HAS_PCH_SPLIT(dev))
1273 limit = intel_ironlake_limit(crtc);
1274 else if (IS_G4X(dev)) {
1275 limit = intel_g4x_limit(crtc);
1276 @@ -1366,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1277 dspcntr &= ~DISPPLANE_TILED;
1278 }
1279
1280 - if (IS_IRONLAKE(dev))
1281 + if (HAS_PCH_SPLIT(dev))
1282 /* must disable */
1283 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1284
1285 @@ -1427,7 +1427,7 @@ static void i915_disable_vga (struct drm_device *dev)
1286 u8 sr1;
1287 u32 vga_reg;
1288
1289 - if (IS_IRONLAKE(dev))
1290 + if (HAS_PCH_SPLIT(dev))
1291 vga_reg = CPU_VGACNTRL;
1292 else
1293 vga_reg = VGACNTRL;
1294 @@ -2111,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1295 struct drm_display_mode *adjusted_mode)
1296 {
1297 struct drm_device *dev = crtc->dev;
1298 - if (IS_IRONLAKE(dev)) {
1299 + if (HAS_PCH_SPLIT(dev)) {
1300 /* FDI link clock is fixed at 2.7G */
1301 if (mode->clock * 3 > 27000 * 4)
1302 return MODE_CLOCK_HIGH;
1303 @@ -2967,7 +2967,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1304 refclk / 1000);
1305 } else if (IS_I9XX(dev)) {
1306 refclk = 96000;
1307 - if (IS_IRONLAKE(dev))
1308 + if (HAS_PCH_SPLIT(dev))
1309 refclk = 120000; /* 120Mhz refclk */
1310 } else {
1311 refclk = 48000;
1312 @@ -3025,7 +3025,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1313 }
1314
1315 /* FDI link */
1316 - if (IS_IRONLAKE(dev)) {
1317 + if (HAS_PCH_SPLIT(dev)) {
1318 int lane, link_bw, bpp;
1319 /* eDP doesn't require FDI link, so just set DP M/N
1320 according to current link config */
1321 @@ -3102,7 +3102,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1322 * PCH B stepping, previous chipset stepping should be
1323 * ignoring this setting.
1324 */
1325 - if (IS_IRONLAKE(dev)) {
1326 + if (HAS_PCH_SPLIT(dev)) {
1327 temp = I915_READ(PCH_DREF_CONTROL);
1328 /* Always enable nonspread source */
1329 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1330 @@ -3149,7 +3149,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1331 reduced_clock.m2;
1332 }
1333
1334 - if (!IS_IRONLAKE(dev))
1335 + if (!HAS_PCH_SPLIT(dev))
1336 dpll = DPLL_VGA_MODE_DIS;
1337
1338 if (IS_I9XX(dev)) {
1339 @@ -3162,7 +3162,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1340 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1341 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1342 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
1343 - else if (IS_IRONLAKE(dev))
1344 + else if (HAS_PCH_SPLIT(dev))
1345 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1346 }
1347 if (is_dp)
1348 @@ -3174,7 +3174,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1349 else {
1350 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1351 /* also FPA1 */
1352 - if (IS_IRONLAKE(dev))
1353 + if (HAS_PCH_SPLIT(dev))
1354 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1355 if (IS_G4X(dev) && has_reduced_clock)
1356 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1357 @@ -3193,7 +3193,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1358 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1359 break;
1360 }
1361 - if (IS_I965G(dev) && !IS_IRONLAKE(dev))
1362 + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
1363 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1364 } else {
1365 if (is_lvds) {
1366 @@ -3227,7 +3227,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1367
1368 /* Ironlake's plane is forced to pipe, bit 24 is to
1369 enable color space conversion */
1370 - if (!IS_IRONLAKE(dev)) {
1371 + if (!HAS_PCH_SPLIT(dev)) {
1372 if (pipe == 0)
1373 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
1374 else
1375 @@ -3254,14 +3254,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1376
1377
1378 /* Disable the panel fitter if it was on our pipe */
1379 - if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
1380 + if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
1381 I915_WRITE(PFIT_CONTROL, 0);
1382
1383 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
1384 drm_mode_debug_printmodeline(mode);
1385
1386 /* assign to Ironlake registers */
1387 - if (IS_IRONLAKE(dev)) {
1388 + if (HAS_PCH_SPLIT(dev)) {
1389 fp_reg = pch_fp_reg;
1390 dpll_reg = pch_dpll_reg;
1391 }
1392 @@ -3282,7 +3282,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1393 if (is_lvds) {
1394 u32 lvds;
1395
1396 - if (IS_IRONLAKE(dev))
1397 + if (HAS_PCH_SPLIT(dev))
1398 lvds_reg = PCH_LVDS;
1399
1400 lvds = I915_READ(lvds_reg);
1401 @@ -3328,7 +3328,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1402 /* Wait for the clocks to stabilize. */
1403 udelay(150);
1404
1405 - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
1406 + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
1407 if (is_sdvo) {
1408 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1409 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
1410 @@ -3375,14 +3375,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1411 /* pipesrc and dspsize control the size that is scaled from, which should
1412 * always be the user's requested size.
1413 */
1414 - if (!IS_IRONLAKE(dev)) {
1415 + if (!HAS_PCH_SPLIT(dev)) {
1416 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
1417 (mode->hdisplay - 1));
1418 I915_WRITE(dsppos_reg, 0);
1419 }
1420 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
1421
1422 - if (IS_IRONLAKE(dev)) {
1423 + if (HAS_PCH_SPLIT(dev)) {
1424 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
1425 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
1426 I915_WRITE(link_m1_reg, m_n.link_m);
1427 @@ -3403,7 +3403,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1428
1429 intel_wait_for_vblank(dev);
1430
1431 - if (IS_IRONLAKE(dev)) {
1432 + if (HAS_PCH_SPLIT(dev)) {
1433 /* enable address swizzle for tiling buffer */
1434 temp = I915_READ(DISP_ARB_CTL);
1435 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
1436 @@ -3438,7 +3438,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
1437 return;
1438
1439 /* use legacy palette for Ironlake */
1440 - if (IS_IRONLAKE(dev))
1441 + if (HAS_PCH_SPLIT(dev))
1442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
1443 LGC_PALETTE_B;
1444
1445 @@ -3922,7 +3922,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
1446 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1447 int dpll = I915_READ(dpll_reg);
1448
1449 - if (IS_IRONLAKE(dev))
1450 + if (HAS_PCH_SPLIT(dev))
1451 return;
1452
1453 if (!dev_priv->lvds_downclock_avail)
1454 @@ -3961,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
1455 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
1456 int dpll = I915_READ(dpll_reg);
1457
1458 - if (IS_IRONLAKE(dev))
1459 + if (HAS_PCH_SPLIT(dev))
1460 return;
1461
1462 if (!dev_priv->lvds_downclock_avail)
1463 @@ -4382,7 +4382,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1464 if (IS_MOBILE(dev) && !IS_I830(dev))
1465 intel_lvds_init(dev);
1466
1467 - if (IS_IRONLAKE(dev)) {
1468 + if (HAS_PCH_SPLIT(dev)) {
1469 int found;
1470
1471 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
1472 @@ -4451,7 +4451,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1473 DRM_DEBUG_KMS("probing DP_D\n");
1474 intel_dp_init(dev, DP_D);
1475 }
1476 - } else if (IS_I8XX(dev))
1477 + } else if (IS_GEN2(dev))
1478 intel_dvo_init(dev);
1479
1480 if (SUPPORTS_TV(dev))
1481 @@ -4599,7 +4599,7 @@ void intel_init_clock_gating(struct drm_device *dev)
1482 * Disable clock gating reported to work incorrectly according to the
1483 * specs, but enable as much else as we can.
1484 */
1485 - if (IS_IRONLAKE(dev)) {
1486 + if (HAS_PCH_SPLIT(dev)) {
1487 return;
1488 } else if (IS_G4X(dev)) {
1489 uint32_t dspclk_gate;
1490 @@ -4672,7 +4672,7 @@ static void intel_init_display(struct drm_device *dev)
1491 struct drm_i915_private *dev_priv = dev->dev_private;
1492
1493 /* We always want a DPMS function */
1494 - if (IS_IRONLAKE(dev))
1495 + if (HAS_PCH_SPLIT(dev))
1496 dev_priv->display.dpms = ironlake_crtc_dpms;
1497 else
1498 dev_priv->display.dpms = i9xx_crtc_dpms;
1499 @@ -4715,7 +4715,7 @@ static void intel_init_display(struct drm_device *dev)
1500 i830_get_display_clock_speed;
1501
1502 /* For FIFO watermark updates */
1503 - if (IS_IRONLAKE(dev))
1504 + if (HAS_PCH_SPLIT(dev))
1505 dev_priv->display.update_wm = NULL;
1506 else if (IS_G4X(dev))
1507 dev_priv->display.update_wm = g4x_update_wm;
1508 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1509 index 1238bc9..66df0c3 100644
1510 --- a/drivers/gpu/drm/i915/intel_lvds.c
1511 +++ b/drivers/gpu/drm/i915/intel_lvds.c
1512 @@ -661,7 +661,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
1513 /* ACPI lid methods were generally unreliable in this generation, so
1514 * don't even bother.
1515 */
1516 - if (IS_I8XX(dev))
1517 + if (IS_GEN2(dev))
1518 return connector_status_connected;
1519
1520 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
1521 diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
1522 index 63f569b..6b89042 100644
1523 --- a/drivers/gpu/drm/i915/intel_overlay.c
1524 +++ b/drivers/gpu/drm/i915/intel_overlay.c
1525 @@ -172,7 +172,7 @@ struct overlay_registers {
1526 #define OFC_UPDATE 0x1
1527
1528 #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
1529 -#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
1530 +#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
1531
1532
1533 static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1534 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
1535 index 10be7b5..855911e 100644
1536 --- a/drivers/i2c/i2c-core.c
1537 +++ b/drivers/i2c/i2c-core.c
1538 @@ -1210,12 +1210,23 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1539 return 0;
1540
1541 /* Make sure there is something at this address */
1542 - if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0)
1543 - return 0;
1544 + if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
1545 + /* Special probe for FSC hwmon chips */
1546 + union i2c_smbus_data dummy;
1547
1548 - /* Prevent 24RF08 corruption */
1549 - if ((addr & ~0x0f) == 0x50)
1550 - i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL);
1551 + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
1552 + I2C_SMBUS_BYTE_DATA, &dummy) < 0)
1553 + return 0;
1554 + } else {
1555 + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1556 + I2C_SMBUS_QUICK, NULL) < 0)
1557 + return 0;
1558 +
1559 + /* Prevent 24RF08 corruption */
1560 + if ((addr & ~0x0f) == 0x50)
1561 + i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1562 + I2C_SMBUS_QUICK, NULL);
1563 + }
1564
1565 /* Finally call the custom detection function */
1566 memset(&info, 0, sizeof(struct i2c_board_info));
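
Note on the hunk above: address 0x73 on hwmon-class adapters is now probed with a byte-data read instead of the usual SMBus quick write, which FSC monitoring chips reportedly mishandle; every other address keeps the quick-write probe (plus the 24RF08 double quick-write guard, omitted below). A minimal userspace sketch of that decision, with hypothetical callbacks standing in for the two i2c_smbus_xfer() variants:

    #include <stdbool.h>
    #include <stdio.h>

    #define I2C_CLASS_HWMON 0x01    /* mirrors the kernel flag's value */

    typedef bool (*probe_fn)(int addr);   /* hypothetical probe callback */

    static bool probe_for_chip(int addr, unsigned int adapter_class,
                               probe_fn quick_write, probe_fn byte_read)
    {
        if (addr == 0x73 && (adapter_class & I2C_CLASS_HWMON))
            return byte_read(addr);      /* FSC chips dislike quick writes */
        return quick_write(addr);        /* default probe, unchanged */
    }

    static bool always_there(int addr) { (void)addr; return true; }

    int main(void)
    {
        printf("%d\n", probe_for_chip(0x73, I2C_CLASS_HWMON,
                                      always_there, always_there));
        return 0;
    }
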
1567 diff --git a/drivers/md/md.c b/drivers/md/md.c
1568 index a20a71e..2ecd1d5 100644
1569 --- a/drivers/md/md.c
1570 +++ b/drivers/md/md.c
1571 @@ -2108,12 +2108,18 @@ repeat:
1572 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1573 /* .. if the array isn't clean, an 'even' event must also go
1574 * to spares. */
1575 - if ((mddev->events&1)==0)
1576 + if ((mddev->events&1)==0) {
1577 nospares = 0;
1578 + sync_req = 2; /* force a second update to get the
1579 + * even/odd in sync */
1580 + }
1581 } else {
1582 /* otherwise an 'odd' event must go to spares */
1583 - if ((mddev->events&1))
1584 + if ((mddev->events&1)) {
1585 nospares = 0;
1586 + sync_req = 2; /* force a second update to get the
1587 + * even/odd in sync */
1588 + }
1589 }
1590 }
1591
1592 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1593 index ceb24af..0468f5b 100644
1594 --- a/drivers/md/raid5.c
1595 +++ b/drivers/md/raid5.c
1596 @@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1597
1598 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1599 atomic_inc(&rdev->read_errors);
1600 - if (conf->mddev->degraded)
1601 + if (conf->mddev->degraded >= conf->max_degraded)
1602 printk_rl(KERN_WARNING
1603 "raid5:%s: read error not correctable "
1604 "(sector %llu on %s).\n",
1605 @@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1606 int previous, int *dd_idx,
1607 struct stripe_head *sh)
1608 {
1609 - long stripe;
1610 - unsigned long chunk_number;
1611 + sector_t stripe, stripe2;
1612 + sector_t chunk_number;
1613 unsigned int chunk_offset;
1614 int pd_idx, qd_idx;
1615 int ddf_layout = 0;
1616 @@ -1670,18 +1670,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1617 */
1618 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1619 chunk_number = r_sector;
1620 - BUG_ON(r_sector != chunk_number);
1621
1622 /*
1623 * Compute the stripe number
1624 */
1625 - stripe = chunk_number / data_disks;
1626 -
1627 - /*
1628 - * Compute the data disk and parity disk indexes inside the stripe
1629 - */
1630 - *dd_idx = chunk_number % data_disks;
1631 -
1632 + stripe = chunk_number;
1633 + *dd_idx = sector_div(stripe, data_disks);
1634 + stripe2 = stripe;
1635 /*
1636 * Select the parity disk based on the user selected algorithm.
1637 */
1638 @@ -1693,21 +1688,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1639 case 5:
1640 switch (algorithm) {
1641 case ALGORITHM_LEFT_ASYMMETRIC:
1642 - pd_idx = data_disks - stripe % raid_disks;
1643 + pd_idx = data_disks - sector_div(stripe2, raid_disks);
1644 if (*dd_idx >= pd_idx)
1645 (*dd_idx)++;
1646 break;
1647 case ALGORITHM_RIGHT_ASYMMETRIC:
1648 - pd_idx = stripe % raid_disks;
1649 + pd_idx = sector_div(stripe2, raid_disks);
1650 if (*dd_idx >= pd_idx)
1651 (*dd_idx)++;
1652 break;
1653 case ALGORITHM_LEFT_SYMMETRIC:
1654 - pd_idx = data_disks - stripe % raid_disks;
1655 + pd_idx = data_disks - sector_div(stripe2, raid_disks);
1656 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1657 break;
1658 case ALGORITHM_RIGHT_SYMMETRIC:
1659 - pd_idx = stripe % raid_disks;
1660 + pd_idx = sector_div(stripe2, raid_disks);
1661 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1662 break;
1663 case ALGORITHM_PARITY_0:
1664 @@ -1727,7 +1722,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1665
1666 switch (algorithm) {
1667 case ALGORITHM_LEFT_ASYMMETRIC:
1668 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
1669 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1670 qd_idx = pd_idx + 1;
1671 if (pd_idx == raid_disks-1) {
1672 (*dd_idx)++; /* Q D D D P */
1673 @@ -1736,7 +1731,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1674 (*dd_idx) += 2; /* D D P Q D */
1675 break;
1676 case ALGORITHM_RIGHT_ASYMMETRIC:
1677 - pd_idx = stripe % raid_disks;
1678 + pd_idx = sector_div(stripe2, raid_disks);
1679 qd_idx = pd_idx + 1;
1680 if (pd_idx == raid_disks-1) {
1681 (*dd_idx)++; /* Q D D D P */
1682 @@ -1745,12 +1740,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1683 (*dd_idx) += 2; /* D D P Q D */
1684 break;
1685 case ALGORITHM_LEFT_SYMMETRIC:
1686 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
1687 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1688 qd_idx = (pd_idx + 1) % raid_disks;
1689 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1690 break;
1691 case ALGORITHM_RIGHT_SYMMETRIC:
1692 - pd_idx = stripe % raid_disks;
1693 + pd_idx = sector_div(stripe2, raid_disks);
1694 qd_idx = (pd_idx + 1) % raid_disks;
1695 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1696 break;
1697 @@ -1769,7 +1764,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1698 /* Exactly the same as RIGHT_ASYMMETRIC, but order
1699 * of blocks for computing Q is different.
1700 */
1701 - pd_idx = stripe % raid_disks;
1702 + pd_idx = sector_div(stripe2, raid_disks);
1703 qd_idx = pd_idx + 1;
1704 if (pd_idx == raid_disks-1) {
1705 (*dd_idx)++; /* Q D D D P */
1706 @@ -1784,7 +1779,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1707 * D D D P Q rather than
1708 * Q D D D P
1709 */
1710 - pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1711 + stripe2 += 1;
1712 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1713 qd_idx = pd_idx + 1;
1714 if (pd_idx == raid_disks-1) {
1715 (*dd_idx)++; /* Q D D D P */
1716 @@ -1796,7 +1792,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1717
1718 case ALGORITHM_ROTATING_N_CONTINUE:
1719 /* Same as left_symmetric but Q is before P */
1720 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
1721 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1722 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1723 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1724 ddf_layout = 1;
1725 @@ -1804,27 +1800,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1726
1727 case ALGORITHM_LEFT_ASYMMETRIC_6:
1728 /* RAID5 left_asymmetric, with Q on last device */
1729 - pd_idx = data_disks - stripe % (raid_disks-1);
1730 + pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1731 if (*dd_idx >= pd_idx)
1732 (*dd_idx)++;
1733 qd_idx = raid_disks - 1;
1734 break;
1735
1736 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1737 - pd_idx = stripe % (raid_disks-1);
1738 + pd_idx = sector_div(stripe2, raid_disks-1);
1739 if (*dd_idx >= pd_idx)
1740 (*dd_idx)++;
1741 qd_idx = raid_disks - 1;
1742 break;
1743
1744 case ALGORITHM_LEFT_SYMMETRIC_6:
1745 - pd_idx = data_disks - stripe % (raid_disks-1);
1746 + pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1747 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1748 qd_idx = raid_disks - 1;
1749 break;
1750
1751 case ALGORITHM_RIGHT_SYMMETRIC_6:
1752 - pd_idx = stripe % (raid_disks-1);
1753 + pd_idx = sector_div(stripe2, raid_disks-1);
1754 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1755 qd_idx = raid_disks - 1;
1756 break;
1757 @@ -1869,14 +1865,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1758 : conf->algorithm;
1759 sector_t stripe;
1760 int chunk_offset;
1761 - int chunk_number, dummy1, dd_idx = i;
1762 + sector_t chunk_number;
1763 + int dummy1, dd_idx = i;
1764 sector_t r_sector;
1765 struct stripe_head sh2;
1766
1767
1768 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1769 stripe = new_sector;
1770 - BUG_ON(new_sector != stripe);
1771
1772 if (i == sh->pd_idx)
1773 return 0;
1774 @@ -1969,7 +1965,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1775 }
1776
1777 chunk_number = stripe * data_disks + i;
1778 - r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1779 + r_sector = chunk_number * sectors_per_chunk + chunk_offset;
1780
1781 check = raid5_compute_sector(conf, r_sector,
1782 previous, &dummy1, &sh2);
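
The substitution repeated throughout this hunk — `stripe % raid_disks` becoming `sector_div(stripe2, raid_disks)` — is the point of the change: stripe is now a 64-bit sector_t, and 32-bit kernels cannot use the C `%` operator on 64-bit operands (it would pull in compiler division helpers the kernel does not provide). sector_div() divides its first argument in place and returns the remainder, and the quotient in stripe is still needed afterwards, hence the scratch copy stripe2. A userspace sketch of the semantics (sector_div_demo is an illustrative stand-in, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's sector_div(): divide the
     * 64-bit dividend in place and return the 32-bit remainder. */
    static uint32_t sector_div_demo(uint64_t *sector, uint32_t divisor)
    {
        uint32_t rem = (uint32_t)(*sector % divisor);
        *sector /= divisor;
        return rem;
    }

    int main(void)
    {
        uint64_t stripe = 10000000000ULL;   /* stripe number past 2^32 */
        uint64_t stripe2 = stripe;          /* scratch copy, as in the patch */
        int raid_disks = 6;

        int pd_idx = (int)sector_div_demo(&stripe2, raid_disks);
        printf("pd_idx=%d, stripe still %llu\n",
               pd_idx, (unsigned long long)stripe);
        return 0;
    }
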
1783 diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
1784 index e48380c..95a463c 100644
1785 --- a/drivers/media/dvb/ttpci/budget.c
1786 +++ b/drivers/media/dvb/ttpci/budget.c
1787 @@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget)
1788 &budget->i2c_adap,
1789 &tt1600_isl6423_config);
1790
1791 - } else {
1792 - dvb_frontend_detach(budget->dvb_frontend);
1793 - budget->dvb_frontend = NULL;
1794 }
1795 }
1796 break;
1797 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
1798 index 65df1de..a555c90 100644
1799 --- a/drivers/net/bnx2.c
1800 +++ b/drivers/net/bnx2.c
1801 @@ -4772,8 +4772,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
1802 rc = bnx2_alloc_bad_rbuf(bp);
1803 }
1804
1805 - if (bp->flags & BNX2_FLAG_USING_MSIX)
1806 + if (bp->flags & BNX2_FLAG_USING_MSIX) {
1807 bnx2_setup_msix_tbl(bp);
1808 + /* Prevent MSIX table reads and writes from timing out */
1809 + REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
1810 + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
1811 + }
1812
1813 return rc;
1814 }
1815 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1816 index 3db85da..787befc 100644
1817 --- a/drivers/net/r8169.c
1818 +++ b/drivers/net/r8169.c
1819 @@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
1820 spin_lock_irq(&tp->lock);
1821
1822 RTL_W8(Cfg9346, Cfg9346_Unlock);
1823 - RTL_W32(MAC0, low);
1824 +
1825 RTL_W32(MAC4, high);
1826 + RTL_R32(MAC4);
1827 +
1828 + RTL_W32(MAC0, low);
1829 + RTL_R32(MAC0);
1830 +
1831 RTL_W8(Cfg9346, Cfg9346_Lock);
1832
1833 spin_unlock_irq(&tp->lock);
1834 @@ -4316,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
1835
1836 tp->cur_tx += frags + 1;
1837
1838 - smp_wmb();
1839 + wmb();
1840
1841 RTL_W8(TxPoll, NPQ); /* set polling bit */
1842
1843 @@ -4675,7 +4680,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
1844 * until it does.
1845 */
1846 tp->intr_mask = 0xffff;
1847 - smp_wmb();
1848 + wmb();
1849 RTL_W16(IntrMask, tp->intr_event);
1850 }
1851
1852 @@ -4813,8 +4818,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
1853 mc_filter[1] = swab32(data);
1854 }
1855
1856 - RTL_W32(MAR0 + 0, mc_filter[0]);
1857 RTL_W32(MAR0 + 4, mc_filter[1]);
1858 + RTL_W32(MAR0 + 0, mc_filter[0]);
1859
1860 RTL_W32(RxConfig, tmp);
1861
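
The rtl_rar_set() change above does two things: it reverses the write order (MAC4 before MAC0) and reads each register back after writing it. On PCI, MMIO writes may be posted (buffered by the bus), so a read-back is the usual way to force the preceding write to reach the device before the next access; presumably some RTL8169 variants need the halves to land in this order. A generic sketch of the read-back flush idiom (all names here are illustrative, not the driver's):

    #include <stdint.h>

    static inline void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
        *reg = val;                 /* may be posted by the bus */
    }

    static inline uint32_t mmio_read32(volatile uint32_t *reg)
    {
        return *reg;                /* forces earlier posted writes to land */
    }

    static void set_mac_pair(volatile uint32_t *mac4, volatile uint32_t *mac0,
                             uint32_t high, uint32_t low)
    {
        mmio_write32(mac4, high);
        (void)mmio_read32(mac4);    /* flush before touching the low half */
        mmio_write32(mac0, low);
        (void)mmio_read32(mac0);
    }
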
1862 diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
1863 index 46997e1..fb52e47 100644
1864 --- a/drivers/net/sfc/efx.c
1865 +++ b/drivers/net/sfc/efx.c
1866 @@ -1862,6 +1862,7 @@ out:
1867 }
1868
1869 if (disabled) {
1870 + dev_close(efx->net_dev);
1871 EFX_ERR(efx, "has been disabled\n");
1872 efx->state = STATE_DISABLED;
1873 } else {
1874 @@ -1885,8 +1886,7 @@ static void efx_reset_work(struct work_struct *data)
1875 }
1876
1877 rtnl_lock();
1878 - if (efx_reset(efx, efx->reset_pending))
1879 - dev_close(efx->net_dev);
1880 + (void)efx_reset(efx, efx->reset_pending);
1881 rtnl_unlock();
1882 }
1883
1884 diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
1885 index 9d009c4..e20a824 100644
1886 --- a/drivers/net/sfc/falcon.c
1887 +++ b/drivers/net/sfc/falcon.c
1888 @@ -1317,7 +1317,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1889
1890 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1891
1892 - falcon_probe_board(efx, board_rev);
1893 + rc = falcon_probe_board(efx, board_rev);
1894 + if (rc)
1895 + goto fail2;
1896
1897 kfree(nvconfig);
1898 return 0;
1899 diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
1900 index 5712fdd..c7a933a 100644
1901 --- a/drivers/net/sfc/falcon_boards.c
1902 +++ b/drivers/net/sfc/falcon_boards.c
1903 @@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
1904 },
1905 };
1906
1907 -static const struct falcon_board_type falcon_dummy_board = {
1908 - .init = efx_port_dummy_op_int,
1909 - .init_phy = efx_port_dummy_op_void,
1910 - .fini = efx_port_dummy_op_void,
1911 - .set_id_led = efx_port_dummy_op_set_id_led,
1912 - .monitor = efx_port_dummy_op_int,
1913 -};
1914 -
1915 -void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1916 +int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1917 {
1918 struct falcon_board *board = falcon_board(efx);
1919 u8 type_id = FALCON_BOARD_TYPE(revision_info);
1920 @@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
1921 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
1922 ? board->type->ref_model : board->type->gen_type,
1923 'A' + board->major, board->minor);
1924 + return 0;
1925 } else {
1926 EFX_ERR(efx, "unknown board type %d\n", type_id);
1927 - board->type = &falcon_dummy_board;
1928 + return -ENODEV;
1929 }
1930 }
1931 diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
1932 index 9351c03..3166baf 100644
1933 --- a/drivers/net/sfc/nic.h
1934 +++ b/drivers/net/sfc/nic.h
1935 @@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
1936 **************************************************************************
1937 */
1938
1939 -extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
1940 +extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
1941
1942 /* TX data path */
1943 extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
1944 diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
1945 index f8c6771..afbac2d 100644
1946 --- a/drivers/net/sfc/siena.c
1947 +++ b/drivers/net/sfc/siena.c
1948 @@ -454,8 +454,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
1949
1950 static void siena_update_nic_stats(struct efx_nic *efx)
1951 {
1952 - while (siena_try_update_nic_stats(efx) == -EAGAIN)
1953 - cpu_relax();
1954 + int retry;
1955 +
1956 + /* If we're unlucky enough to read statistics during the DMA, wait
1957 + * up to 10ms for it to finish (typically takes <500us) */
1958 + for (retry = 0; retry < 100; ++retry) {
1959 + if (siena_try_update_nic_stats(efx) == 0)
1960 + return;
1961 + udelay(100);
1962 + }
1963 +
1964 + /* Use the old values instead */
1965 }
1966
1967 static void siena_start_nic_stats(struct efx_nic *efx)
1968 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1969 index 17d1493..8405fb8 100644
1970 --- a/drivers/net/tg3.c
1971 +++ b/drivers/net/tg3.c
1972 @@ -8572,6 +8572,7 @@ static int tg3_test_msi(struct tg3 *tp)
1973 pci_disable_msi(tp->pdev);
1974
1975 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
1976 + tp->napi[0].irq_vec = tp->pdev->irq;
1977
1978 err = tg3_request_irq(tp, 0);
1979 if (err)
1980 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1981 index 2834a01..909b73d 100644
1982 --- a/drivers/net/tun.c
1983 +++ b/drivers/net/tun.c
1984 @@ -380,6 +380,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1985 }
1986 }
1987
1988 + /* Orphan the skb - required as we might hang on to it
1989 + * for an indefinite time. */
1990 + skb_orphan(skb);
1991 +
1992 /* Enqueue packet */
1993 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
1994 dev->trans_start = jiffies;
1995 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1996 index 5f3b9ea..8a6e027 100644
1997 --- a/drivers/net/usb/cdc_ether.c
1998 +++ b/drivers/net/usb/cdc_ether.c
1999 @@ -433,6 +433,7 @@ static const struct driver_info mbm_info = {
2000 .bind = cdc_bind,
2001 .unbind = usbnet_cdc_unbind,
2002 .status = cdc_status,
2003 + .manage_power = cdc_manage_power,
2004 };
2005
2006 /*-------------------------------------------------------------------------*/
2007 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
2008 index 3d406f9..c60625b 100644
2009 --- a/drivers/net/usb/dm9601.c
2010 +++ b/drivers/net/usb/dm9601.c
2011 @@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
2012 goto out;
2013
2014 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
2015 - dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
2016 + dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
2017
2018 for (i = 0; i < DM_TIMEOUT; i++) {
2019 u8 tmp;
2020 diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
2021 index b9b9d6b..941f053 100644
2022 --- a/drivers/net/wan/hdlc_ppp.c
2023 +++ b/drivers/net/wan/hdlc_ppp.c
2024 @@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
2025 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
2026 }
2027
2028 +static void ppp_close(struct net_device *dev)
2029 +{
2030 + ppp_tx_flush();
2031 +}
2032 +
2033 static struct hdlc_proto proto = {
2034 .start = ppp_start,
2035 .stop = ppp_stop,
2036 + .close = ppp_close,
2037 .type_trans = ppp_type_trans,
2038 .ioctl = ppp_ioctl,
2039 .netif_rx = ppp_rx,
2040 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2041 index 7b1eab4..e55f718 100644
2042 --- a/drivers/net/wireless/ath/ath9k/main.c
2043 +++ b/drivers/net/wireless/ath/ath9k/main.c
2044 @@ -1358,9 +1358,9 @@ void ath_cleanup(struct ath_softc *sc)
2045 free_irq(sc->irq, sc);
2046 ath_bus_cleanup(common);
2047 kfree(sc->sec_wiphy);
2048 - ieee80211_free_hw(sc->hw);
2049
2050 ath9k_uninit_hw(sc);
2051 + ieee80211_free_hw(sc->hw);
2052 }
2053
2054 static int ath9k_reg_notifier(struct wiphy *wiphy,
2055 diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
2056 index 4bf4c21..41d33cd 100644
2057 --- a/drivers/net/wireless/p54/p54pci.c
2058 +++ b/drivers/net/wireless/p54/p54pci.c
2059 @@ -245,7 +245,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
2060 u32 idx, i;
2061
2062 i = (*index) % ring_limit;
2063 - (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
2064 + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
2065 idx %= ring_limit;
2066
2067 while (i != idx) {
2068 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
2069 index 8742640..b3c4fbd 100644
2070 --- a/drivers/net/wireless/p54/p54usb.c
2071 +++ b/drivers/net/wireless/p54/p54usb.c
2072 @@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
2073 /* Version 1 devices (pci chip + net2280) */
2074 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
2075 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
2076 + {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
2077 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
2078 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
2079 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
2080 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
2081 index b6dda2b..9d147de 100644
2082 --- a/drivers/net/wireless/p54/txrx.c
2083 +++ b/drivers/net/wireless/p54/txrx.c
2084 @@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
2085 struct ieee80211_tx_queue_stats *queue;
2086 unsigned long flags;
2087
2088 - if (WARN_ON(p54_queue > P54_QUEUE_NUM))
2089 + if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
2090 return -EINVAL;
2091
2092 queue = &priv->tx_stats[p54_queue];
2093 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2094 index c4fead1..b8eb5e7 100644
2095 --- a/drivers/pci/pci.c
2096 +++ b/drivers/pci/pci.c
2097 @@ -624,7 +624,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
2098 */
2099 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
2100 {
2101 - return state > PCI_D0 ?
2102 + return state >= PCI_D0 ?
2103 pci_platform_power_transition(dev, state) : -EINVAL;
2104 }
2105 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
2106 @@ -661,10 +661,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2107 */
2108 return 0;
2109
2110 - /* Check if we're already there */
2111 - if (dev->current_state == state)
2112 - return 0;
2113 -
2114 __pci_start_power_transition(dev, state);
2115
2116 /* This device is quirked not to be put into D3, so
2117 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2118 index e6b67f2..741672f 100644
2119 --- a/drivers/scsi/libiscsi.c
2120 +++ b/drivers/scsi/libiscsi.c
2121 @@ -470,12 +470,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
2122
2123 WARN_ON(hdrlength >= 256);
2124 hdr->hlength = hdrlength & 0xFF;
2125 + hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
2126
2127 if (session->tt->init_task && session->tt->init_task(task))
2128 return -EIO;
2129
2130 task->state = ISCSI_TASK_RUNNING;
2131 - hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
2132 session->cmdsn++;
2133
2134 conn->scsicmd_pdus_cnt++;
2135 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
2136 index e155011..816ab97 100644
2137 --- a/drivers/scsi/libsas/sas_ata.c
2138 +++ b/drivers/scsi/libsas/sas_ata.c
2139 @@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
2140 void sas_ata_task_abort(struct sas_task *task)
2141 {
2142 struct ata_queued_cmd *qc = task->uldd_task;
2143 + struct request_queue *q = qc->scsicmd->device->request_queue;
2144 struct completion *waiting;
2145 + unsigned long flags;
2146
2147 /* Bounce SCSI-initiated commands to the SCSI EH */
2148 if (qc->scsicmd) {
2149 + spin_lock_irqsave(q->queue_lock, flags);
2150 blk_abort_request(qc->scsicmd->request);
2151 + spin_unlock_irqrestore(q->queue_lock, flags);
2152 scsi_schedule_eh(qc->scsicmd->device->host);
2153 return;
2154 }
2155 diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
2156 index 14b1319..b672d10 100644
2157 --- a/drivers/scsi/libsas/sas_scsi_host.c
2158 +++ b/drivers/scsi/libsas/sas_scsi_host.c
2159 @@ -1029,6 +1029,8 @@ int __sas_task_abort(struct sas_task *task)
2160 void sas_task_abort(struct sas_task *task)
2161 {
2162 struct scsi_cmnd *sc = task->uldd_task;
2163 + struct request_queue *q = sc->device->request_queue;
2164 + unsigned long flags;
2165
2166 /* Escape for libsas internal commands */
2167 if (!sc) {
2168 @@ -1043,7 +1045,9 @@ void sas_task_abort(struct sas_task *task)
2169 return;
2170 }
2171
2172 + spin_lock_irqsave(q->queue_lock, flags);
2173 blk_abort_request(sc->request);
2174 + spin_unlock_irqrestore(q->queue_lock, flags);
2175 scsi_schedule_eh(sc->device->host);
2176 }
2177
2178 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2179 index 0b575c8..aa2a2dc 100644
2180 --- a/drivers/scsi/scsi_debug.c
2181 +++ b/drivers/scsi/scsi_debug.c
2182 @@ -956,7 +956,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
2183 static sector_t get_sdebug_capacity(void)
2184 {
2185 if (scsi_debug_virtual_gb > 0)
2186 - return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
2187 + return (sector_t)scsi_debug_virtual_gb *
2188 + (1073741824 / scsi_debug_sector_size);
2189 else
2190 return sdebug_store_sectors;
2191 }
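
The old formula hard-wired 512-byte sectors: 2048 * 1024 sectors is exactly 1 GiB / 512. Once scsi_debug grew a configurable scsi_debug_sector_size, that constant overstated the capacity by a factor of sector_size / 512; the replacement divides 1 GiB (1073741824 bytes) by the actual sector size. A quick check of both formulas:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long virtual_gb = 4;      /* scsi_debug_virtual_gb */
        unsigned sector_size = 4096;            /* scsi_debug_sector_size */

        /* old: 2048 * 1024 sectors == 1 GiB only for 512-byte sectors */
        unsigned long long old_sectors = 2048ULL * 1024 * virtual_gb;

        /* new: scale 1 GiB by the configured sector size */
        unsigned long long new_sectors =
            virtual_gb * (1073741824ULL / sector_size);

        printf("old=%llu new=%llu\n", old_sectors, new_sectors);
        /* old=8388608 new=1048576: old is 8x too big at 4 KiB sectors */
        return 0;
    }
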
2192 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2193 index 08ed506..e46155b 100644
2194 --- a/drivers/scsi/scsi_error.c
2195 +++ b/drivers/scsi/scsi_error.c
2196 @@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
2197 if (scmd->device->allow_restart &&
2198 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
2199 return FAILED;
2200 - return SUCCESS;
2201 +
2202 + if (blk_barrier_rq(scmd->request))
2203 + /*
2204 + * barrier requests should always retry on UA,
2205 + * otherwise the block layer will get a spurious error
2206 + */
2207 + return NEEDS_RETRY;
2208 + else
2209 + /*
2210 + * for normal (non-barrier) commands, pass the
2211 + * UA upwards for a determination in the
2212 + * completion functions
2213 + */
2214 + return SUCCESS;
2215
2216 /* these three are not supported */
2217 case COPY_ABORTED:
2218 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2219 index c664242..5697709 100644
2220 --- a/drivers/scsi/scsi_lib.c
2221 +++ b/drivers/scsi/scsi_lib.c
2222 @@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2223 * we already took a copy of the original into rq->errors which
2224 * is what gets returned to the user
2225 */
2226 - if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
2227 - if (!(req->cmd_flags & REQ_QUIET))
2228 + if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
2229 + /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
2230 + * print since caller wants ATA registers. Only occurs on
2231 + * SCSI ATA PASS_THROUGH commands when CK_COND=1
2232 + */
2233 + if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
2234 + ;
2235 + else if (!(req->cmd_flags & REQ_QUIET))
2236 scsi_print_sense("", cmd);
2237 result = 0;
2238 /* BLOCK_PC may have set error */
2239 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2240 index 255da53..bf15920 100644
2241 --- a/drivers/scsi/sd.c
2242 +++ b/drivers/scsi/sd.c
2243 @@ -1039,6 +1039,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
2244 {
2245 rq->cmd_type = REQ_TYPE_BLOCK_PC;
2246 rq->timeout = SD_TIMEOUT;
2247 + rq->retries = SD_MAX_RETRIES;
2248 rq->cmd[0] = SYNCHRONIZE_CACHE;
2249 rq->cmd_len = 10;
2250 }
2251 diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
2252 index 24485cc..4822cb5 100644
2253 --- a/drivers/serial/8250_pnp.c
2254 +++ b/drivers/serial/8250_pnp.c
2255 @@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
2256 { "FUJ02E6", 0 },
2257 /* Fujitsu Wacom 2FGT Tablet PC device */
2258 { "FUJ02E7", 0 },
2259 + /* Fujitsu Wacom 1FGT Tablet PC device */
2260 + { "FUJ02E9", 0 },
2261 /*
2262 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
2263 * disguise)
2264 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
2265 index c2809f2..b12237f 100644
2266 --- a/drivers/staging/hv/Hv.c
2267 +++ b/drivers/staging/hv/Hv.c
2268 @@ -306,9 +306,9 @@ void HvCleanup(void)
2269 DPRINT_ENTER(VMBUS);
2270
2271 if (gHvContext.SignalEventBuffer) {
2272 + kfree(gHvContext.SignalEventBuffer);
2273 gHvContext.SignalEventBuffer = NULL;
2274 gHvContext.SignalEventParam = NULL;
2275 - kfree(gHvContext.SignalEventBuffer);
2276 }
2277
2278 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
2279 diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c
2280 index 26d7997..f05f4e1 100644
2281 --- a/drivers/staging/hv/RndisFilter.c
2282 +++ b/drivers/staging/hv/RndisFilter.c
2283 @@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
2284
2285 ret = RndisFilterSetPacketFilter(Device,
2286 NDIS_PACKET_TYPE_BROADCAST |
2287 + NDIS_PACKET_TYPE_ALL_MULTICAST |
2288 NDIS_PACKET_TYPE_DIRECTED);
2289 if (ret == 0)
2290 Device->State = RNDIS_DEV_DATAINITIALIZED;
2291 diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
2292 index 0d7459e..4c3c8bc 100644
2293 --- a/drivers/staging/hv/netvsc_drv.c
2294 +++ b/drivers/staging/hv/netvsc_drv.c
2295 @@ -413,8 +413,7 @@ static int netvsc_probe(struct device *device)
2296 if (!net_drv_obj->Base.OnDeviceAdd)
2297 return -1;
2298
2299 - net = alloc_netdev(sizeof(struct net_device_context), "seth%d",
2300 - ether_setup);
2301 + net = alloc_etherdev(sizeof(struct net_device_context));
2302 if (!net)
2303 return -1;
2304
2305 diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
2306 index 6da1021..a2566f1 100644
2307 --- a/drivers/staging/usbip/usbip_event.c
2308 +++ b/drivers/staging/usbip/usbip_event.c
2309 @@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud)
2310 {
2311 struct usbip_task *eh = &ud->eh;
2312
2313 + if (eh->thread == current)
2314 + return; /* do not wait for myself */
2315 +
2316 wait_for_completion(&eh->thread_done);
2317 usbip_dbg_eh("usbip_eh has finished\n");
2318 }
2319 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2320 index ca479a7..d9d0bf5 100644
2321 --- a/drivers/usb/core/driver.c
2322 +++ b/drivers/usb/core/driver.c
2323 @@ -1255,9 +1255,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
2324 udev->state == USB_STATE_SUSPENDED)
2325 goto done;
2326
2327 - udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
2328 -
2329 if (msg.event & PM_EVENT_AUTO) {
2330 + udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
2331 status = autosuspend_check(udev, 0);
2332 if (status < 0)
2333 goto done;
2334 @@ -1789,6 +1788,34 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
2335 return status;
2336 }
2337
2338 +static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
2339 +{
2340 + int w, i;
2341 + struct usb_interface *intf;
2342 +
2343 + /* Remote wakeup is needed only when we actually go to sleep.
2344 + * For things like FREEZE and QUIESCE, if the device is already
2345 + * autosuspended then its current wakeup setting is okay.
2346 + */
2347 + if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
2348 + udev->do_remote_wakeup = 0;
2349 + return;
2350 + }
2351 +
2352 + /* If remote wakeup is permitted, see whether any interface drivers
2353 + * actually want it.
2354 + */
2355 + w = 0;
2356 + if (device_may_wakeup(&udev->dev) && udev->actconfig) {
2357 + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
2358 + intf = udev->actconfig->interface[i];
2359 + w |= intf->needs_remote_wakeup;
2360 + }
2361 + }
2362 +
2363 + udev->do_remote_wakeup = w;
2364 +}
2365 +
2366 int usb_suspend(struct device *dev, pm_message_t msg)
2367 {
2368 struct usb_device *udev;
2369 @@ -1808,6 +1835,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
2370 }
2371
2372 udev->skip_sys_resume = 0;
2373 + choose_wakeup(udev, msg);
2374 return usb_external_suspend_device(udev, msg);
2375 }
2376
2377 diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
2378 index bdf87a8..2c95153 100644
2379 --- a/drivers/usb/core/generic.c
2380 +++ b/drivers/usb/core/generic.c
2381 @@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
2382 * than a vendor-specific driver. */
2383 else if (udev->descriptor.bDeviceClass !=
2384 USB_CLASS_VENDOR_SPEC &&
2385 - (!desc || desc->bInterfaceClass !=
2386 + (desc && desc->bInterfaceClass !=
2387 USB_CLASS_VENDOR_SPEC)) {
2388 best = c;
2389 break;
2390 diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
2391 index 97b40ce..4a6366a 100644
2392 --- a/drivers/usb/core/inode.c
2393 +++ b/drivers/usb/core/inode.c
2394 @@ -515,13 +515,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
2395 *dentry = NULL;
2396 mutex_lock(&parent->d_inode->i_mutex);
2397 *dentry = lookup_one_len(name, parent, strlen(name));
2398 - if (!IS_ERR(dentry)) {
2399 + if (!IS_ERR(*dentry)) {
2400 if ((mode & S_IFMT) == S_IFDIR)
2401 error = usbfs_mkdir (parent->d_inode, *dentry, mode);
2402 else
2403 error = usbfs_create (parent->d_inode, *dentry, mode);
2404 } else
2405 - error = PTR_ERR(dentry);
2406 + error = PTR_ERR(*dentry);
2407 mutex_unlock(&parent->d_inode->i_mutex);
2408
2409 return error;
2410 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2411 index 9bc95fe..1a48aac 100644
2412 --- a/drivers/usb/core/message.c
2413 +++ b/drivers/usb/core/message.c
2414 @@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
2415 /* If not, reinstate the old alternate settings */
2416 if (retval < 0) {
2417 reset_old_alts:
2418 - for (; i >= 0; i--) {
2419 + for (i--; i >= 0; i--) {
2420 struct usb_interface *intf = config->interface[i];
2421 struct usb_host_interface *alt;
2422
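
The for-loop fix above is a rollback off-by-one: at reset_old_alts, interface i is the one whose new setting failed to install, so presumably only interfaces 0..i-1 should be restored; starting the loop at i would "restore" an interface that was never switched. The idiom in miniature, with a hypothetical apply() that fails before taking effect:

    #include <assert.h>

    #define N 4
    static int applied[N];

    static int apply(int i)
    {
        if (i == 2)
            return -1;              /* fails before taking effect */
        applied[i] = 1;
        return 0;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++)
            if (apply(i) < 0)
                break;

        /* entry i failed and was never applied, so roll back from i-1 */
        for (i--; i >= 0; i--)
            applied[i] = 0;

        for (i = 0; i < N; i++)
            assert(applied[i] == 0);
        return 0;
    }
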
2423 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2424 index 9c90b67..efa0372 100644
2425 --- a/drivers/usb/host/ehci-hcd.c
2426 +++ b/drivers/usb/host/ehci-hcd.c
2427 @@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
2428 */
2429 ehci->periodic_size = DEFAULT_I_TDPS;
2430 INIT_LIST_HEAD(&ehci->cached_itd_list);
2431 + INIT_LIST_HEAD(&ehci->cached_sitd_list);
2432 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
2433 return retval;
2434
2435 diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
2436 index aeda96e..1f3f01e 100644
2437 --- a/drivers/usb/host/ehci-mem.c
2438 +++ b/drivers/usb/host/ehci-mem.c
2439 @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
2440
2441 static void ehci_mem_cleanup (struct ehci_hcd *ehci)
2442 {
2443 - free_cached_itd_list(ehci);
2444 + free_cached_lists(ehci);
2445 if (ehci->async)
2446 qh_put (ehci->async);
2447 ehci->async = NULL;
2448 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2449 index df533ce..2064045 100644
2450 --- a/drivers/usb/host/ehci-sched.c
2451 +++ b/drivers/usb/host/ehci-sched.c
2452 @@ -2137,13 +2137,27 @@ sitd_complete (
2453 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2454 }
2455 iso_stream_put (ehci, stream);
2456 - /* OK to recycle this SITD now that its completion callback ran. */
2457 +
2458 done:
2459 sitd->urb = NULL;
2460 - sitd->stream = NULL;
2461 - list_move(&sitd->sitd_list, &stream->free_list);
2462 - iso_stream_put(ehci, stream);
2463 -
2464 + if (ehci->clock_frame != sitd->frame) {
2465 + /* OK to recycle this SITD now. */
2466 + sitd->stream = NULL;
2467 + list_move(&sitd->sitd_list, &stream->free_list);
2468 + iso_stream_put(ehci, stream);
2469 + } else {
2470 + /* HW might remember this SITD, so we can't recycle it yet.
2471 + * Move it to a safe place until a new frame starts.
2472 + */
2473 + list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
2474 + if (stream->refcount == 2) {
2475 + /* If iso_stream_put() were called here, stream
2476 + * would be freed. Instead, just prevent reuse.
2477 + */
2478 + stream->ep->hcpriv = NULL;
2479 + stream->ep = NULL;
2480 + }
2481 + }
2482 return retval;
2483 }
2484
2485 @@ -2209,9 +2223,10 @@ done:
2486
2487 /*-------------------------------------------------------------------------*/
2488
2489 -static void free_cached_itd_list(struct ehci_hcd *ehci)
2490 +static void free_cached_lists(struct ehci_hcd *ehci)
2491 {
2492 struct ehci_itd *itd, *n;
2493 + struct ehci_sitd *sitd, *sn;
2494
2495 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2496 struct ehci_iso_stream *stream = itd->stream;
2497 @@ -2219,6 +2234,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
2498 list_move(&itd->itd_list, &stream->free_list);
2499 iso_stream_put(ehci, stream);
2500 }
2501 +
2502 + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2503 + struct ehci_iso_stream *stream = sitd->stream;
2504 + sitd->stream = NULL;
2505 + list_move(&sitd->sitd_list, &stream->free_list);
2506 + iso_stream_put(ehci, stream);
2507 + }
2508 }
2509
2510 /*-------------------------------------------------------------------------*/
2511 @@ -2245,7 +2267,7 @@ scan_periodic (struct ehci_hcd *ehci)
2512 clock_frame = -1;
2513 }
2514 if (ehci->clock_frame != clock_frame) {
2515 - free_cached_itd_list(ehci);
2516 + free_cached_lists(ehci);
2517 ehci->clock_frame = clock_frame;
2518 }
2519 clock %= mod;
2520 @@ -2408,7 +2430,7 @@ restart:
2521 clock = now;
2522 clock_frame = clock >> 3;
2523 if (ehci->clock_frame != clock_frame) {
2524 - free_cached_itd_list(ehci);
2525 + free_cached_lists(ehci);
2526 ehci->clock_frame = clock_frame;
2527 }
2528 } else {
2529 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2530 index b1dce96..556c0b4 100644
2531 --- a/drivers/usb/host/ehci.h
2532 +++ b/drivers/usb/host/ehci.h
2533 @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
2534 int next_uframe; /* scan periodic, start here */
2535 unsigned periodic_sched; /* periodic activity count */
2536
2537 - /* list of itds completed while clock_frame was still active */
2538 + /* list of itds & sitds completed while clock_frame was still active */
2539 struct list_head cached_itd_list;
2540 + struct list_head cached_sitd_list;
2541 unsigned clock_frame;
2542
2543 /* per root hub port */
2544 @@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
2545 clear_bit (action, &ehci->actions);
2546 }
2547
2548 -static void free_cached_itd_list(struct ehci_hcd *ehci);
2549 +static void free_cached_lists(struct ehci_hcd *ehci);
2550
2551 /*-------------------------------------------------------------------------*/
2552
2553 diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
2554 index 32bbce9..65cac8c 100644
2555 --- a/drivers/usb/host/ohci-hub.c
2556 +++ b/drivers/usb/host/ohci-hub.c
2557 @@ -697,7 +697,7 @@ static int ohci_hub_control (
2558 u16 wLength
2559 ) {
2560 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
2561 - int ports = hcd_to_bus (hcd)->root_hub->maxchild;
2562 + int ports = ohci->num_ports;
2563 u32 temp;
2564 int retval = 0;
2565
2566 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2567 index bffcef7..6c1f673 100644
2568 --- a/drivers/usb/host/xhci-mem.c
2569 +++ b/drivers/usb/host/xhci-mem.c
2570 @@ -549,6 +549,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
2571 return EP_INTERVAL(interval);
2572 }
2573
2574 +/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
2575 + * High speed endpoint descriptors can define "the number of additional
2576 + * transaction opportunities per microframe", but that goes in the Max Burst
2577 + * endpoint context field.
2578 + */
2579 +static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
2580 + struct usb_host_endpoint *ep)
2581 +{
2582 + if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
2583 + return 0;
2584 + return ep->ss_ep_comp->desc.bmAttributes;
2585 +}
2586 +
2587 static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
2588 struct usb_host_endpoint *ep)
2589 {
2590 @@ -579,6 +592,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
2591 return type;
2592 }
2593
2594 +/* Return the maximum endpoint service interval time (ESIT) payload.
2595 + * Basically, this is the maxpacket size, multiplied by the burst size
2596 + * and mult size.
2597 + */
2598 +static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
2599 + struct usb_device *udev,
2600 + struct usb_host_endpoint *ep)
2601 +{
2602 + int max_burst;
2603 + int max_packet;
2604 +
2605 + /* Only applies for interrupt or isochronous endpoints */
2606 + if (usb_endpoint_xfer_control(&ep->desc) ||
2607 + usb_endpoint_xfer_bulk(&ep->desc))
2608 + return 0;
2609 +
2610 + if (udev->speed == USB_SPEED_SUPER) {
2611 + if (ep->ss_ep_comp)
2612 + return ep->ss_ep_comp->desc.wBytesPerInterval;
2613 + xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
2614 + /* Assume no bursts, no multiple opportunities to send. */
2615 + return ep->desc.wMaxPacketSize;
2616 + }
2617 +
2618 + max_packet = ep->desc.wMaxPacketSize & 0x3ff;
2619 + max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
2620 + /* A 0 in max burst means 1 transfer per ESIT */
2621 + return max_packet * (max_burst + 1);
2622 +}
2623 +
2624 int xhci_endpoint_init(struct xhci_hcd *xhci,
2625 struct xhci_virt_device *virt_dev,
2626 struct usb_device *udev,
2627 @@ -590,6 +633,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2628 struct xhci_ring *ep_ring;
2629 unsigned int max_packet;
2630 unsigned int max_burst;
2631 + u32 max_esit_payload;
2632
2633 ep_index = xhci_get_endpoint_index(&ep->desc);
2634 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2635 @@ -611,6 +655,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2636 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
2637
2638 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
2639 + ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
2640
2641 /* FIXME dig Mult and streams info out of ep companion desc */
2642
2643 @@ -656,6 +701,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2644 default:
2645 BUG();
2646 }
2647 + max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
2648 + ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
2649 +
2650 + /*
2651 + * XXX no idea how to calculate the average TRB buffer length for bulk
2652 + * endpoints, as the driver gives us no clue how big each scatter gather
2653 + * list entry (or buffer) is going to be.
2654 + *
2655 + * For isochronous and interrupt endpoints, we set it to the max
2656 + * available, until we have new API in the USB core to allow drivers to
2657 + * declare how much bandwidth they actually need.
2658 + *
2659 + * Normally, it would be calculated by taking the total of the buffer
2660 + * lengths in the TD and then dividing by the number of TRBs in a TD,
2661 + * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
2662 + * use Event Data TRBs, and we don't chain in a link TRB on short
2663 + * transfers, we're basically dividing by 1.
2664 + */
2665 + ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
2666 +
2667 /* FIXME Debug endpoint context */
2668 return 0;
2669 }
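
For high-speed periodic endpoints, the hunk above unpacks two fields from wMaxPacketSize: the low bits (& 0x3ff) give the packet size and bits 12:11 (& 0x1800) give the number of additional transaction opportunities per microframe. A worked example of the max ESIT payload computation for such an endpoint:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* High-speed periodic endpoint: 512-byte packets plus one extra
         * transaction opportunity per microframe (bits 12:11 == 1). */
        uint16_t wMaxPacketSize = 512 | (1 << 11);          /* 0x0a00 */

        unsigned max_packet = wMaxPacketSize & 0x3ff;        /* 512 */
        unsigned max_burst = (wMaxPacketSize & 0x1800) >> 11; /* 1 */

        /* max ESIT payload, as computed by xhci_get_max_esit_payload() */
        printf("%u bytes per interval\n", max_packet * (max_burst + 1));
        /* prints: 1024 bytes per interval */
        return 0;
    }
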
2670 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2671 index 8778135..9e904a6 100644
2672 --- a/drivers/usb/host/xhci.h
2673 +++ b/drivers/usb/host/xhci.h
2674 @@ -609,6 +609,10 @@ struct xhci_ep_ctx {
2675 #define MAX_PACKET_MASK (0xffff << 16)
2676 #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
2677
2678 +/* tx_info bitmasks */
2679 +#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
2680 +#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
2681 +
2682
2683 /**
2684 * struct xhci_input_control_context
2685 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
2686 index 0cfd621..a442989 100644
2687 --- a/drivers/usb/serial/sierra.c
2688 +++ b/drivers/usb/serial/sierra.c
2689 @@ -229,6 +229,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
2690 static struct usb_device_id id_table [] = {
2691 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
2692 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
2693 + { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
2694 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
2695
2696 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
2697 diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
2698 index 1ed3d55..17726a0 100644
2699 --- a/drivers/w1/slaves/w1_therm.c
2700 +++ b/drivers/w1/slaves/w1_therm.c
2701 @@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
2702
2703 static inline int w1_DS18B20_convert_temp(u8 rom[9])
2704 {
2705 - int t = ((s16)rom[1] << 8) | rom[0];
2706 - t = t*1000/16;
2707 - return t;
2708 + s16 t = le16_to_cpup((__le16 *)rom);
2709 + return t*1000/16;
2710 }
2711
2712 static inline int w1_DS18S20_convert_temp(u8 rom[9])
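
The w1_therm rewrite above is more than cleanup: in the old version the u8 is cast to s16 before the shift, so the 16-bit value is assembled in a plain int and never sign-extended, turning sub-zero readings into large positive ones. Storing the little-endian scratchpad pair into an s16 first restores the sign. A userspace check, with the le16 assembly open-coded in place of le16_to_cpup():

    #include <stdint.h>
    #include <stdio.h>

    typedef int16_t s16;
    typedef uint8_t u8;

    /* Old conversion: the cast happens before the shift, so the value
     * is never sign-extended and negative readings come out wrong. */
    static int convert_old(const u8 rom[9])
    {
        int t = ((s16)rom[1] << 8) | rom[0];
        return t * 1000 / 16;
    }

    /* Patched conversion: assemble the little-endian pair into an s16
     * first, keeping the sign. */
    static int convert_new(const u8 rom[9])
    {
        s16 t = (s16)(rom[0] | (rom[1] << 8));
        return t * 1000 / 16;
    }

    int main(void)
    {
        u8 cold[9] = { 0x5e, 0xff };  /* 0xff5e == -162 counts == -10.125 C */
        printf("old=%d new=%d\n", convert_old(cold), convert_new(cold));
        /* old=4085875 new=-10125 (millidegrees C) */
        return 0;
    }
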
2713 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2714 index c568779..cae75c1 100644
2715 --- a/fs/ext4/extents.c
2716 +++ b/fs/ext4/extents.c
2717 @@ -3767,7 +3767,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2718 __u64 start, __u64 len)
2719 {
2720 ext4_lblk_t start_blk;
2721 - ext4_lblk_t len_blks;
2722 int error = 0;
2723
2724 /* fallback to generic here if not in extents fmt */
2725 @@ -3781,8 +3780,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2726 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
2727 error = ext4_xattr_fiemap(inode, fieinfo);
2728 } else {
2729 + ext4_lblk_t len_blks;
2730 + __u64 last_blk;
2731 +
2732 start_blk = start >> inode->i_sb->s_blocksize_bits;
2733 - len_blks = len >> inode->i_sb->s_blocksize_bits;
2734 + last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
2735 + if (last_blk >= EXT_MAX_BLOCK)
2736 + last_blk = EXT_MAX_BLOCK-1;
2737 + len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
2738
2739 /*
2740 * Walk the extent tree gathering extent information.
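
Why the clamp in the ext4_fiemap() hunk: len_blks is a 32-bit ext4_lblk_t, so a byte range whose block count reaches 2^32 silently wraps; computing the last block in 64 bits, capping it below EXT_MAX_BLOCK, and only then taking the difference cannot overflow. A worked example (EXT_MAX_BLOCK assumed to be the 32-bit maximum, as in ext4 of this era):

    #include <stdint.h>
    #include <stdio.h>

    #define EXT_MAX_BLOCK 0xffffffffULL   /* assumed: last logical block */

    int main(void)
    {
        unsigned blkbits = 12;                  /* 4 KiB blocks */
        uint64_t start = 0, len = 1ULL << 44;   /* oversized FIEMAP request */

        /* old: len >> blkbits == 2^32, which wraps a 32-bit count to 0 */
        uint32_t old_len_blks = (uint32_t)(len >> blkbits);

        /* new: clamp the last block, then take the difference */
        uint64_t start_blk = start >> blkbits;
        uint64_t last_blk = (start + len - 1) >> blkbits;
        if (last_blk >= EXT_MAX_BLOCK)
            last_blk = EXT_MAX_BLOCK - 1;
        uint32_t new_len_blks = (uint32_t)(last_blk - start_blk + 1);

        printf("old=%u new=%u\n", old_len_blks, new_len_blks);
        /* old=0 new=4294967295: the old request degenerated to nothing */
        return 0;
    }
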
2741 diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
2742 index 7f24a0b..1aba003 100644
2743 --- a/fs/jfs/resize.c
2744 +++ b/fs/jfs/resize.c
2745 @@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2746 struct inode *iplist[1];
2747 struct jfs_superblock *j_sb, *j_sb2;
2748 uint old_agsize;
2749 + int agsizechanged = 0;
2750 struct buffer_head *bh, *bh2;
2751
2752 /* If the volume hasn't grown, get out now */
2753 @@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2754 */
2755 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
2756 goto error_out;
2757 +
2758 + agsizechanged |= (bmp->db_agsize != old_agsize);
2759 +
2760 /*
2761 * the map now has extended to cover additional nblocks:
2762 * dn_mapsize = oldMapsize + nblocks;
2763 @@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2764 * will correctly identify the new ag);
2765 */
2766 /* if new AG size the same as old AG size, done! */
2767 - if (bmp->db_agsize != old_agsize) {
2768 + if (agsizechanged) {
2769 if ((rc = diExtendFS(ipimap, ipbmap)))
2770 goto error_out;
2771
2772 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
2773 index bd39abc..37d555c 100644
2774 --- a/fs/nfs/client.c
2775 +++ b/fs/nfs/client.c
2776 @@ -965,6 +965,8 @@ out_error:
2777 static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
2778 {
2779 target->flags = source->flags;
2780 + target->rsize = source->rsize;
2781 + target->wsize = source->wsize;
2782 target->acregmin = source->acregmin;
2783 target->acregmax = source->acregmax;
2784 target->acdirmin = source->acdirmin;
2785 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2786 index af6948d..b5d55d3 100644
2787 --- a/fs/nfs/dir.c
2788 +++ b/fs/nfs/dir.c
2789 @@ -837,6 +837,8 @@ out_zap_parent:
2790 /* If we have submounts, don't unhash ! */
2791 if (have_submounts(dentry))
2792 goto out_valid;
2793 + if (dentry->d_flags & DCACHE_DISCONNECTED)
2794 + goto out_valid;
2795 shrink_dcache_parent(dentry);
2796 }
2797 d_drop(dentry);
2798 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2799 index bbf72d8..718f3fb 100644
2800 --- a/fs/nfsd/nfs4xdr.c
2801 +++ b/fs/nfsd/nfs4xdr.c
2802 @@ -160,10 +160,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
2803 argp->p = page_address(argp->pagelist[0]);
2804 argp->pagelist++;
2805 if (argp->pagelen < PAGE_SIZE) {
2806 - argp->end = p + (argp->pagelen>>2);
2807 + argp->end = argp->p + (argp->pagelen>>2);
2808 argp->pagelen = 0;
2809 } else {
2810 - argp->end = p + (PAGE_SIZE>>2);
2811 + argp->end = argp->p + (PAGE_SIZE>>2);
2812 argp->pagelen -= PAGE_SIZE;
2813 }
2814 memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
2815 @@ -1425,10 +1425,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
2816 argp->p = page_address(argp->pagelist[0]);
2817 argp->pagelist++;
2818 if (argp->pagelen < PAGE_SIZE) {
2819 - argp->end = p + (argp->pagelen>>2);
2820 + argp->end = argp->p + (argp->pagelen>>2);
2821 argp->pagelen = 0;
2822 } else {
2823 - argp->end = p + (PAGE_SIZE>>2);
2824 + argp->end = argp->p + (PAGE_SIZE>>2);
2825 argp->pagelen -= PAGE_SIZE;
2826 }
2827 }
2828 diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
2829 index 21c808f..b18c6d6 100644
2830 --- a/fs/ocfs2/buffer_head_io.c
2831 +++ b/fs/ocfs2/buffer_head_io.c
2832 @@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2833 struct buffer_head *bh)
2834 {
2835 int ret = 0;
2836 + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2837
2838 mlog_entry_void();
2839
2840 @@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2841
2842 get_bh(bh); /* for end_buffer_write_sync() */
2843 bh->b_end_io = end_buffer_write_sync;
2844 + ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
2845 submit_bh(WRITE, bh);
2846
2847 wait_on_buffer(bh);
2848 diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
2849 index 02bf178..18bc101 100644
2850 --- a/fs/ocfs2/dlm/dlmfs.c
2851 +++ b/fs/ocfs2/dlm/dlmfs.c
2852 @@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
2853 if ((count + *ppos) > i_size_read(inode))
2854 readlen = i_size_read(inode) - *ppos;
2855 else
2856 - readlen = count - *ppos;
2857 + readlen = count;
2858
2859 lvb_buf = kmalloc(readlen, GFP_NOFS);
2860 if (!lvb_buf)
2861 diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
2862 index 88459bd..ec4d97f 100644
2863 --- a/fs/ocfs2/inode.c
2864 +++ b/fs/ocfs2/inode.c
2865 @@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
2866 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2867 if (IS_ERR(handle)) {
2868 status = PTR_ERR(handle);
2869 + handle = NULL;
2870 mlog_errno(status);
2871 goto out;
2872 }
2873 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2874 index 8ae65c9..a8e8572 100644
2875 --- a/fs/ocfs2/refcounttree.c
2876 +++ b/fs/ocfs2/refcounttree.c
2877 @@ -4083,6 +4083,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
2878 di->i_attr = s_di->i_attr;
2879
2880 if (preserve) {
2881 + t_inode->i_uid = s_inode->i_uid;
2882 + t_inode->i_gid = s_inode->i_gid;
2883 + t_inode->i_mode = s_inode->i_mode;
2884 di->i_uid = s_di->i_uid;
2885 di->i_gid = s_di->i_gid;
2886 di->i_mode = s_di->i_mode;
2887 diff --git a/fs/proc/base.c b/fs/proc/base.c
2888 index 3cd449d..8dce96c 100644
2889 --- a/fs/proc/base.c
2890 +++ b/fs/proc/base.c
2891 @@ -2910,7 +2910,7 @@ out_no_task:
2892 */
2893 static const struct pid_entry tid_base_stuff[] = {
2894 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2895 - DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
2896 + DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2897 REG("environ", S_IRUSR, proc_environ_operations),
2898 INF("auxv", S_IRUSR, proc_pid_auxv),
2899 ONE("status", S_IRUGO, proc_pid_status),
2900 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
2901 index c094f58..1e686ee 100644
2902 --- a/fs/reiserfs/dir.c
2903 +++ b/fs/reiserfs/dir.c
2904 @@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
2905 struct reiserfs_de_head *deh)
2906 {
2907 struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
2908 - if (reiserfs_expose_privroot(dir->d_sb))
2909 - return 0;
2910 return (dir == dir->d_parent && privroot->d_inode &&
2911 deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
2912 }
2913 diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
2914 index 81f09fa..0392e82 100644
2915 --- a/fs/reiserfs/xattr.c
2916 +++ b/fs/reiserfs/xattr.c
2917 @@ -557,7 +557,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
2918 if (!err && new_size < i_size_read(dentry->d_inode)) {
2919 struct iattr newattrs = {
2920 .ia_ctime = current_fs_time(inode->i_sb),
2921 - .ia_size = buffer_size,
2922 + .ia_size = new_size,
2923 .ia_valid = ATTR_SIZE | ATTR_CTIME,
2924 };
2925
2926 @@ -976,21 +976,13 @@ int reiserfs_permission(struct inode *inode, int mask)
2927 return generic_permission(inode, mask, NULL);
2928 }
2929
2930 -/* This will catch lookups from the fs root to .reiserfs_priv */
2931 -static int
2932 -xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
2933 +static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
2934 {
2935 - struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
2936 - if (container_of(q1, struct dentry, d_name) == priv_root)
2937 - return -ENOENT;
2938 - if (q1->len == name->len &&
2939 - !memcmp(q1->name, name->name, name->len))
2940 - return 0;
2941 - return 1;
2942 + return -EPERM;
2943 }
2944
2945 static const struct dentry_operations xattr_lookup_poison_ops = {
2946 - .d_compare = xattr_lookup_poison,
2947 + .d_revalidate = xattr_hide_revalidate,
2948 };
2949
2950 int reiserfs_lookup_privroot(struct super_block *s)
2951 @@ -1004,8 +996,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
2952 strlen(PRIVROOT_NAME));
2953 if (!IS_ERR(dentry)) {
2954 REISERFS_SB(s)->priv_root = dentry;
2955 - if (!reiserfs_expose_privroot(s))
2956 - s->s_root->d_op = &xattr_lookup_poison_ops;
2957 + dentry->d_op = &xattr_lookup_poison_ops;
2958 if (dentry->d_inode)
2959 dentry->d_inode->i_flags |= S_PRIVATE;
2960 } else
2961 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
2962 index 77414db..146d491 100644
2963 --- a/fs/xfs/linux-2.6/xfs_super.c
2964 +++ b/fs/xfs/linux-2.6/xfs_super.c
2965 @@ -1160,6 +1160,7 @@ xfs_fs_put_super(
2966
2967 xfs_unmountfs(mp);
2968 xfs_freesb(mp);
2969 + xfs_inode_shrinker_unregister(mp);
2970 xfs_icsb_destroy_counters(mp);
2971 xfs_close_devices(mp);
2972 xfs_dmops_put(mp);
2973 @@ -1523,6 +1524,8 @@ xfs_fs_fill_super(
2974 if (error)
2975 goto fail_vnrele;
2976
2977 + xfs_inode_shrinker_register(mp);
2978 +
2979 kfree(mtpt);
2980 return 0;
2981
2982 @@ -1767,6 +1770,7 @@ init_xfs_fs(void)
2983 goto out_cleanup_procfs;
2984
2985 vfs_initquota();
2986 + xfs_inode_shrinker_init();
2987
2988 error = register_filesystem(&xfs_fs_type);
2989 if (error)
2990 @@ -1794,6 +1798,7 @@ exit_xfs_fs(void)
2991 {
2992 vfs_exitquota();
2993 unregister_filesystem(&xfs_fs_type);
2994 + xfs_inode_shrinker_destroy();
2995 xfs_sysctl_unregister();
2996 xfs_cleanup_procfs();
2997 xfs_buf_terminate();
2998 diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
2999 index 6b6b394..57adf2d 100644
3000 --- a/fs/xfs/linux-2.6/xfs_sync.c
3001 +++ b/fs/xfs/linux-2.6/xfs_sync.c
3002 @@ -95,7 +95,8 @@ xfs_inode_ag_walk(
3003 struct xfs_perag *pag, int flags),
3004 int flags,
3005 int tag,
3006 - int exclusive)
3007 + int exclusive,
3008 + int *nr_to_scan)
3009 {
3010 struct xfs_perag *pag = &mp->m_perag[ag];
3011 uint32_t first_index;
3012 @@ -135,7 +136,7 @@ restart:
3013 if (error == EFSCORRUPTED)
3014 break;
3015
3016 - } while (1);
3017 + } while ((*nr_to_scan)--);
3018
3019 if (skipped) {
3020 delay(1);
3021 @@ -153,23 +154,30 @@ xfs_inode_ag_iterator(
3022 struct xfs_perag *pag, int flags),
3023 int flags,
3024 int tag,
3025 - int exclusive)
3026 + int exclusive,
3027 + int *nr_to_scan)
3028 {
3029 int error = 0;
3030 int last_error = 0;
3031 xfs_agnumber_t ag;
3032 + int nr;
3033
3034 + nr = nr_to_scan ? *nr_to_scan : INT_MAX;
3035 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
3036 if (!mp->m_perag[ag].pag_ici_init)
3037 continue;
3038 error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
3039 - exclusive);
3040 + exclusive, &nr);
3041 if (error) {
3042 last_error = error;
3043 if (error == EFSCORRUPTED)
3044 break;
3045 }
3046 + if (nr <= 0)
3047 + break;
3048 }
3049 + if (nr_to_scan)
3050 + *nr_to_scan = nr;
3051 return XFS_ERROR(last_error);
3052 }
3053
3054 @@ -289,7 +297,7 @@ xfs_sync_data(
3055 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
3056
3057 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
3058 - XFS_ICI_NO_TAG, 0);
3059 + XFS_ICI_NO_TAG, 0, NULL);
3060 if (error)
3061 return XFS_ERROR(error);
3062
3063 @@ -311,7 +319,7 @@ xfs_sync_attr(
3064 ASSERT((flags & ~SYNC_WAIT) == 0);
3065
3066 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
3067 - XFS_ICI_NO_TAG, 0);
3068 + XFS_ICI_NO_TAG, 0, NULL);
3069 }
3070
3071 STATIC int
3072 @@ -679,6 +687,7 @@ __xfs_inode_set_reclaim_tag(
3073 radix_tree_tag_set(&pag->pag_ici_root,
3074 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
3075 XFS_ICI_RECLAIM_TAG);
3076 + pag->pag_ici_reclaimable++;
3077 }
3078
3079 /*
3080 @@ -710,6 +719,7 @@ __xfs_inode_clear_reclaim_tag(
3081 {
3082 radix_tree_tag_clear(&pag->pag_ici_root,
3083 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
3084 + pag->pag_ici_reclaimable--;
3085 }
3086
3087 STATIC int
3088 @@ -770,5 +780,88 @@ xfs_reclaim_inodes(
3089 int mode)
3090 {
3091 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
3092 - XFS_ICI_RECLAIM_TAG, 1);
3093 + XFS_ICI_RECLAIM_TAG, 1, NULL);
3094 +}
3095 +
3096 +/*
3097 + * Shrinker infrastructure.
3098 + *
3099 + * This is all far more complex than it needs to be. It keeps a global list of
3100 + * mounts because a shrinker callback is invoked without any per-mount context;
3101 + * making the shrinkers pass a context would remove this global state.
3102 + */
3103 +static LIST_HEAD(xfs_mount_list);
3104 +static struct rw_semaphore xfs_mount_list_lock;
3105 +
3106 +static int
3107 +xfs_reclaim_inode_shrink(
3108 + int nr_to_scan,
3109 + gfp_t gfp_mask)
3110 +{
3111 + struct xfs_mount *mp;
3112 + xfs_agnumber_t ag;
3113 + int reclaimable = 0;
3114 +
3115 + if (nr_to_scan) {
3116 + if (!(gfp_mask & __GFP_FS))
3117 + return -1;
3118 +
3119 + down_read(&xfs_mount_list_lock);
3120 + list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
3121 + xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
3122 + XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
3123 + if (nr_to_scan <= 0)
3124 + break;
3125 + }
3126 + up_read(&xfs_mount_list_lock);
3127 + }
3128 +
3129 + down_read(&xfs_mount_list_lock);
3130 + list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
3131 + for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
3132 +
3133 + if (!mp->m_perag[ag].pag_ici_init)
3134 + continue;
3135 + reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
3136 + }
3137 + }
3138 + up_read(&xfs_mount_list_lock);
3139 + return reclaimable;
3140 +}
3141 +
3142 +static struct shrinker xfs_inode_shrinker = {
3143 + .shrink = xfs_reclaim_inode_shrink,
3144 + .seeks = DEFAULT_SEEKS,
3145 +};
3146 +
3147 +void __init
3148 +xfs_inode_shrinker_init(void)
3149 +{
3150 + init_rwsem(&xfs_mount_list_lock);
3151 + register_shrinker(&xfs_inode_shrinker);
3152 +}
3153 +
3154 +void
3155 +xfs_inode_shrinker_destroy(void)
3156 +{
3157 + ASSERT(list_empty(&xfs_mount_list));
3158 + unregister_shrinker(&xfs_inode_shrinker);
3159 +}
3160 +
3161 +void
3162 +xfs_inode_shrinker_register(
3163 + struct xfs_mount *mp)
3164 +{
3165 + down_write(&xfs_mount_list_lock);
3166 + list_add_tail(&mp->m_mplist, &xfs_mount_list);
3167 + up_write(&xfs_mount_list_lock);
3168 +}
3169 +
3170 +void
3171 +xfs_inode_shrinker_unregister(
3172 + struct xfs_mount *mp)
3173 +{
3174 + down_write(&xfs_mount_list_lock);
3175 + list_del(&mp->m_mplist);
3176 + up_write(&xfs_mount_list_lock);
3177 }
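The xfs_sync.c hunk above plugs XFS inode reclaim into the kernel's shrinker API: a struct shrinker with a .shrink callback is registered once at init, and under memory pressure the VM either asks how many objects are reclaimable (nr_to_scan == 0) or asks for that many to be scanned, with the callback always returning the remaining count. A minimal userspace model of that contract, assuming the 2.6.33-era shrink(int nr_to_scan, gfp_t gfp_mask) signature (the gfp_mask gate is omitted, and the counter stands in for the summed per-AG pag_ici_reclaimable totals):

    #include <stdio.h>

    static int cache_objects = 100;    /* stand-in for summed pag_ici_reclaimable */

    /* model of the two-phase shrinker protocol used by the patch */
    static int model_shrink(int nr_to_scan)
    {
        if (nr_to_scan > 0) {
            /* scan phase: XFS walks each AG via xfs_inode_ag_iterator() here */
            int freed = nr_to_scan < cache_objects ? nr_to_scan : cache_objects;
            cache_objects -= freed;
        }
        return cache_objects;          /* always report what is left */
    }

    int main(void)
    {
        printf("reclaimable: %d\n", model_shrink(0));   /* query pass */
        printf("after scan:  %d\n", model_shrink(32));  /* scan pass */
        return 0;
    }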
3178 diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
3179 index ea932b4..0b28c13 100644
3180 --- a/fs/xfs/linux-2.6/xfs_sync.h
3181 +++ b/fs/xfs/linux-2.6/xfs_sync.h
3182 @@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
3183 int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
3184 int xfs_inode_ag_iterator(struct xfs_mount *mp,
3185 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
3186 - int flags, int tag, int write_lock);
3187 + int flags, int tag, int write_lock, int *nr_to_scan);
3188 +
3189 +void xfs_inode_shrinker_init(void);
3190 +void xfs_inode_shrinker_destroy(void);
3191 +void xfs_inode_shrinker_register(struct xfs_mount *mp);
3192 +void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
3193
3194 #endif
3195 diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
3196 index 873e07e..145f596 100644
3197 --- a/fs/xfs/quota/xfs_qm_syscalls.c
3198 +++ b/fs/xfs/quota/xfs_qm_syscalls.c
3199 @@ -891,7 +891,8 @@ xfs_qm_dqrele_all_inodes(
3200 uint flags)
3201 {
3202 ASSERT(mp->m_quotainfo);
3203 - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
3204 + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
3205 + XFS_ICI_NO_TAG, 0, NULL);
3206 }
3207
3208 /*------------------------------------------------------------------------*/
3209 diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
3210 index 6702bd8..1182604 100644
3211 --- a/fs/xfs/xfs_ag.h
3212 +++ b/fs/xfs/xfs_ag.h
3213 @@ -229,6 +229,7 @@ typedef struct xfs_perag
3214 int pag_ici_init; /* incore inode cache initialised */
3215 rwlock_t pag_ici_lock; /* incore inode lock */
3216 struct radix_tree_root pag_ici_root; /* incore inode cache root */
3217 + int pag_ici_reclaimable; /* reclaimable inodes */
3218 #endif
3219 } xfs_perag_t;
3220
3221 diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
3222 index 1df7e45..c95f81a 100644
3223 --- a/fs/xfs/xfs_mount.h
3224 +++ b/fs/xfs/xfs_mount.h
3225 @@ -257,6 +257,7 @@ typedef struct xfs_mount {
3226 wait_queue_head_t m_wait_single_sync_task;
3227 __int64_t m_update_flags; /* sb flags we need to update
3228 on the next remount,rw */
3229 + struct list_head m_mplist; /* inode shrinker mount list */
3230 } xfs_mount_t;
3231
3232 /*
3233 diff --git a/include/linux/ata.h b/include/linux/ata.h
3234 index 20f3156..f8bd0f9 100644
3235 --- a/include/linux/ata.h
3236 +++ b/include/linux/ata.h
3237 @@ -1024,8 +1024,8 @@ static inline int ata_ok(u8 status)
3238
3239 static inline int lba_28_ok(u64 block, u32 n_block)
3240 {
3241 - /* check the ending block number */
3242 - return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
3243 + /* check the ending block number: must be LESS THAN 0x0fffffff */
3244 + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
3245 }
3246
3247 static inline int lba_48_ok(u64 block, u32 n_block)
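The lba_28_ok() change tightens the upper bound: the ending block number (block + n_block) must now be strictly below 0x0fffffff rather than below 1 << 28, so a transfer that would end on the very last LBA28 sector is forced onto the LBA48 path instead. A quick standalone probe of the edge cases, with the patched predicate transcribed onto stdint types:

    #include <stdio.h>
    #include <stdint.h>

    /* patched predicate from include/linux/ata.h, u64/u32 -> stdint */
    static int lba_28_ok(uint64_t block, uint32_t n_block)
    {
        return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
    }

    int main(void)
    {
        /* ending block 0x0ffffffe: still inside the tightened limit */
        printf("%d\n", lba_28_ok(0x0ffffffeULL - 256, 256));   /* 1 */
        /* ending block 0x0fffffff: rejected now, allowed before the fix */
        printf("%d\n", lba_28_ok(0x0fffffffULL - 256, 256));   /* 0 */
        return 0;
    }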
3248 diff --git a/include/linux/poison.h b/include/linux/poison.h
3249 index 2110a81..34066ff 100644
3250 --- a/include/linux/poison.h
3251 +++ b/include/linux/poison.h
3252 @@ -48,6 +48,15 @@
3253 #define POISON_FREE 0x6b /* for use-after-free poisoning */
3254 #define POISON_END 0xa5 /* end-byte of poisoning */
3255
3256 +/********** mm/hugetlb.c **********/
3257 +/*
3258 + * Private mappings of hugetlb pages use this poisoned value for
3259 + * page->mapping. The core VM should not be doing anything with this mapping
3260 + * but futex requires the existence of some page->mapping value even though it
3261 + * is unused if PAGE_MAPPING_ANON is set.
3262 + */
3263 +#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
3264 +
3265 /********** arch/$ARCH/mm/init.c **********/
3266 #define POISON_FREE_INITMEM 0xcc
3267
3268 diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
3269 index 8be5135..2c55a7e 100644
3270 --- a/include/net/sctp/command.h
3271 +++ b/include/net/sctp/command.h
3272 @@ -107,6 +107,7 @@ typedef enum {
3273 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
3274 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
3275 SCTP_CMD_SEND_MSG, /* Send the whole use message */
3276 + SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
3277 SCTP_CMD_LAST
3278 } sctp_verb_t;
3279
3280 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
3281 index 78740ec..fa6cde5 100644
3282 --- a/include/net/sctp/sctp.h
3283 +++ b/include/net/sctp/sctp.h
3284 @@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
3285 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
3286 int sctp_inet_listen(struct socket *sock, int backlog);
3287 void sctp_write_space(struct sock *sk);
3288 +void sctp_data_ready(struct sock *sk, int len);
3289 unsigned int sctp_poll(struct file *file, struct socket *sock,
3290 poll_table *wait);
3291 void sctp_sock_rfree(struct sk_buff *skb);
3292 diff --git a/init/initramfs.c b/init/initramfs.c
3293 index b37d34b..b27d045 100644
3294 --- a/init/initramfs.c
3295 +++ b/init/initramfs.c
3296 @@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
3297 compress_name);
3298 message = msg_buf;
3299 }
3300 - }
3301 + } else
3302 + error("junk in compressed archive");
3303 if (state != Reset)
3304 error("junk in compressed archive");
3305 this_header = saved_offset + my_inptr;
3306 diff --git a/kernel/cred.c b/kernel/cred.c
3307 index 1ed8ca1..099f5e6 100644
3308 --- a/kernel/cred.c
3309 +++ b/kernel/cred.c
3310 @@ -786,8 +786,6 @@ bool creds_are_invalid(const struct cred *cred)
3311 {
3312 if (cred->magic != CRED_MAGIC)
3313 return true;
3314 - if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
3315 - return true;
3316 #ifdef CONFIG_SECURITY_SELINUX
3317 if (selinux_is_enabled()) {
3318 if ((unsigned long) cred->security < PAGE_SIZE)
3319 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
3320 index 32d0ae2..e928e1a 100644
3321 --- a/kernel/perf_event.c
3322 +++ b/kernel/perf_event.c
3323 @@ -4811,7 +4811,7 @@ err_fput_free_put_context:
3324
3325 err_free_put_context:
3326 if (err < 0)
3327 - kfree(event);
3328 + free_event(event);
3329
3330 err_put_context:
3331 if (err < 0)
3332 diff --git a/lib/flex_array.c b/lib/flex_array.c
3333 index 66eef2e..41b1804 100644
3334 --- a/lib/flex_array.c
3335 +++ b/lib/flex_array.c
3336 @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
3337 ret->element_size = element_size;
3338 ret->total_nr_elements = total;
3339 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
3340 - memset(ret->parts[0], FLEX_ARRAY_FREE,
3341 + memset(&ret->parts[0], FLEX_ARRAY_FREE,
3342 FLEX_ARRAY_BASE_BYTES_LEFT);
3343 return ret;
3344 }
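The flex_array fix is a one-character pointer bug. When every element fits inside the base structure, the leftover bytes of the parts[] pointer array are reused as inline element storage, so the poison memset must target the array's own address, &ret->parts[0]; the old ret->parts[0] instead wrote through the first pointer slot, which is uninitialized at that point. A minimal illustration of the difference with plain C types (struct and field names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    struct holder {
        int meta;
        char *parts[4];   /* pointer slots doubling as inline storage */
    };

    int main(void)
    {
        struct holder h;

        /* correct: poison the bytes occupied by the array itself */
        memset(&h.parts[0], 0x6b, sizeof(h.parts));

        /* memset(h.parts[0], ...) would instead write through the first,
         * still-uninitialized pointer slot: the crash the patch removes */
        printf("first byte: 0x%02x\n", (unsigned char)((char *)h.parts)[0]);
        return 0;
    }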
3345 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3346 index 2d16fa6..fd9ba95 100644
3347 --- a/mm/hugetlb.c
3348 +++ b/mm/hugetlb.c
3349 @@ -546,6 +546,7 @@ static void free_huge_page(struct page *page)
3350
3351 mapping = (struct address_space *) page_private(page);
3352 set_page_private(page, 0);
3353 + page->mapping = NULL;
3354 BUG_ON(page_count(page));
3355 INIT_LIST_HEAD(&page->lru);
3356
3357 @@ -2447,8 +2448,10 @@ retry:
3358 spin_lock(&inode->i_lock);
3359 inode->i_blocks += blocks_per_huge_page(h);
3360 spin_unlock(&inode->i_lock);
3361 - } else
3362 + } else {
3363 lock_page(page);
3364 + page->mapping = HUGETLB_POISON;
3365 + }
3366 }
3367
3368 /*
3369 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3370 index 954032b..dff3379 100644
3371 --- a/mm/memcontrol.c
3372 +++ b/mm/memcontrol.c
3373 @@ -2215,12 +2215,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
3374 }
3375 unlock_page_cgroup(pc);
3376
3377 + *ptr = mem;
3378 if (mem) {
3379 - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
3380 + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
3381 page);
3382 css_put(&mem->css);
3383 }
3384 - *ptr = mem;
3385 return ret;
3386 }
3387
3388 diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
3389 index bad1c49..72340dd 100644
3390 --- a/net/ieee802154/af_ieee802154.c
3391 +++ b/net/ieee802154/af_ieee802154.c
3392 @@ -147,6 +147,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
3393 dev_load(sock_net(sk), ifr.ifr_name);
3394 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
3395
3396 + if (!dev)
3397 + return -ENODEV;
3398 +
3399 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
3400 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
3401
3402 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3403 index 564a0f8..03c55ac 100644
3404 --- a/net/ipv4/tcp.c
3405 +++ b/net/ipv4/tcp.c
3406 @@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
3407 sk_eat_skb(sk, skb, 0);
3408 if (!desc->count)
3409 break;
3410 + tp->copied_seq = seq;
3411 }
3412 tp->copied_seq = seq;
3413
3414 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3415 index 112c611..16190ca 100644
3416 --- a/net/ipv4/udp.c
3417 +++ b/net/ipv4/udp.c
3418 @@ -471,8 +471,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
3419 if (hslot->count < hslot2->count)
3420 goto begin;
3421
3422 - result = udp4_lib_lookup2(net, INADDR_ANY, sport,
3423 - daddr, hnum, dif,
3424 + result = udp4_lib_lookup2(net, saddr, sport,
3425 + INADDR_ANY, hnum, dif,
3426 hslot2, slot2);
3427 }
3428 rcu_read_unlock();
3429 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3430 index 548a06e..d2ef3a3 100644
3431 --- a/net/ipv6/tcp_ipv6.c
3432 +++ b/net/ipv6/tcp_ipv6.c
3433 @@ -1006,7 +1006,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
3434 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
3435
3436 t1 = (struct tcphdr *) skb_push(buff, tot_len);
3437 - skb_reset_transport_header(skb);
3438 + skb_reset_transport_header(buff);
3439
3440 /* Swap the send and the receive. */
3441 memset(t1, 0, sizeof(*t1));
3442 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3443 index d9714d2..4f57cd2 100644
3444 --- a/net/ipv6/udp.c
3445 +++ b/net/ipv6/udp.c
3446 @@ -258,8 +258,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
3447 if (hslot->count < hslot2->count)
3448 goto begin;
3449
3450 - result = udp6_lib_lookup2(net, &in6addr_any, sport,
3451 - daddr, hnum, dif,
3452 + result = udp6_lib_lookup2(net, saddr, sport,
3453 + &in6addr_any, hnum, dif,
3454 hslot2, slot2);
3455 }
3456 rcu_read_unlock();
3457 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3458 index 304b0b6..dfdc138 100644
3459 --- a/net/mac80211/agg-tx.c
3460 +++ b/net/mac80211/agg-tx.c
3461 @@ -183,7 +183,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
3462 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
3463 HT_ADDBA_REQUESTED_MSK) {
3464 spin_unlock_bh(&sta->lock);
3465 - *state = HT_AGG_STATE_IDLE;
3466 #ifdef CONFIG_MAC80211_HT_DEBUG
3467 printk(KERN_DEBUG "timer expired on tid %d but we are not "
3468 "(or no longer) expecting addBA response there",
3469 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3470 index df5abbf..99c93ee 100644
3471 --- a/net/sctp/associola.c
3472 +++ b/net/sctp/associola.c
3473 @@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
3474 /* Remove any peer addresses not present in the new association. */
3475 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
3476 trans = list_entry(pos, struct sctp_transport, transports);
3477 - if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
3478 - sctp_assoc_del_peer(asoc, &trans->ipaddr);
3479 + if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
3480 + sctp_assoc_rm_peer(asoc, trans);
3481 + continue;
3482 + }
3483
3484 if (asoc->state >= SCTP_STATE_ESTABLISHED)
3485 sctp_transport_reset(trans);
3486 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
3487 index 905fda5..7ec09ba 100644
3488 --- a/net/sctp/endpointola.c
3489 +++ b/net/sctp/endpointola.c
3490 @@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
3491 /* Use SCTP specific send buffer space queues. */
3492 ep->sndbuf_policy = sctp_sndbuf_policy;
3493
3494 + sk->sk_data_ready = sctp_data_ready;
3495 sk->sk_write_space = sctp_write_space;
3496 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3497
3498 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
3499 index 9e73291..224db01 100644
3500 --- a/net/sctp/sm_make_chunk.c
3501 +++ b/net/sctp/sm_make_chunk.c
3502 @@ -207,7 +207,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3503 sp = sctp_sk(asoc->base.sk);
3504 num_types = sp->pf->supported_addrs(sp, types);
3505
3506 - chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
3507 + chunksize = sizeof(init) + addrs_len;
3508 + chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
3509 chunksize += sizeof(ecap_param);
3510
3511 if (sctp_prsctp_enable)
3512 @@ -237,14 +238,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3513 /* Add HMACS parameter length if any were defined */
3514 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
3515 if (auth_hmacs->length)
3516 - chunksize += ntohs(auth_hmacs->length);
3517 + chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
3518 else
3519 auth_hmacs = NULL;
3520
3521 /* Add CHUNKS parameter length */
3522 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
3523 if (auth_chunks->length)
3524 - chunksize += ntohs(auth_chunks->length);
3525 + chunksize += WORD_ROUND(ntohs(auth_chunks->length));
3526 else
3527 auth_chunks = NULL;
3528
3529 @@ -254,7 +255,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
3530
3531 /* If we have any extensions to report, account for that */
3532 if (num_ext)
3533 - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
3534 + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
3535 + num_ext);
3536
3537 /* RFC 2960 3.3.2 Initiation (INIT) (1)
3538 *
3539 @@ -396,13 +398,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
3540
3541 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
3542 if (auth_hmacs->length)
3543 - chunksize += ntohs(auth_hmacs->length);
3544 + chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
3545 else
3546 auth_hmacs = NULL;
3547
3548 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
3549 if (auth_chunks->length)
3550 - chunksize += ntohs(auth_chunks->length);
3551 + chunksize += WORD_ROUND(ntohs(auth_chunks->length));
3552 else
3553 auth_chunks = NULL;
3554
3555 @@ -411,7 +413,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
3556 }
3557
3558 if (num_ext)
3559 - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
3560 + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
3561 + num_ext);
3562
3563 /* Now allocate and fill out the chunk. */
3564 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
3565 @@ -3314,21 +3317,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3566 sctp_chunk_free(asconf);
3567 asoc->addip_last_asconf = NULL;
3568
3569 - /* Send the next asconf chunk from the addip chunk queue. */
3570 - if (!list_empty(&asoc->addip_chunk_list)) {
3571 - struct list_head *entry = asoc->addip_chunk_list.next;
3572 - asconf = list_entry(entry, struct sctp_chunk, list);
3573 -
3574 - list_del_init(entry);
3575 -
3576 - /* Hold the chunk until an ASCONF_ACK is received. */
3577 - sctp_chunk_hold(asconf);
3578 - if (sctp_primitive_ASCONF(asoc, asconf))
3579 - sctp_chunk_free(asconf);
3580 - else
3581 - asoc->addip_last_asconf = asconf;
3582 - }
3583 -
3584 return retval;
3585 }
3586
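The recurring change in the sm_make_chunk.c hunks above is a single rule: SCTP parameters are TLVs that occupy a multiple of 4 bytes on the wire, so each variable-length parameter must be rounded up with WORD_ROUND() before it is added to the chunk size, otherwise the INIT/INIT-ACK allocation can come up short of what is later written into it. WORD_ROUND is the kernel's usual round-up-to-4 macro, reproduced below from its definition in sctp.h; a tiny check of the arithmetic:

    #include <stdio.h>

    /* SCTP's WORD_ROUND(): round a TLV length up to a 4-byte boundary */
    #define WORD_ROUND(s) (((s) + 3) & ~3)

    int main(void)
    {
        /* e.g. a 6-byte AUTH HMACS parameter occupies 8 bytes on the wire */
        unsigned int lens[] = { 4, 5, 6, 7, 8 };
        for (unsigned int i = 0; i < 5; i++)
            printf("len %u -> padded %u\n", lens[i], WORD_ROUND(lens[i]));
        return 0;
    }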
3587 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
3588 index 4e4ca65..42bbb24 100644
3589 --- a/net/sctp/sm_sideeffect.c
3590 +++ b/net/sctp/sm_sideeffect.c
3591 @@ -961,6 +961,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
3592 }
3593
3594
3595 +/* Send the next ASCONF packet currently stored in the association.
3596 + * This happens after the ASCONF_ACK was successfully processed.
3597 + */
3598 +static void sctp_cmd_send_asconf(struct sctp_association *asoc)
3599 +{
3600 + /* Send the next asconf chunk from the addip chunk
3601 + * queue.
3602 + */
3603 + if (!list_empty(&asoc->addip_chunk_list)) {
3604 + struct list_head *entry = asoc->addip_chunk_list.next;
3605 + struct sctp_chunk *asconf = list_entry(entry,
3606 + struct sctp_chunk, list);
3607 + list_del_init(entry);
3608 +
3609 + /* Hold the chunk until an ASCONF_ACK is received. */
3610 + sctp_chunk_hold(asconf);
3611 + if (sctp_primitive_ASCONF(asoc, asconf))
3612 + sctp_chunk_free(asconf);
3613 + else
3614 + asoc->addip_last_asconf = asconf;
3615 + }
3616 +}
3617 +
3618
3619 /* These three macros allow us to pull the debugging code out of the
3620 * main flow of sctp_do_sm() to keep attention focused on the real
3621 @@ -1616,6 +1639,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
3622 }
3623 error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
3624 break;
3625 + case SCTP_CMD_SEND_NEXT_ASCONF:
3626 + sctp_cmd_send_asconf(asoc);
3627 + break;
3628 default:
3629 printk(KERN_WARNING "Impossible command: %u, %p\n",
3630 cmd->verb, cmd->obj.ptr);
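For context, the SCTP state machine never performs side effects directly: state functions such as sctp_sf_do_asconf_ack() queue SCTP_CMD_* verbs, and sctp_cmd_interpreter() later executes each one in a switch, which is why the patch pairs a new verb with a new case and a helper. A toy model of that queue-then-dispatch pattern (verb names abbreviated, everything else invented for illustration):

    #include <stdio.h>

    /* toy verb set standing in for sctp_verb_t */
    enum verb { CMD_NOP, CMD_SEND_NEXT_ASCONF };

    static void send_next_asconf(void)
    {
        printf("dequeue and transmit the next queued ASCONF\n");
    }

    /* stand-in for sctp_cmd_interpreter(): run each queued side effect */
    static void interpret(const enum verb *q, int n)
    {
        for (int i = 0; i < n; i++) {
            switch (q[i]) {
            case CMD_SEND_NEXT_ASCONF:
                send_next_asconf();
                break;
            default:
                break;   /* the kernel logs impossible verbs here */
            }
        }
    }

    int main(void)
    {
        enum verb q[] = { CMD_NOP, CMD_SEND_NEXT_ASCONF };
        interpret(q, 2);
        return 0;
    }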
3631 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
3632 index 47bc20d..c3f75e7 100644
3633 --- a/net/sctp/sm_statefuns.c
3634 +++ b/net/sctp/sm_statefuns.c
3635 @@ -3675,8 +3675,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
3636 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
3637
3638 if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
3639 - asconf_ack))
3640 + asconf_ack)) {
3641 + /* Successfully processed ASCONF_ACK. We can
3642 + * release the next asconf if we have one.
3643 + */
3644 + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
3645 + SCTP_NULL());
3646 return SCTP_DISPOSITION_CONSUME;
3647 + }
3648
3649 abort = sctp_make_abort(asoc, asconf_ack,
3650 sizeof(sctp_errhdr_t));
3651 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3652 index 9bd9d82..aa3ba60 100644
3653 --- a/net/sctp/socket.c
3654 +++ b/net/sctp/socket.c
3655 @@ -3718,12 +3718,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3656 sp->hmac = NULL;
3657
3658 SCTP_DBG_OBJCNT_INC(sock);
3659 - percpu_counter_inc(&sctp_sockets_allocated);
3660
3661 /* Set socket backlog limit. */
3662 sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3663
3664 local_bh_disable();
3665 + percpu_counter_inc(&sctp_sockets_allocated);
3666 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3667 local_bh_enable();
3668
3669 @@ -3740,8 +3740,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3670 /* Release our hold on the endpoint. */
3671 ep = sctp_sk(sk)->ep;
3672 sctp_endpoint_free(ep);
3673 - percpu_counter_dec(&sctp_sockets_allocated);
3674 local_bh_disable();
3675 + percpu_counter_dec(&sctp_sockets_allocated);
3676 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
3677 local_bh_enable();
3678 }
3679 @@ -6188,6 +6188,16 @@ do_nonblock:
3680 goto out;
3681 }
3682
3683 +void sctp_data_ready(struct sock *sk, int len)
3684 +{
3685 + read_lock_bh(&sk->sk_callback_lock);
3686 + if (sk_has_sleeper(sk))
3687 + wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
3688 + POLLRDNORM | POLLRDBAND);
3689 + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
3690 + read_unlock_bh(&sk->sk_callback_lock);
3691 +}
3692 +
3693 /* If socket sndbuf has changed, wake up all per association waiters. */
3694 void sctp_write_space(struct sock *sk)
3695 {
3696 diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
3697 index 327011f..7809137 100644
3698 --- a/net/tipc/bearer.c
3699 +++ b/net/tipc/bearer.c
3700 @@ -45,10 +45,10 @@
3701
3702 #define MAX_ADDR_STR 32
3703
3704 -static struct media *media_list = NULL;
3705 +static struct media media_list[MAX_MEDIA];
3706 static u32 media_count = 0;
3707
3708 -struct bearer *tipc_bearers = NULL;
3709 +struct bearer tipc_bearers[MAX_BEARERS];
3710
3711 /**
3712 * media_name_valid - validate media name
3713 @@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
3714 int res = -EINVAL;
3715
3716 write_lock_bh(&tipc_net_lock);
3717 - if (!media_list)
3718 - goto exit;
3719
3720 + if (tipc_mode != TIPC_NET_MODE) {
3721 + warn("Media <%s> rejected, not in networked mode yet\n", name);
3722 + goto exit;
3723 + }
3724 if (!media_name_valid(name)) {
3725 warn("Media <%s> rejected, illegal name\n", name);
3726 goto exit;
3727 @@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
3728
3729
3730
3731 -int tipc_bearer_init(void)
3732 -{
3733 - int res;
3734 -
3735 - write_lock_bh(&tipc_net_lock);
3736 - tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
3737 - media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
3738 - if (tipc_bearers && media_list) {
3739 - res = 0;
3740 - } else {
3741 - kfree(tipc_bearers);
3742 - kfree(media_list);
3743 - tipc_bearers = NULL;
3744 - media_list = NULL;
3745 - res = -ENOMEM;
3746 - }
3747 - write_unlock_bh(&tipc_net_lock);
3748 - return res;
3749 -}
3750 -
3751 void tipc_bearer_stop(void)
3752 {
3753 u32 i;
3754
3755 - if (!tipc_bearers)
3756 - return;
3757 -
3758 for (i = 0; i < MAX_BEARERS; i++) {
3759 if (tipc_bearers[i].active)
3760 tipc_bearers[i].publ.blocked = 1;
3761 @@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
3762 if (tipc_bearers[i].active)
3763 bearer_disable(tipc_bearers[i].publ.name);
3764 }
3765 - kfree(tipc_bearers);
3766 - kfree(media_list);
3767 - tipc_bearers = NULL;
3768 - media_list = NULL;
3769 media_count = 0;
3770 }
3771
3772 diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
3773 index ca57348..000228e 100644
3774 --- a/net/tipc/bearer.h
3775 +++ b/net/tipc/bearer.h
3776 @@ -114,7 +114,7 @@ struct bearer_name {
3777
3778 struct link;
3779
3780 -extern struct bearer *tipc_bearers;
3781 +extern struct bearer tipc_bearers[];
3782
3783 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
3784 struct sk_buff *tipc_media_get_names(void);
3785 diff --git a/net/tipc/net.c b/net/tipc/net.c
3786 index 7906608..f25b1cd 100644
3787 --- a/net/tipc/net.c
3788 +++ b/net/tipc/net.c
3789 @@ -116,7 +116,8 @@
3790 */
3791
3792 DEFINE_RWLOCK(tipc_net_lock);
3793 -struct network tipc_net = { NULL };
3794 +struct _zone *tipc_zones[256] = { NULL, };
3795 +struct network tipc_net = { tipc_zones };
3796
3797 struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
3798 {
3799 @@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
3800 }
3801 }
3802
3803 -static int net_init(void)
3804 -{
3805 - memset(&tipc_net, 0, sizeof(tipc_net));
3806 - tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
3807 - if (!tipc_net.zones) {
3808 - return -ENOMEM;
3809 - }
3810 - return 0;
3811 -}
3812 -
3813 static void net_stop(void)
3814 {
3815 u32 z_num;
3816
3817 - if (!tipc_net.zones)
3818 - return;
3819 -
3820 - for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
3821 + for (z_num = 1; z_num <= tipc_max_zones; z_num++)
3822 tipc_zone_delete(tipc_net.zones[z_num]);
3823 - }
3824 - kfree(tipc_net.zones);
3825 - tipc_net.zones = NULL;
3826 }
3827
3828 static void net_route_named_msg(struct sk_buff *buf)
3829 @@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
3830 tipc_named_reinit();
3831 tipc_port_reinit();
3832
3833 - if ((res = tipc_bearer_init()) ||
3834 - (res = net_init()) ||
3835 - (res = tipc_cltr_init()) ||
3836 + if ((res = tipc_cltr_init()) ||
3837 (res = tipc_bclink_init())) {
3838 return res;
3839 }
3840 diff --git a/security/inode.c b/security/inode.c
3841 index c3a7938..1c812e8 100644
3842 --- a/security/inode.c
3843 +++ b/security/inode.c
3844 @@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
3845
3846 mutex_lock(&parent->d_inode->i_mutex);
3847 *dentry = lookup_one_len(name, parent, strlen(name));
3848 - if (!IS_ERR(dentry)) {
3849 + if (!IS_ERR(*dentry)) {
3850 if ((mode & S_IFMT) == S_IFDIR)
3851 error = mkdir(parent->d_inode, *dentry, mode);
3852 else
3853 error = create(parent->d_inode, *dentry, mode);
3854 } else
3855 - error = PTR_ERR(dentry);
3856 + error = PTR_ERR(*dentry);
3857 mutex_unlock(&parent->d_inode->i_mutex);
3858
3859 return error;
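The create_by_name() fix is the classic ERR_PTR slip with an output parameter: lookup_one_len()'s result is stored through the dentry out-pointer, so the error test must look at *dentry; testing dentry itself checks the caller's stack address, which is never an error pointer, so failures were treated as success and then misused. A self-contained sketch of the pattern, with the <linux/err.h> helpers re-created in userspace and all other names invented:

    #include <stdio.h>
    #include <stdint.h>

    /* userspace stand-ins for the kernel's <linux/err.h> helpers */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
    #define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

    static void *lookup(int fail)
    {
        return fail ? ERR_PTR(-2 /* -ENOENT */) : (void *)(uintptr_t)0x1000;
    }

    static long create_by_name(void **dentry, int fail)
    {
        *dentry = lookup(fail);
        if (!IS_ERR(*dentry))     /* the fix: test what was stored... */
            return 0;
        return PTR_ERR(*dentry);  /* ...not the out-pointer itself */
    }

    int main(void)
    {
        void *d;
        printf("ok path:  %ld\n", create_by_name(&d, 0));   /* 0 */
        printf("err path: %ld\n", create_by_name(&d, 1));   /* -2 */
        return 0;
    }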
3860 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
3861 index 03fe63e..9ac7bfd 100644
3862 --- a/security/keys/request_key.c
3863 +++ b/security/keys/request_key.c
3864 @@ -336,8 +336,10 @@ static int construct_alloc_key(struct key_type *type,
3865
3866 key_already_present:
3867 mutex_unlock(&key_construction_mutex);
3868 - if (dest_keyring)
3869 + if (dest_keyring) {
3870 + __key_link(dest_keyring, key_ref_to_ptr(key_ref));
3871 up_write(&dest_keyring->sem);
3872 + }
3873 mutex_unlock(&user->cons_lock);
3874 key_put(key);
3875 *_key = key = key_ref_to_ptr(key_ref);
3876 @@ -428,6 +430,11 @@ struct key *request_key_and_link(struct key_type *type,
3877
3878 if (!IS_ERR(key_ref)) {
3879 key = key_ref_to_ptr(key_ref);
3880 + if (dest_keyring) {
3881 + construct_get_dest_keyring(&dest_keyring);
3882 + key_link(dest_keyring, key);
3883 + key_put(dest_keyring);
3884 + }
3885 } else if (PTR_ERR(key_ref) != -EAGAIN) {
3886 key = ERR_CAST(key_ref);
3887 } else {
3888 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3889 index 062a8b0..fd831bd 100644
3890 --- a/sound/pci/hda/hda_intel.c
3891 +++ b/sound/pci/hda/hda_intel.c
3892 @@ -2273,6 +2273,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
3893 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
3894 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
3895 SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
3896 + SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
3897 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
3898 {}
3899 };
3900 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3901 index 71b7a96..1a97c81 100644
3902 --- a/sound/pci/hda/patch_conexant.c
3903 +++ b/sound/pci/hda/patch_conexant.c
3904 @@ -1174,9 +1174,10 @@ static int patch_cxt5045(struct hda_codec *codec)
3905
3906 switch (codec->subsystem_id >> 16) {
3907 case 0x103c:
3908 + case 0x1631:
3909 case 0x1734:
3910 - /* HP & Fujitsu-Siemens laptops have really bad sound over 0dB
3911 - * on NID 0x17. Fix max PCM level to 0 dB
3912 + /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
3913 + * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
3914 * (originally it has 0x2b steps with 0dB offset 0x14)
3915 */
3916 snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
3917 @@ -2471,6 +2472,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3918 CXT5066_DELL_LAPTOP),
3919 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
3920 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
3921 + SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3922 + SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3923 {}
3924 };
3925
3926 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3927 index bd8a567..b486daa 100644
3928 --- a/sound/pci/hda/patch_realtek.c
3929 +++ b/sound/pci/hda/patch_realtek.c
3930 @@ -4033,7 +4033,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
3931 SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
3932 SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
3933 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
3934 - SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
3935 + SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
3936 SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
3937 SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
3938 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
3939 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3940 index 799ba25..ac2d528 100644
3941 --- a/sound/pci/hda/patch_sigmatel.c
3942 +++ b/sound/pci/hda/patch_sigmatel.c
3943 @@ -1602,6 +1602,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
3944 "Dell Studio 1555", STAC_DELL_M6_DMIC),
3945 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
3946 "Dell Studio 1557", STAC_DELL_M6_DMIC),
3947 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
3948 + "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
3949 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
3950 + "Dell Studio 1558", STAC_DELL_M6_BOTH),
3951 {} /* terminator */
3952 };
3953
3954 @@ -1725,6 +1729,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
3955 "HP HDX", STAC_HP_HDX), /* HDX16 */
3956 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
3957 "HP dv6", STAC_HP_DV5),
3958 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
3959 + "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
3960 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
3961 "HP", STAC_HP_DV5),
3962 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
3963 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
3964 index 75283fb..c2311f8 100644
3965 --- a/sound/pci/maestro3.c
3966 +++ b/sound/pci/maestro3.c
3967 @@ -849,6 +849,7 @@ struct snd_m3 {
3968 struct snd_kcontrol *master_switch;
3969 struct snd_kcontrol *master_volume;
3970 struct tasklet_struct hwvol_tq;
3971 + unsigned int in_suspend;
3972
3973 #ifdef CONFIG_PM
3974 u16 *suspend_mem;
3975 @@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[] = {
3976 MODULE_DEVICE_TABLE(pci, snd_m3_ids);
3977
3978 static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
3979 + SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
3980 SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
3981 SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
3982 SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
3983 @@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
3984 outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
3985 outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
3986
3987 + /* Ignore spurious HV interrupts during suspend / resume; this avoids
3988 + mistaking them for a mute button press. */
3989 + if (chip->in_suspend)
3990 + return;
3991 +
3992 if (!chip->master_switch || !chip->master_volume)
3993 return;
3994
3995 @@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
3996 if (chip->suspend_mem == NULL)
3997 return 0;
3998
3999 + chip->in_suspend = 1;
4000 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
4001 snd_pcm_suspend_all(chip->pcm);
4002 snd_ac97_suspend(chip->ac97);
4003 @@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
4004 snd_m3_hv_init(chip);
4005
4006 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
4007 + chip->in_suspend = 0;
4008 return 0;
4009 }
4010 #endif /* CONFIG_PM */
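The maestro3 hunks implement a common suspend guard: a flag is raised before the controller powers down and cleared only after resume has fully reinitialized it, and the deferred hardware-volume handler still acknowledges the hardware but bails out while the flag is set. A compressed sketch of the pattern; the struct and function names below are invented stand-ins for the driver's tasklet and PM hooks:

    #include <stdio.h>

    struct chip {
        unsigned int in_suspend;   /* mirrors snd_m3.in_suspend */
    };

    /* deferred interrupt work: ack the hardware first, then maybe bail */
    static void hwvol_work(struct chip *c)
    {
        /* (register writes acknowledging the interrupt would go here) */
        if (c->in_suspend) {
            printf("HV event during suspend: acked, otherwise ignored\n");
            return;
        }
        printf("volume key handled\n");
    }

    static void dev_suspend(struct chip *c) { c->in_suspend = 1; }
    static void dev_resume(struct chip *c)  { c->in_suspend = 0; }

    int main(void)
    {
        struct chip c = { 0 };
        hwvol_work(&c);    /* normal operation */
        dev_suspend(&c);
        hwvol_work(&c);    /* event racing with suspend is dropped */
        dev_resume(&c);
        hwvol_work(&c);    /* back to normal after resume */
        return 0;
    }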