Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.33-r3/0102-2.6.33.3-all-fixes.patch

Revision 1260
Wed Jan 26 11:54:32 2011 UTC by niro
File size: 169949 bytes
2.6.33-alx-r3: always enable kms support at runtime for xorg-server-1.9.x
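
For context, "always enable kms support at runtime" conventionally means loading the DRM drivers with kernel mode setting forced on, as the X drivers shipped with xorg-server 1.9 were dropping non-KMS (UMS) support. A minimal sketch of such a configuration, assuming the stock i915/radeon/nouveau module option names of that era; this file is illustrative only and is not part of the patch below:

    # /etc/modprobe.d/kms.conf : hypothetical example, not from this patch
    options i915 modeset=1
    options radeon modeset=1
    options nouveau modeset=1
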
1 diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
2 index 81c0c59..e1bb5b2 100644
3 --- a/Documentation/i2c/busses/i2c-i801
4 +++ b/Documentation/i2c/busses/i2c-i801
5 @@ -15,7 +15,8 @@ Supported adapters:
6 * Intel 82801I (ICH9)
7 * Intel EP80579 (Tolapai)
8 * Intel 82801JI (ICH10)
9 - * Intel PCH
10 + * Intel 3400/5 Series (PCH)
11 + * Intel Cougar Point (PCH)
12 Datasheets: Publicly available at the Intel website
13
14 Authors:
15 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
16 index 6b84a04..cbeb6e0 100644
17 --- a/arch/arm/boot/compressed/head.S
18 +++ b/arch/arm/boot/compressed/head.S
19 @@ -172,7 +172,7 @@ not_angel:
20 adr r0, LC0
21 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
22 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
23 - THUMB( ldr sp, [r0, #28] )
24 + THUMB( ldr sp, [r0, #32] )
25 subs r0, r0, r1 @ calculate the delta offset
26
27 @ if delta is zero, we are
28 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
29 index 5fdeec5..d76279a 100644
30 --- a/arch/ia64/kvm/kvm-ia64.c
31 +++ b/arch/ia64/kvm/kvm-ia64.c
32 @@ -1794,7 +1794,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
33 {
34 struct kvm_memory_slot *memslot;
35 int r, i;
36 - long n, base;
37 + long base;
38 + unsigned long n;
39 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
40 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
41
42 @@ -1807,7 +1808,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
43 if (!memslot->dirty_bitmap)
44 goto out;
45
46 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
47 + n = kvm_dirty_bitmap_bytes(memslot);
48 base = memslot->base_gfn / BITS_PER_LONG;
49
50 for (i = 0; i < n/sizeof(long); ++i) {
51 @@ -1823,7 +1824,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
52 struct kvm_dirty_log *log)
53 {
54 int r;
55 - int n;
56 + unsigned long n;
57 struct kvm_memory_slot *memslot;
58 int is_dirty = 0;
59
60 @@ -1841,7 +1842,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
61 if (is_dirty) {
62 kvm_flush_remote_tlbs(kvm);
63 memslot = &kvm->memslots[log->slot];
64 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
65 + n = kvm_dirty_bitmap_bytes(memslot);
66 memset(memslot->dirty_bitmap, 0, n);
67 }
68 r = 0;
69 diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
70 index 3e294bd..e6dc595 100644
71 --- a/arch/powerpc/kvm/book3s.c
72 +++ b/arch/powerpc/kvm/book3s.c
73 @@ -848,7 +848,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
74 struct kvm_vcpu *vcpu;
75 ulong ga, ga_end;
76 int is_dirty = 0;
77 - int r, n;
78 + int r;
79 + unsigned long n;
80
81 down_write(&kvm->slots_lock);
82
83 @@ -866,7 +867,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
84 kvm_for_each_vcpu(n, vcpu, kvm)
85 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
86
87 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
88 + n = kvm_dirty_bitmap_bytes(memslot);
89 memset(memslot->dirty_bitmap, 0, n);
90 }
91
92 diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
93 index 22574e0..202d869 100644
94 --- a/arch/powerpc/platforms/pseries/offline_states.h
95 +++ b/arch/powerpc/platforms/pseries/offline_states.h
96 @@ -9,10 +9,30 @@ enum cpu_state_vals {
97 CPU_MAX_OFFLINE_STATES
98 };
99
100 +#ifdef CONFIG_HOTPLUG_CPU
101 extern enum cpu_state_vals get_cpu_current_state(int cpu);
102 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
103 -extern enum cpu_state_vals get_preferred_offline_state(int cpu);
104 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
105 extern void set_default_offline_state(int cpu);
106 +#else
107 +static inline enum cpu_state_vals get_cpu_current_state(int cpu)
108 +{
109 + return CPU_STATE_ONLINE;
110 +}
111 +
112 +static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
113 +{
114 +}
115 +
116 +static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
117 +{
118 +}
119 +
120 +static inline void set_default_offline_state(int cpu)
121 +{
122 +}
123 +#endif
124 +
125 +extern enum cpu_state_vals get_preferred_offline_state(int cpu);
126 extern int start_secondary(void);
127 #endif
128 diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
129 index 300ab01..5f91a38 100644
130 --- a/arch/s390/mm/vmem.c
131 +++ b/arch/s390/mm/vmem.c
132 @@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
133 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
134 if (!pte)
135 return NULL;
136 - if (MACHINE_HAS_HPAGE)
137 - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
138 - PTRS_PER_PTE * sizeof(pte_t));
139 - else
140 - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
141 - PTRS_PER_PTE * sizeof(pte_t));
142 + clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
143 + PTRS_PER_PTE * sizeof(pte_t));
144 return pte;
145 }
146
147 @@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
148 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
149 (address + HPAGE_SIZE <= start + size) &&
150 (address >= HPAGE_SIZE)) {
151 - pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
152 - _SEGMENT_ENTRY_CO;
153 + pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
154 pmd_val(*pm_dir) = pte_val(pte);
155 address += HPAGE_SIZE - PAGE_SIZE;
156 continue;
157 diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
158 index ac04255..ce830fa 100644
159 --- a/arch/sh/include/asm/elf.h
160 +++ b/arch/sh/include/asm/elf.h
161 @@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
162
163 #define VSYSCALL_AUX_ENT \
164 if (vdso_enabled) \
165 - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
166 + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
167 + else \
168 + NEW_AUX_ENT(AT_IGNORE, 0);
169 #else
170 #define VSYSCALL_AUX_ENT
171 #endif /* CONFIG_VSYSCALL */
172 @@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
173 #ifdef CONFIG_SH_FPU
174 #define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
175 #else
176 -#define FPU_AUX_ENT
177 +#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
178 #endif
179
180 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
181 diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
182 index 983e079..1d19c19 100644
183 --- a/arch/sh/kernel/smp.c
184 +++ b/arch/sh/kernel/smp.c
185 @@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
186 unsigned int cpu;
187 struct mm_struct *mm = &init_mm;
188
189 + enable_mmu();
190 atomic_inc(&mm->mm_count);
191 atomic_inc(&mm->mm_users);
192 current->active_mm = mm;
193 diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
194 index 7e3dfd9..e608f39 100644
195 --- a/arch/sparc/kernel/ptrace_32.c
196 +++ b/arch/sparc/kernel/ptrace_32.c
197 @@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
198 *k++ = regs->u_regs[pos++];
199
200 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
201 + reg_window -= 16;
202 for (; count > 0 && pos < 32; count--) {
203 if (get_user(*k++, &reg_window[pos++]))
204 return -EFAULT;
205 @@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
206 }
207
208 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
209 + reg_window -= 16;
210 for (; count > 0 && pos < 32; count--) {
211 if (get_user(reg, &reg_window[pos++]) ||
212 put_user(reg, u++))
213 @@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
214 regs->u_regs[pos++] = *k++;
215
216 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
217 + reg_window -= 16;
218 for (; count > 0 && pos < 32; count--) {
219 if (put_user(*k++, &reg_window[pos++]))
220 return -EFAULT;
221 @@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
222 }
223
224 reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
225 + reg_window -= 16;
226 for (; count > 0 && pos < 32; count--) {
227 if (get_user(reg, u++) ||
228 put_user(reg, &reg_window[pos++]))
229 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
230 index 2f6524d..aa90da0 100644
231 --- a/arch/sparc/kernel/ptrace_64.c
232 +++ b/arch/sparc/kernel/ptrace_64.c
233 @@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
234 *k++ = regs->u_regs[pos++];
235
236 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
237 + reg_window -= 16;
238 if (target == current) {
239 for (; count > 0 && pos < 32; count--) {
240 if (get_user(*k++, &reg_window[pos++]))
241 @@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
242 }
243
244 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
245 + reg_window -= 16;
246 if (target == current) {
247 for (; count > 0 && pos < 32; count--) {
248 if (get_user(reg, &reg_window[pos++]) ||
249 @@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
250 regs->u_regs[pos++] = *k++;
251
252 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
253 + reg_window -= 16;
254 if (target == current) {
255 for (; count > 0 && pos < 32; count--) {
256 if (put_user(*k++, &reg_window[pos++]))
257 @@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
258 }
259
260 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
261 + reg_window -= 16;
262 if (target == current) {
263 for (; count > 0 && pos < 32; count--) {
264 if (get_user(reg, u++) ||
265 diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
266 index 2201e9c..c1ea9eb 100644
267 --- a/arch/um/sys-x86_64/Makefile
268 +++ b/arch/um/sys-x86_64/Makefile
269 @@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
270 setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
271 sysrq.o ksyms.o tls.o
272
273 -subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
274 +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
275 + lib/rwsem_64.o
276 subarch-obj-$(CONFIG_MODULES) += kernel/module.o
277
278 ldt-y = ../sys-i386/ldt.o
279 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
280 index f20ddf8..a198293 100644
281 --- a/arch/x86/Kconfig.cpu
282 +++ b/arch/x86/Kconfig.cpu
283 @@ -319,7 +319,7 @@ config X86_L1_CACHE_SHIFT
284
285 config X86_XADD
286 def_bool y
287 - depends on X86_32 && !M386
288 + depends on X86_64 || !M386
289
290 config X86_PPRO_FENCE
291 bool "PentiumPro memory ordering errata workaround"
292 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
293 index ca7517d..606ede1 100644
294 --- a/arch/x86/include/asm/rwsem.h
295 +++ b/arch/x86/include/asm/rwsem.h
296 @@ -41,6 +41,7 @@
297 #include <linux/list.h>
298 #include <linux/spinlock.h>
299 #include <linux/lockdep.h>
300 +#include <asm/asm.h>
301
302 struct rwsem_waiter;
303
304 @@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
305
306 /*
307 * the semaphore definition
308 + *
309 + * The bias values and the counter type limits the number of
310 + * potential readers/writers to 32767 for 32 bits and 2147483647
311 + * for 64 bits.
312 */
313
314 -#define RWSEM_UNLOCKED_VALUE 0x00000000
315 -#define RWSEM_ACTIVE_BIAS 0x00000001
316 -#define RWSEM_ACTIVE_MASK 0x0000ffff
317 -#define RWSEM_WAITING_BIAS (-0x00010000)
318 +#ifdef CONFIG_X86_64
319 +# define RWSEM_ACTIVE_MASK 0xffffffffL
320 +#else
321 +# define RWSEM_ACTIVE_MASK 0x0000ffffL
322 +#endif
323 +
324 +#define RWSEM_UNLOCKED_VALUE 0x00000000L
325 +#define RWSEM_ACTIVE_BIAS 0x00000001L
326 +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
327 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
328 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
329
330 +typedef signed long rwsem_count_t;
331 +
332 struct rw_semaphore {
333 - signed long count;
334 + rwsem_count_t count;
335 spinlock_t wait_lock;
336 struct list_head wait_list;
337 #ifdef CONFIG_DEBUG_LOCK_ALLOC
338 @@ -105,7 +117,7 @@ do { \
339 static inline void __down_read(struct rw_semaphore *sem)
340 {
341 asm volatile("# beginning down_read\n\t"
342 - LOCK_PREFIX " incl (%%eax)\n\t"
343 + LOCK_PREFIX _ASM_INC "(%1)\n\t"
344 /* adds 0x00000001, returns the old value */
345 " jns 1f\n"
346 " call call_rwsem_down_read_failed\n"
347 @@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem)
348 */
349 static inline int __down_read_trylock(struct rw_semaphore *sem)
350 {
351 - __s32 result, tmp;
352 + rwsem_count_t result, tmp;
353 asm volatile("# beginning __down_read_trylock\n\t"
354 - " movl %0,%1\n\t"
355 + " mov %0,%1\n\t"
356 "1:\n\t"
357 - " movl %1,%2\n\t"
358 - " addl %3,%2\n\t"
359 + " mov %1,%2\n\t"
360 + " add %3,%2\n\t"
361 " jle 2f\n\t"
362 - LOCK_PREFIX " cmpxchgl %2,%0\n\t"
363 + LOCK_PREFIX " cmpxchg %2,%0\n\t"
364 " jnz 1b\n\t"
365 "2:\n\t"
366 "# ending __down_read_trylock\n\t"
367 @@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
368 */
369 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
370 {
371 - int tmp;
372 + rwsem_count_t tmp;
373
374 tmp = RWSEM_ACTIVE_WRITE_BIAS;
375 asm volatile("# beginning down_write\n\t"
376 - LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
377 + LOCK_PREFIX " xadd %1,(%2)\n\t"
378 /* subtract 0x0000ffff, returns the old value */
379 - " testl %%edx,%%edx\n\t"
380 + " test %1,%1\n\t"
381 /* was the count 0 before? */
382 " jz 1f\n"
383 " call call_rwsem_down_write_failed\n"
384 @@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
385 */
386 static inline int __down_write_trylock(struct rw_semaphore *sem)
387 {
388 - signed long ret = cmpxchg(&sem->count,
389 - RWSEM_UNLOCKED_VALUE,
390 - RWSEM_ACTIVE_WRITE_BIAS);
391 + rwsem_count_t ret = cmpxchg(&sem->count,
392 + RWSEM_UNLOCKED_VALUE,
393 + RWSEM_ACTIVE_WRITE_BIAS);
394 if (ret == RWSEM_UNLOCKED_VALUE)
395 return 1;
396 return 0;
397 @@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
398 */
399 static inline void __up_read(struct rw_semaphore *sem)
400 {
401 - __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
402 + rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
403 asm volatile("# beginning __up_read\n\t"
404 - LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
405 + LOCK_PREFIX " xadd %1,(%2)\n\t"
406 /* subtracts 1, returns the old value */
407 " jns 1f\n\t"
408 " call call_rwsem_wake\n"
409 @@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem)
410 */
411 static inline void __up_write(struct rw_semaphore *sem)
412 {
413 + rwsem_count_t tmp;
414 asm volatile("# beginning __up_write\n\t"
415 - " movl %2,%%edx\n\t"
416 - LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
417 + LOCK_PREFIX " xadd %1,(%2)\n\t"
418 /* tries to transition
419 0xffff0001 -> 0x00000000 */
420 " jz 1f\n"
421 " call call_rwsem_wake\n"
422 "1:\n\t"
423 "# ending __up_write\n"
424 - : "+m" (sem->count)
425 - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
426 - : "memory", "cc", "edx");
427 + : "+m" (sem->count), "=d" (tmp)
428 + : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
429 + : "memory", "cc");
430 }
431
432 /*
433 @@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
434 static inline void __downgrade_write(struct rw_semaphore *sem)
435 {
436 asm volatile("# beginning __downgrade_write\n\t"
437 - LOCK_PREFIX " addl %2,(%%eax)\n\t"
438 - /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
439 + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
440 + /*
441 + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
442 + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
443 + */
444 " jns 1f\n\t"
445 " call call_rwsem_downgrade_wake\n"
446 "1:\n\t"
447 "# ending __downgrade_write\n"
448 : "+m" (sem->count)
449 - : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
450 + : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
451 : "memory", "cc");
452 }
453
454 /*
455 * implement atomic add functionality
456 */
457 -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
458 +static inline void rwsem_atomic_add(rwsem_count_t delta,
459 + struct rw_semaphore *sem)
460 {
461 - asm volatile(LOCK_PREFIX "addl %1,%0"
462 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
463 : "+m" (sem->count)
464 - : "ir" (delta));
465 + : "er" (delta));
466 }
467
468 /*
469 * implement exchange and add functionality
470 */
471 -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
472 +static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
473 + struct rw_semaphore *sem)
474 {
475 - int tmp = delta;
476 + rwsem_count_t tmp = delta;
477
478 asm volatile(LOCK_PREFIX "xadd %0,%1"
479 : "+r" (tmp), "+m" (sem->count)
480 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
481 index 1e79678..4cfc908 100644
482 --- a/arch/x86/include/asm/smp.h
483 +++ b/arch/x86/include/asm/smp.h
484 @@ -135,6 +135,8 @@ int native_cpu_disable(void);
485 void native_cpu_die(unsigned int cpu);
486 void native_play_dead(void);
487 void play_dead_common(void);
488 +void wbinvd_on_cpu(int cpu);
489 +int wbinvd_on_all_cpus(void);
490
491 void native_send_call_func_ipi(const struct cpumask *mask);
492 void native_send_call_func_single_ipi(int cpu);
493 @@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
494 {
495 return cpumask_weight(cpu_callout_mask);
496 }
497 +#else /* !CONFIG_SMP */
498 +#define wbinvd_on_cpu(cpu) wbinvd()
499 +static inline int wbinvd_on_all_cpus(void)
500 +{
501 + wbinvd();
502 + return 0;
503 +}
504 #endif /* CONFIG_SMP */
505
506 extern unsigned disabled_cpus __cpuinitdata;
507 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
508 index adb0ba0..2e77516 100644
509 --- a/arch/x86/kernel/amd_iommu.c
510 +++ b/arch/x86/kernel/amd_iommu.c
511 @@ -2298,7 +2298,7 @@ static void cleanup_domain(struct protection_domain *domain)
512 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
513 struct device *dev = dev_data->dev;
514
515 - do_detach(dev);
516 + __detach_device(dev);
517 atomic_set(&dev_data->bind, 0);
518 }
519
520 @@ -2379,9 +2379,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
521
522 free_pagetable(domain);
523
524 - domain_id_free(domain->id);
525 -
526 - kfree(domain);
527 + protection_domain_free(domain);
528
529 dom->priv = NULL;
530 }
531 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
532 index 9dc91b4..883d619 100644
533 --- a/arch/x86/kernel/amd_iommu_init.c
534 +++ b/arch/x86/kernel/amd_iommu_init.c
535 @@ -1288,6 +1288,8 @@ static int __init amd_iommu_init(void)
536 if (ret)
537 goto free;
538
539 + enable_iommus();
540 +
541 if (iommu_pass_through)
542 ret = amd_iommu_init_passthrough();
543 else
544 @@ -1300,8 +1302,6 @@ static int __init amd_iommu_init(void)
545
546 amd_iommu_init_notifier();
547
548 - enable_iommus();
549 -
550 if (iommu_pass_through)
551 goto out;
552
553 @@ -1315,6 +1315,7 @@ out:
554 return ret;
555
556 free:
557 + disable_iommus();
558
559 amd_iommu_uninit_devices();
560
561 diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
562 index f147a95..19f2c70 100644
563 --- a/arch/x86/kernel/aperture_64.c
564 +++ b/arch/x86/kernel/aperture_64.c
565 @@ -394,6 +394,7 @@ void __init gart_iommu_hole_init(void)
566 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
567 int bus;
568 int dev_base, dev_limit;
569 + u32 ctl;
570
571 bus = bus_dev_ranges[i].bus;
572 dev_base = bus_dev_ranges[i].dev_base;
573 @@ -407,7 +408,19 @@ void __init gart_iommu_hole_init(void)
574 gart_iommu_aperture = 1;
575 x86_init.iommu.iommu_init = gart_iommu_init;
576
577 - aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
578 + ctl = read_pci_config(bus, slot, 3,
579 + AMD64_GARTAPERTURECTL);
580 +
581 + /*
582 + * Before we do anything else disable the GART. It may
583 + * still be enabled if we boot into a crash-kernel here.
584 + * Reconfiguring the GART while it is enabled could have
585 + * unknown side-effects.
586 + */
587 + ctl &= ~GARTEN;
588 + write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
589 +
590 + aper_order = (ctl >> 1) & 7;
591 aper_size = (32 * 1024 * 1024) << aper_order;
592 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
593 aper_base <<= 25;
594 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
595 index dfca210..d4df517 100644
596 --- a/arch/x86/kernel/apic/apic.c
597 +++ b/arch/x86/kernel/apic/apic.c
598 @@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
599 }
600 #endif
601
602 +#ifndef CONFIG_SMP
603 enable_IR_x2apic();
604 default_setup_apic_routing();
605 +#endif
606
607 verify_local_APIC();
608 connect_bsp_APIC();
609 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
610 index fc6c8ef..d440123 100644
611 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
612 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
613 @@ -18,6 +18,7 @@
614 #include <asm/processor.h>
615 #include <linux/smp.h>
616 #include <asm/k8.h>
617 +#include <asm/smp.h>
618
619 #define LVL_1_INST 1
620 #define LVL_1_DATA 2
621 @@ -150,7 +151,8 @@ struct _cpuid4_info {
622 union _cpuid4_leaf_ebx ebx;
623 union _cpuid4_leaf_ecx ecx;
624 unsigned long size;
625 - unsigned long can_disable;
626 + bool can_disable;
627 + unsigned int l3_indices;
628 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
629 };
630
631 @@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
632 union _cpuid4_leaf_ebx ebx;
633 union _cpuid4_leaf_ecx ecx;
634 unsigned long size;
635 - unsigned long can_disable;
636 + bool can_disable;
637 + unsigned int l3_indices;
638 };
639
640 unsigned short num_cache_leaves;
641 @@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
642 (ebx->split.ways_of_associativity + 1) - 1;
643 }
644
645 +struct _cache_attr {
646 + struct attribute attr;
647 + ssize_t (*show)(struct _cpuid4_info *, char *);
648 + ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
649 +};
650 +
651 +#ifdef CONFIG_CPU_SUP_AMD
652 +static unsigned int __cpuinit amd_calc_l3_indices(void)
653 +{
654 + /*
655 + * We're called over smp_call_function_single() and therefore
656 + * are on the correct cpu.
657 + */
658 + int cpu = smp_processor_id();
659 + int node = cpu_to_node(cpu);
660 + struct pci_dev *dev = node_to_k8_nb_misc(node);
661 + unsigned int sc0, sc1, sc2, sc3;
662 + u32 val = 0;
663 +
664 + pci_read_config_dword(dev, 0x1C4, &val);
665 +
666 + /* calculate subcache sizes */
667 + sc0 = !(val & BIT(0));
668 + sc1 = !(val & BIT(4));
669 + sc2 = !(val & BIT(8)) + !(val & BIT(9));
670 + sc3 = !(val & BIT(12)) + !(val & BIT(13));
671 +
672 + return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
673 +}
674 +
675 static void __cpuinit
676 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
677 {
678 @@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
679 if (boot_cpu_data.x86 == 0x11)
680 return;
681
682 - /* see erratum #382 */
683 - if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
684 + /* see errata #382 and #388 */
685 + if ((boot_cpu_data.x86 == 0x10) &&
686 + ((boot_cpu_data.x86_model < 0x8) ||
687 + (boot_cpu_data.x86_mask < 0x1)))
688 return;
689
690 - this_leaf->can_disable = 1;
691 + this_leaf->can_disable = true;
692 + this_leaf->l3_indices = amd_calc_l3_indices();
693 +}
694 +
695 +static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
696 + unsigned int index)
697 +{
698 + int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
699 + int node = amd_get_nb_id(cpu);
700 + struct pci_dev *dev = node_to_k8_nb_misc(node);
701 + unsigned int reg = 0;
702 +
703 + if (!this_leaf->can_disable)
704 + return -EINVAL;
705 +
706 + if (!dev)
707 + return -EINVAL;
708 +
709 + pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
710 + return sprintf(buf, "0x%08x\n", reg);
711 +}
712 +
713 +#define SHOW_CACHE_DISABLE(index) \
714 +static ssize_t \
715 +show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
716 +{ \
717 + return show_cache_disable(this_leaf, buf, index); \
718 +}
719 +SHOW_CACHE_DISABLE(0)
720 +SHOW_CACHE_DISABLE(1)
721 +
722 +static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
723 + const char *buf, size_t count, unsigned int index)
724 +{
725 + int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
726 + int node = amd_get_nb_id(cpu);
727 + struct pci_dev *dev = node_to_k8_nb_misc(node);
728 + unsigned long val = 0;
729 +
730 +#define SUBCACHE_MASK (3UL << 20)
731 +#define SUBCACHE_INDEX 0xfff
732 +
733 + if (!this_leaf->can_disable)
734 + return -EINVAL;
735 +
736 + if (!capable(CAP_SYS_ADMIN))
737 + return -EPERM;
738 +
739 + if (!dev)
740 + return -EINVAL;
741 +
742 + if (strict_strtoul(buf, 10, &val) < 0)
743 + return -EINVAL;
744 +
745 + /* do not allow writes outside of allowed bits */
746 + if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
747 + ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
748 + return -EINVAL;
749 +
750 + val |= BIT(30);
751 + pci_write_config_dword(dev, 0x1BC + index * 4, val);
752 + /*
753 + * We need to WBINVD on a core on the node containing the L3 cache which
754 + * indices we disable therefore a simple wbinvd() is not sufficient.
755 + */
756 + wbinvd_on_cpu(cpu);
757 + pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
758 + return count;
759 +}
760 +
761 +#define STORE_CACHE_DISABLE(index) \
762 +static ssize_t \
763 +store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
764 + const char *buf, size_t count) \
765 +{ \
766 + return store_cache_disable(this_leaf, buf, count, index); \
767 }
768 +STORE_CACHE_DISABLE(0)
769 +STORE_CACHE_DISABLE(1)
770 +
771 +static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
772 + show_cache_disable_0, store_cache_disable_0);
773 +static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
774 + show_cache_disable_1, store_cache_disable_1);
775 +
776 +#else /* CONFIG_CPU_SUP_AMD */
777 +static void __cpuinit
778 +amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
779 +{
780 +};
781 +#endif /* CONFIG_CPU_SUP_AMD */
782
783 static int
784 __cpuinit cpuid4_cache_lookup_regs(int index,
785 @@ -711,82 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
786 #define to_object(k) container_of(k, struct _index_kobject, kobj)
787 #define to_attr(a) container_of(a, struct _cache_attr, attr)
788
789 -static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
790 - unsigned int index)
791 -{
792 - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
793 - int node = cpu_to_node(cpu);
794 - struct pci_dev *dev = node_to_k8_nb_misc(node);
795 - unsigned int reg = 0;
796 -
797 - if (!this_leaf->can_disable)
798 - return -EINVAL;
799 -
800 - if (!dev)
801 - return -EINVAL;
802 -
803 - pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
804 - return sprintf(buf, "%x\n", reg);
805 -}
806 -
807 -#define SHOW_CACHE_DISABLE(index) \
808 -static ssize_t \
809 -show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
810 -{ \
811 - return show_cache_disable(this_leaf, buf, index); \
812 -}
813 -SHOW_CACHE_DISABLE(0)
814 -SHOW_CACHE_DISABLE(1)
815 -
816 -static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
817 - const char *buf, size_t count, unsigned int index)
818 -{
819 - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
820 - int node = cpu_to_node(cpu);
821 - struct pci_dev *dev = node_to_k8_nb_misc(node);
822 - unsigned long val = 0;
823 - unsigned int scrubber = 0;
824 -
825 - if (!this_leaf->can_disable)
826 - return -EINVAL;
827 -
828 - if (!capable(CAP_SYS_ADMIN))
829 - return -EPERM;
830 -
831 - if (!dev)
832 - return -EINVAL;
833 -
834 - if (strict_strtoul(buf, 10, &val) < 0)
835 - return -EINVAL;
836 -
837 - val |= 0xc0000000;
838 -
839 - pci_read_config_dword(dev, 0x58, &scrubber);
840 - scrubber &= ~0x1f000000;
841 - pci_write_config_dword(dev, 0x58, scrubber);
842 -
843 - pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
844 - wbinvd();
845 - pci_write_config_dword(dev, 0x1BC + index * 4, val);
846 - return count;
847 -}
848 -
849 -#define STORE_CACHE_DISABLE(index) \
850 -static ssize_t \
851 -store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
852 - const char *buf, size_t count) \
853 -{ \
854 - return store_cache_disable(this_leaf, buf, count, index); \
855 -}
856 -STORE_CACHE_DISABLE(0)
857 -STORE_CACHE_DISABLE(1)
858 -
859 -struct _cache_attr {
860 - struct attribute attr;
861 - ssize_t (*show)(struct _cpuid4_info *, char *);
862 - ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
863 -};
864 -
865 #define define_one_ro(_name) \
866 static struct _cache_attr _name = \
867 __ATTR(_name, 0444, show_##_name, NULL)
868 @@ -801,23 +849,28 @@ define_one_ro(size);
869 define_one_ro(shared_cpu_map);
870 define_one_ro(shared_cpu_list);
871
872 -static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
873 - show_cache_disable_0, store_cache_disable_0);
874 -static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
875 - show_cache_disable_1, store_cache_disable_1);
876 +#define DEFAULT_SYSFS_CACHE_ATTRS \
877 + &type.attr, \
878 + &level.attr, \
879 + &coherency_line_size.attr, \
880 + &physical_line_partition.attr, \
881 + &ways_of_associativity.attr, \
882 + &number_of_sets.attr, \
883 + &size.attr, \
884 + &shared_cpu_map.attr, \
885 + &shared_cpu_list.attr
886
887 static struct attribute *default_attrs[] = {
888 - &type.attr,
889 - &level.attr,
890 - &coherency_line_size.attr,
891 - &physical_line_partition.attr,
892 - &ways_of_associativity.attr,
893 - &number_of_sets.attr,
894 - &size.attr,
895 - &shared_cpu_map.attr,
896 - &shared_cpu_list.attr,
897 + DEFAULT_SYSFS_CACHE_ATTRS,
898 + NULL
899 +};
900 +
901 +static struct attribute *default_l3_attrs[] = {
902 + DEFAULT_SYSFS_CACHE_ATTRS,
903 +#ifdef CONFIG_CPU_SUP_AMD
904 &cache_disable_0.attr,
905 &cache_disable_1.attr,
906 +#endif
907 NULL
908 };
909
910 @@ -908,6 +961,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
911 unsigned int cpu = sys_dev->id;
912 unsigned long i, j;
913 struct _index_kobject *this_object;
914 + struct _cpuid4_info *this_leaf;
915 int retval;
916
917 retval = cpuid4_cache_sysfs_init(cpu);
918 @@ -926,6 +980,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
919 this_object = INDEX_KOBJECT_PTR(cpu, i);
920 this_object->cpu = cpu;
921 this_object->index = i;
922 +
923 + this_leaf = CPUID4_INFO_IDX(cpu, i);
924 +
925 + if (this_leaf->can_disable)
926 + ktype_cache.default_attrs = default_l3_attrs;
927 + else
928 + ktype_cache.default_attrs = default_attrs;
929 +
930 retval = kobject_init_and_add(&(this_object->kobj),
931 &ktype_cache,
932 per_cpu(ici_cache_kobject, cpu),
933 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
934 index 98819b3..c7ca8e2 100644
935 --- a/arch/x86/kernel/cpu/perf_event.c
936 +++ b/arch/x86/kernel/cpu/perf_event.c
937 @@ -245,6 +245,97 @@ static u64 __read_mostly hw_cache_event_ids
938 [PERF_COUNT_HW_CACHE_OP_MAX]
939 [PERF_COUNT_HW_CACHE_RESULT_MAX];
940
941 +static const u64 westmere_hw_cache_event_ids
942 + [PERF_COUNT_HW_CACHE_MAX]
943 + [PERF_COUNT_HW_CACHE_OP_MAX]
944 + [PERF_COUNT_HW_CACHE_RESULT_MAX] =
945 +{
946 + [ C(L1D) ] = {
947 + [ C(OP_READ) ] = {
948 + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
949 + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
950 + },
951 + [ C(OP_WRITE) ] = {
952 + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
953 + [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
954 + },
955 + [ C(OP_PREFETCH) ] = {
956 + [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
957 + [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
958 + },
959 + },
960 + [ C(L1I ) ] = {
961 + [ C(OP_READ) ] = {
962 + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
963 + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
964 + },
965 + [ C(OP_WRITE) ] = {
966 + [ C(RESULT_ACCESS) ] = -1,
967 + [ C(RESULT_MISS) ] = -1,
968 + },
969 + [ C(OP_PREFETCH) ] = {
970 + [ C(RESULT_ACCESS) ] = 0x0,
971 + [ C(RESULT_MISS) ] = 0x0,
972 + },
973 + },
974 + [ C(LL ) ] = {
975 + [ C(OP_READ) ] = {
976 + [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
977 + [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
978 + },
979 + [ C(OP_WRITE) ] = {
980 + [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
981 + [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
982 + },
983 + [ C(OP_PREFETCH) ] = {
984 + [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
985 + [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
986 + },
987 + },
988 + [ C(DTLB) ] = {
989 + [ C(OP_READ) ] = {
990 + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
991 + [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
992 + },
993 + [ C(OP_WRITE) ] = {
994 + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
995 + [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
996 + },
997 + [ C(OP_PREFETCH) ] = {
998 + [ C(RESULT_ACCESS) ] = 0x0,
999 + [ C(RESULT_MISS) ] = 0x0,
1000 + },
1001 + },
1002 + [ C(ITLB) ] = {
1003 + [ C(OP_READ) ] = {
1004 + [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1005 + [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1006 + },
1007 + [ C(OP_WRITE) ] = {
1008 + [ C(RESULT_ACCESS) ] = -1,
1009 + [ C(RESULT_MISS) ] = -1,
1010 + },
1011 + [ C(OP_PREFETCH) ] = {
1012 + [ C(RESULT_ACCESS) ] = -1,
1013 + [ C(RESULT_MISS) ] = -1,
1014 + },
1015 + },
1016 + [ C(BPU ) ] = {
1017 + [ C(OP_READ) ] = {
1018 + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1019 + [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1020 + },
1021 + [ C(OP_WRITE) ] = {
1022 + [ C(RESULT_ACCESS) ] = -1,
1023 + [ C(RESULT_MISS) ] = -1,
1024 + },
1025 + [ C(OP_PREFETCH) ] = {
1026 + [ C(RESULT_ACCESS) ] = -1,
1027 + [ C(RESULT_MISS) ] = -1,
1028 + },
1029 + },
1030 +};
1031 +
1032 static __initconst u64 nehalem_hw_cache_event_ids
1033 [PERF_COUNT_HW_CACHE_MAX]
1034 [PERF_COUNT_HW_CACHE_OP_MAX]
1035 @@ -2118,6 +2209,7 @@ static __init int intel_pmu_init(void)
1036 * Install the hw-cache-events table:
1037 */
1038 switch (boot_cpu_data.x86_model) {
1039 +
1040 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1041 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1042 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1043 @@ -2129,7 +2221,9 @@ static __init int intel_pmu_init(void)
1044 event_constraints = intel_core_event_constraints;
1045 break;
1046 default:
1047 - case 26:
1048 + case 26: /* 45 nm nehalem, "Bloomfield" */
1049 + case 30: /* 45 nm nehalem, "Lynnfield" */
1050 + case 46: /* 45 nm nehalem-ex, "Beckton" */
1051 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1052 sizeof(hw_cache_event_ids));
1053
1054 @@ -2142,6 +2236,14 @@ static __init int intel_pmu_init(void)
1055
1056 pr_cont("Atom events, ");
1057 break;
1058 +
1059 + case 37: /* 32 nm nehalem, "Clarkdale" */
1060 + case 44: /* 32 nm nehalem, "Gulftown" */
1061 + memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1062 + sizeof(hw_cache_event_ids));
1063 +
1064 + pr_cont("Westmere events, ");
1065 + break;
1066 }
1067 return 0;
1068 }
1069 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
1070 index a4849c1..ebd4c51 100644
1071 --- a/arch/x86/kernel/crash.c
1072 +++ b/arch/x86/kernel/crash.c
1073 @@ -27,7 +27,6 @@
1074 #include <asm/cpu.h>
1075 #include <asm/reboot.h>
1076 #include <asm/virtext.h>
1077 -#include <asm/x86_init.h>
1078
1079 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
1080
1081 @@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
1082 #ifdef CONFIG_HPET_TIMER
1083 hpet_disable();
1084 #endif
1085 -
1086 -#ifdef CONFIG_X86_64
1087 - x86_platform.iommu_shutdown();
1088 -#endif
1089 -
1090 crash_save_cpu(regs, safe_smp_processor_id());
1091 }
1092 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
1093 index ad80a1c..773afc9 100644
1094 --- a/arch/x86/kernel/hpet.c
1095 +++ b/arch/x86/kernel/hpet.c
1096 @@ -399,9 +399,15 @@ static int hpet_next_event(unsigned long delta,
1097 * then we might have a real hardware problem. We can not do
1098 * much about it here, but at least alert the user/admin with
1099 * a prominent warning.
1100 + * An erratum on some chipsets (ICH9,..), results in comparator read
1101 + * immediately following a write returning old value. Workaround
1102 + * for this is to read this value second time, when first
1103 + * read returns old value.
1104 */
1105 - WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
1106 + if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
1107 + WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
1108 KERN_WARNING "hpet: compare register read back failed.\n");
1109 + }
1110
1111 return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
1112 }
1113 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
1114 index bfba601..b2258ca 100644
1115 --- a/arch/x86/kernel/kgdb.c
1116 +++ b/arch/x86/kernel/kgdb.c
1117 @@ -618,8 +618,8 @@ int kgdb_arch_init(void)
1118 * portion of kgdb because this operation requires mutexs to
1119 * complete.
1120 */
1121 + hw_breakpoint_init(&attr);
1122 attr.bp_addr = (unsigned long)kgdb_arch_init;
1123 - attr.type = PERF_TYPE_BREAKPOINT;
1124 attr.bp_len = HW_BREAKPOINT_LEN_1;
1125 attr.bp_type = HW_BREAKPOINT_W;
1126 attr.disabled = 1;
1127 diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
1128 index a2c1edd..e81030f 100644
1129 --- a/arch/x86/kernel/mpparse.c
1130 +++ b/arch/x86/kernel/mpparse.c
1131 @@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
1132 {
1133 unsigned long size = get_mpc_size(mpf->physptr);
1134
1135 - reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc");
1136 + reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc");
1137 }
1138
1139 static int __init smp_scan_config(unsigned long base, unsigned long length)
1140 @@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
1141 mpf, (u64)virt_to_phys(mpf));
1142
1143 mem = virt_to_phys(mpf);
1144 - reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf");
1145 + reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf");
1146 if (mpf->physptr)
1147 smp_reserve_memory(mpf);
1148
1149 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
1150 index 34de53b..4f41b29 100644
1151 --- a/arch/x86/kernel/pci-gart_64.c
1152 +++ b/arch/x86/kernel/pci-gart_64.c
1153 @@ -564,6 +564,9 @@ static void enable_gart_translations(void)
1154
1155 enable_gart_translation(dev, __pa(agp_gatt_table));
1156 }
1157 +
1158 + /* Flush the GART-TLB to remove stale entries */
1159 + k8_flush_garts();
1160 }
1161
1162 /*
1163 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1164 index 89a49fb..28c3d81 100644
1165 --- a/arch/x86/kvm/mmu.c
1166 +++ b/arch/x86/kvm/mmu.c
1167 @@ -1502,8 +1502,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
1168 for_each_sp(pages, sp, parents, i) {
1169 kvm_mmu_zap_page(kvm, sp);
1170 mmu_pages_clear_parents(&parents);
1171 + zapped++;
1172 }
1173 - zapped += pages.nr;
1174 kvm_mmu_pages_init(parent, &parents, &pages);
1175 }
1176
1177 @@ -1554,14 +1554,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1178 */
1179
1180 if (used_pages > kvm_nr_mmu_pages) {
1181 - while (used_pages > kvm_nr_mmu_pages) {
1182 + while (used_pages > kvm_nr_mmu_pages &&
1183 + !list_empty(&kvm->arch.active_mmu_pages)) {
1184 struct kvm_mmu_page *page;
1185
1186 page = container_of(kvm->arch.active_mmu_pages.prev,
1187 struct kvm_mmu_page, link);
1188 - kvm_mmu_zap_page(kvm, page);
1189 + used_pages -= kvm_mmu_zap_page(kvm, page);
1190 used_pages--;
1191 }
1192 + kvm_nr_mmu_pages = used_pages;
1193 kvm->arch.n_free_mmu_pages = 0;
1194 }
1195 else
1196 @@ -1608,7 +1610,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1197 && !sp->role.invalid) {
1198 pgprintk("%s: zap %lx %x\n",
1199 __func__, gfn, sp->role.word);
1200 - kvm_mmu_zap_page(kvm, sp);
1201 + if (kvm_mmu_zap_page(kvm, sp))
1202 + nn = bucket->first;
1203 }
1204 }
1205 }
1206 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1207 index 1d9b338..d42e191 100644
1208 --- a/arch/x86/kvm/svm.c
1209 +++ b/arch/x86/kvm/svm.c
1210 @@ -698,29 +698,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1211 if (err)
1212 goto free_svm;
1213
1214 + err = -ENOMEM;
1215 page = alloc_page(GFP_KERNEL);
1216 - if (!page) {
1217 - err = -ENOMEM;
1218 + if (!page)
1219 goto uninit;
1220 - }
1221
1222 - err = -ENOMEM;
1223 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1224 if (!msrpm_pages)
1225 - goto uninit;
1226 + goto free_page1;
1227
1228 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1229 if (!nested_msrpm_pages)
1230 - goto uninit;
1231 -
1232 - svm->msrpm = page_address(msrpm_pages);
1233 - svm_vcpu_init_msrpm(svm->msrpm);
1234 + goto free_page2;
1235
1236 hsave_page = alloc_page(GFP_KERNEL);
1237 if (!hsave_page)
1238 - goto uninit;
1239 + goto free_page3;
1240 +
1241 svm->nested.hsave = page_address(hsave_page);
1242
1243 + svm->msrpm = page_address(msrpm_pages);
1244 + svm_vcpu_init_msrpm(svm->msrpm);
1245 +
1246 svm->nested.msrpm = page_address(nested_msrpm_pages);
1247
1248 svm->vmcb = page_address(page);
1249 @@ -737,6 +736,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1250
1251 return &svm->vcpu;
1252
1253 +free_page3:
1254 + __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1255 +free_page2:
1256 + __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1257 +free_page1:
1258 + __free_page(page);
1259 uninit:
1260 kvm_vcpu_uninit(&svm->vcpu);
1261 free_svm:
1262 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1263 index 8a8e139..3acbe19 100644
1264 --- a/arch/x86/kvm/vmx.c
1265 +++ b/arch/x86/kvm/vmx.c
1266 @@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
1267 static int __read_mostly emulate_invalid_guest_state = 0;
1268 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
1269
1270 +#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
1271 +
1272 /*
1273 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
1274 * ple_gap: upper bound on the amount of time between two successive
1275 @@ -115,7 +117,7 @@ struct vcpu_vmx {
1276 } host_state;
1277 struct {
1278 int vm86_active;
1279 - u8 save_iopl;
1280 + ulong save_rflags;
1281 struct kvm_save_segment {
1282 u16 selector;
1283 unsigned long base;
1284 @@ -787,18 +789,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1285
1286 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1287 {
1288 - unsigned long rflags;
1289 + unsigned long rflags, save_rflags;
1290
1291 rflags = vmcs_readl(GUEST_RFLAGS);
1292 - if (to_vmx(vcpu)->rmode.vm86_active)
1293 - rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1294 + if (to_vmx(vcpu)->rmode.vm86_active) {
1295 + rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1296 + save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1297 + rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1298 + }
1299 return rflags;
1300 }
1301
1302 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1303 {
1304 - if (to_vmx(vcpu)->rmode.vm86_active)
1305 + if (to_vmx(vcpu)->rmode.vm86_active) {
1306 + to_vmx(vcpu)->rmode.save_rflags = rflags;
1307 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1308 + }
1309 vmcs_writel(GUEST_RFLAGS, rflags);
1310 }
1311
1312 @@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1313 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1314
1315 flags = vmcs_readl(GUEST_RFLAGS);
1316 - flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1317 - flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1318 + flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1319 + flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1320 vmcs_writel(GUEST_RFLAGS, flags);
1321
1322 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1323 @@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1324 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1325
1326 flags = vmcs_readl(GUEST_RFLAGS);
1327 - vmx->rmode.save_iopl
1328 - = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1329 + vmx->rmode.save_rflags = flags;
1330
1331 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1332
1333 @@ -2719,6 +2725,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1334 kvm_queue_exception(vcpu, vec);
1335 return 1;
1336 case BP_VECTOR:
1337 + /*
1338 + * Update instruction length as we may reinject the exception
1339 + * from user space while in guest debugging mode.
1340 + */
1341 + to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
1342 + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1343 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1344 return 0;
1345 /* fall through */
1346 @@ -2841,6 +2853,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
1347 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
1348 /* fall through */
1349 case BP_VECTOR:
1350 + /*
1351 + * Update instruction length as we may reinject #BP from
1352 + * user space while in guest debugging mode. Reading it for
1353 + * #DB as well causes no harm, it is not used in that case.
1354 + */
1355 + vmx->vcpu.arch.event_exit_inst_len =
1356 + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1357 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1358 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
1359 kvm_run->debug.arch.exception = ex_no;
1360 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1361 index e900908..dd78927 100644
1362 --- a/arch/x86/kvm/x86.c
1363 +++ b/arch/x86/kvm/x86.c
1364 @@ -384,21 +384,16 @@ out:
1365 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1366 {
1367 if (cr0 & CR0_RESERVED_BITS) {
1368 - printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
1369 - cr0, vcpu->arch.cr0);
1370 kvm_inject_gp(vcpu, 0);
1371 return;
1372 }
1373
1374 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
1375 - printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
1376 kvm_inject_gp(vcpu, 0);
1377 return;
1378 }
1379
1380 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
1381 - printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
1382 - "and a clear PE flag\n");
1383 kvm_inject_gp(vcpu, 0);
1384 return;
1385 }
1386 @@ -409,15 +404,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1387 int cs_db, cs_l;
1388
1389 if (!is_pae(vcpu)) {
1390 - printk(KERN_DEBUG "set_cr0: #GP, start paging "
1391 - "in long mode while PAE is disabled\n");
1392 kvm_inject_gp(vcpu, 0);
1393 return;
1394 }
1395 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1396 if (cs_l) {
1397 - printk(KERN_DEBUG "set_cr0: #GP, start paging "
1398 - "in long mode while CS.L == 1\n");
1399 kvm_inject_gp(vcpu, 0);
1400 return;
1401
1402 @@ -425,8 +416,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1403 } else
1404 #endif
1405 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1406 - printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
1407 - "reserved bits\n");
1408 kvm_inject_gp(vcpu, 0);
1409 return;
1410 }
1411 @@ -453,28 +442,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1412 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
1413
1414 if (cr4 & CR4_RESERVED_BITS) {
1415 - printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
1416 kvm_inject_gp(vcpu, 0);
1417 return;
1418 }
1419
1420 if (is_long_mode(vcpu)) {
1421 if (!(cr4 & X86_CR4_PAE)) {
1422 - printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
1423 - "in long mode\n");
1424 kvm_inject_gp(vcpu, 0);
1425 return;
1426 }
1427 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1428 && ((cr4 ^ old_cr4) & pdptr_bits)
1429 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1430 - printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
1431 kvm_inject_gp(vcpu, 0);
1432 return;
1433 }
1434
1435 if (cr4 & X86_CR4_VMXE) {
1436 - printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
1437 kvm_inject_gp(vcpu, 0);
1438 return;
1439 }
1440 @@ -495,21 +479,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1441
1442 if (is_long_mode(vcpu)) {
1443 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
1444 - printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
1445 kvm_inject_gp(vcpu, 0);
1446 return;
1447 }
1448 } else {
1449 if (is_pae(vcpu)) {
1450 if (cr3 & CR3_PAE_RESERVED_BITS) {
1451 - printk(KERN_DEBUG
1452 - "set_cr3: #GP, reserved bits\n");
1453 kvm_inject_gp(vcpu, 0);
1454 return;
1455 }
1456 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
1457 - printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
1458 - "reserved bits\n");
1459 kvm_inject_gp(vcpu, 0);
1460 return;
1461 }
1462 @@ -541,7 +520,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
1463 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1464 {
1465 if (cr8 & CR8_RESERVED_BITS) {
1466 - printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
1467 kvm_inject_gp(vcpu, 0);
1468 return;
1469 }
1470 @@ -595,15 +573,12 @@ static u32 emulated_msrs[] = {
1471 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1472 {
1473 if (efer & efer_reserved_bits) {
1474 - printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1475 - efer);
1476 kvm_inject_gp(vcpu, 0);
1477 return;
1478 }
1479
1480 if (is_paging(vcpu)
1481 && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1482 - printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
1483 kvm_inject_gp(vcpu, 0);
1484 return;
1485 }
1486 @@ -613,7 +588,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1487
1488 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1489 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
1490 - printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
1491 kvm_inject_gp(vcpu, 0);
1492 return;
1493 }
1494 @@ -624,7 +598,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1495
1496 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1497 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
1498 - printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
1499 kvm_inject_gp(vcpu, 0);
1500 return;
1501 }
1502 @@ -913,9 +886,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1503 if (msr >= MSR_IA32_MC0_CTL &&
1504 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1505 u32 offset = msr - MSR_IA32_MC0_CTL;
1506 - /* only 0 or all 1s can be written to IA32_MCi_CTL */
1507 + /* only 0 or all 1s can be written to IA32_MCi_CTL
1508 + * some Linux kernels though clear bit 10 in bank 4 to
1509 + * workaround a BIOS/GART TBL issue on AMD K8s, ignore
1510 + * this to avoid an uncatched #GP in the guest
1511 + */
1512 if ((offset & 0x3) == 0 &&
1513 - data != 0 && data != ~(u64)0)
1514 + data != 0 && (data | (1 << 10)) != ~(u64)0)
1515 return -1;
1516 vcpu->arch.mce_banks[offset] = data;
1517 break;
1518 @@ -2366,7 +2343,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1519 struct kvm_dirty_log *log)
1520 {
1521 int r;
1522 - int n;
1523 + unsigned long n;
1524 struct kvm_memory_slot *memslot;
1525 int is_dirty = 0;
1526
1527 @@ -2382,7 +2359,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1528 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1529 spin_unlock(&kvm->mmu_lock);
1530 memslot = &kvm->memslots[log->slot];
1531 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1532 + n = kvm_dirty_bitmap_bytes(memslot);
1533 memset(memslot->dirty_bitmap, 0, n);
1534 }
1535 r = 0;
1536 @@ -4599,6 +4576,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
1537 int ret = 0;
1538 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
1539 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
1540 + u32 desc_limit;
1541
1542 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
1543
1544 @@ -4621,7 +4599,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
1545 }
1546 }
1547
1548 - if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
1549 + desc_limit = get_desc_limit(&nseg_desc);
1550 + if (!nseg_desc.p ||
1551 + ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
1552 + desc_limit < 0x2b)) {
1553 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
1554 return 1;
1555 }
1556 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
1557 index cffd754..ddef409 100644
1558 --- a/arch/x86/lib/Makefile
1559 +++ b/arch/x86/lib/Makefile
1560 @@ -14,7 +14,7 @@ $(obj)/inat.o: $(obj)/inat-tables.c
1561
1562 clean-files := inat-tables.c
1563
1564 -obj-$(CONFIG_SMP) += msr-smp.o
1565 +obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
1566
1567 lib-y := delay.o
1568 lib-y += thunk_$(BITS).o
1569 @@ -39,4 +39,5 @@ else
1570 lib-y += thunk_64.o clear_page_64.o copy_page_64.o
1571 lib-y += memmove_64.o memset_64.o
1572 lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
1573 + lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
1574 endif
1575 diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
1576 new file mode 100644
1577 index 0000000..a3c6688
1578 --- /dev/null
1579 +++ b/arch/x86/lib/cache-smp.c
1580 @@ -0,0 +1,19 @@
1581 +#include <linux/smp.h>
1582 +#include <linux/module.h>
1583 +
1584 +static void __wbinvd(void *dummy)
1585 +{
1586 + wbinvd();
1587 +}
1588 +
1589 +void wbinvd_on_cpu(int cpu)
1590 +{
1591 + smp_call_function_single(cpu, __wbinvd, NULL, 1);
1592 +}
1593 +EXPORT_SYMBOL(wbinvd_on_cpu);
1594 +
1595 +int wbinvd_on_all_cpus(void)
1596 +{
1597 + return on_each_cpu(__wbinvd, NULL, 1);
1598 +}
1599 +EXPORT_SYMBOL(wbinvd_on_all_cpus);
1600 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
1601 new file mode 100644
1602 index 0000000..15acecf
1603 --- /dev/null
1604 +++ b/arch/x86/lib/rwsem_64.S
1605 @@ -0,0 +1,81 @@
1606 +/*
1607 + * x86-64 rwsem wrappers
1608 + *
1609 + * This interfaces the inline asm code to the slow-path
1610 + * C routines. We need to save the call-clobbered regs
1611 + * that the asm does not mark as clobbered, and move the
1612 + * argument from %rax to %rdi.
1613 + *
1614 + * NOTE! We don't need to save %rax, because the functions
1615 + * will always return the semaphore pointer in %rax (which
1616 + * is also the input argument to these helpers)
1617 + *
1618 + * The following can clobber %rdx because the asm clobbers it:
1619 + * call_rwsem_down_write_failed
1620 + * call_rwsem_wake
1621 + * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
1622 + */
1623 +
1624 +#include <linux/linkage.h>
1625 +#include <asm/rwlock.h>
1626 +#include <asm/alternative-asm.h>
1627 +#include <asm/frame.h>
1628 +#include <asm/dwarf2.h>
1629 +
1630 +#define save_common_regs \
1631 + pushq %rdi; \
1632 + pushq %rsi; \
1633 + pushq %rcx; \
1634 + pushq %r8; \
1635 + pushq %r9; \
1636 + pushq %r10; \
1637 + pushq %r11
1638 +
1639 +#define restore_common_regs \
1640 + popq %r11; \
1641 + popq %r10; \
1642 + popq %r9; \
1643 + popq %r8; \
1644 + popq %rcx; \
1645 + popq %rsi; \
1646 + popq %rdi
1647 +
1648 +/* Fix up special calling conventions */
1649 +ENTRY(call_rwsem_down_read_failed)
1650 + save_common_regs
1651 + pushq %rdx
1652 + movq %rax,%rdi
1653 + call rwsem_down_read_failed
1654 + popq %rdx
1655 + restore_common_regs
1656 + ret
1657 + ENDPROC(call_rwsem_down_read_failed)
1658 +
1659 +ENTRY(call_rwsem_down_write_failed)
1660 + save_common_regs
1661 + movq %rax,%rdi
1662 + call rwsem_down_write_failed
1663 + restore_common_regs
1664 + ret
1665 + ENDPROC(call_rwsem_down_write_failed)
1666 +
1667 +ENTRY(call_rwsem_wake)
1668 + decw %dx /* do nothing if still outstanding active readers */
1669 + jnz 1f
1670 + save_common_regs
1671 + movq %rax,%rdi
1672 + call rwsem_wake
1673 + restore_common_regs
1674 +1: ret
1675 + ENDPROC(call_rwsem_wake)
1676 +
1677 +/* Fix up special calling conventions */
1678 +ENTRY(call_rwsem_downgrade_wake)
1679 + save_common_regs
1680 + pushq %rdx
1681 + movq %rax,%rdi
1682 + call rwsem_downgrade_wake
1683 + popq %rdx
1684 + restore_common_regs
1685 + ret
1686 + ENDPROC(call_rwsem_downgrade_wake)
1687 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
1688 index 0696d50..b02f6d8 100644
1689 --- a/arch/x86/pci/irq.c
1690 +++ b/arch/x86/pci/irq.c
1691 @@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
1692 case PCI_DEVICE_ID_INTEL_ICH10_1:
1693 case PCI_DEVICE_ID_INTEL_ICH10_2:
1694 case PCI_DEVICE_ID_INTEL_ICH10_3:
1695 + case PCI_DEVICE_ID_INTEL_CPT_LPC1:
1696 + case PCI_DEVICE_ID_INTEL_CPT_LPC2:
1697 r->name = "PIIX/ICH";
1698 r->get = pirq_piix_get;
1699 r->set = pirq_piix_set;
1700 diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
1701 index b641388..ad47dae 100644
1702 --- a/arch/x86/power/hibernate_asm_32.S
1703 +++ b/arch/x86/power/hibernate_asm_32.S
1704 @@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
1705 ret
1706
1707 ENTRY(restore_image)
1708 + movl mmu_cr4_features, %ecx
1709 movl resume_pg_dir, %eax
1710 subl $__PAGE_OFFSET, %eax
1711 movl %eax, %cr3
1712
1713 + jecxz 1f # cr4 Pentium and higher, skip if zero
1714 + andl $~(X86_CR4_PGE), %ecx
1715 + movl %ecx, %cr4; # turn off PGE
1716 + movl %cr3, %eax; # flush TLB
1717 + movl %eax, %cr3
1718 +1:
1719 movl restore_pblist, %edx
1720 .p2align 4,,7
1721
1722 @@ -54,16 +61,8 @@ done:
1723 movl $swapper_pg_dir, %eax
1724 subl $__PAGE_OFFSET, %eax
1725 movl %eax, %cr3
1726 - /* Flush TLB, including "global" things (vmalloc) */
1727 movl mmu_cr4_features, %ecx
1728 jecxz 1f # cr4 Pentium and higher, skip if zero
1729 - movl %ecx, %edx
1730 - andl $~(X86_CR4_PGE), %edx
1731 - movl %edx, %cr4; # turn off PGE
1732 -1:
1733 - movl %cr3, %eax; # flush TLB
1734 - movl %eax, %cr3
1735 - jecxz 1f # cr4 Pentium and higher, skip if zero
1736 movl %ecx, %cr4; # turn PGE back on
1737 1:
1738
1739 diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
1740 index 52fec07..83b6252 100644
1741 --- a/drivers/acpi/acpica/exprep.c
1742 +++ b/drivers/acpi/acpica/exprep.c
1743 @@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
1744
1745 acpi_ut_add_reference(obj_desc->field.region_obj);
1746
1747 + /* allow full data read from EC address space */
1748 + if (obj_desc->field.region_obj->region.space_id ==
1749 + ACPI_ADR_SPACE_EC) {
1750 + if (obj_desc->common_field.bit_length > 8) {
1751 + unsigned width =
1752 + ACPI_ROUND_BITS_UP_TO_BYTES(
1753 + obj_desc->common_field.bit_length);
1754 + // access_bit_width is u8, don't overflow it
1755 + if (width > 8)
1756 + width = 8;
1757 + obj_desc->common_field.access_byte_width =
1758 + width;
1759 + obj_desc->common_field.access_bit_width =
1760 + 8 * width;
1761 + }
1762 + }
1763 +
1764 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
1765 "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
1766 obj_desc->field.start_field_bit_offset,
1767 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1768 index d6471bb..fc67d11 100644
1769 --- a/drivers/acpi/ec.c
1770 +++ b/drivers/acpi/ec.c
1771 @@ -589,12 +589,12 @@ static u32 acpi_ec_gpe_handler(void *data)
1772
1773 static acpi_status
1774 acpi_ec_space_handler(u32 function, acpi_physical_address address,
1775 - u32 bits, acpi_integer *value,
1776 + u32 bits, acpi_integer *value64,
1777 void *handler_context, void *region_context)
1778 {
1779 struct acpi_ec *ec = handler_context;
1780 - int result = 0, i;
1781 - u8 temp = 0;
1782 + int result = 0, i, bytes = bits / 8;
1783 + u8 *value = (u8 *)value64;
1784
1785 if ((address > 0xFF) || !value || !handler_context)
1786 return AE_BAD_PARAMETER;
1787 @@ -602,32 +602,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
1788 if (function != ACPI_READ && function != ACPI_WRITE)
1789 return AE_BAD_PARAMETER;
1790
1791 - if (bits != 8 && acpi_strict)
1792 - return AE_BAD_PARAMETER;
1793 -
1794 - if (EC_FLAGS_MSI)
1795 + if (EC_FLAGS_MSI || bits > 8)
1796 acpi_ec_burst_enable(ec);
1797
1798 - if (function == ACPI_READ) {
1799 - result = acpi_ec_read(ec, address, &temp);
1800 - *value = temp;
1801 - } else {
1802 - temp = 0xff & (*value);
1803 - result = acpi_ec_write(ec, address, temp);
1804 - }
1805 -
1806 - for (i = 8; unlikely(bits - i > 0); i += 8) {
1807 - ++address;
1808 - if (function == ACPI_READ) {
1809 - result = acpi_ec_read(ec, address, &temp);
1810 - (*value) |= ((acpi_integer)temp) << i;
1811 - } else {
1812 - temp = 0xff & ((*value) >> i);
1813 - result = acpi_ec_write(ec, address, temp);
1814 - }
1815 - }
1816 + for (i = 0; i < bytes; ++i, ++address, ++value)
1817 + result = (function == ACPI_READ) ?
1818 + acpi_ec_read(ec, address, value) :
1819 + acpi_ec_write(ec, address, *value);
1820
1821 - if (EC_FLAGS_MSI)
1822 + if (EC_FLAGS_MSI || bits > 8)
1823 acpi_ec_burst_disable(ec);
1824
1825 switch (result) {
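
With this rewrite the EC handler moves data one byte at a time, treating the 64-bit ACPI value as a byte array, so a field wider than 8 bits becomes a series of single-byte EC transactions at consecutive addresses. A rough sketch of the read path for a hypothetical 16-bit field at EC offset 0x40 (low byte first, as the cast in the hunk implies):

	u8 *value = (u8 *)value64;	/* view the acpi_integer as bytes */
	int i, bytes = bits / 8;	/* bits == 16 -> two byte reads */

	/* reads 0x40 into value[0], then 0x41 into value[1] */
	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = acpi_ec_read(ec, address, value);
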
1826 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1827 index 9e2feb6..462200d 100644
1828 --- a/drivers/ata/ahci.c
1829 +++ b/drivers/ata/ahci.c
1830 @@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1831 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
1832 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
1833 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
1834 + { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
1835 + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
1836 + { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
1837 + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
1838 + { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
1839 + { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
1840
1841 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
1842 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
1843 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
1844 index 6f3f225..b5f614b 100644
1845 --- a/drivers/ata/ata_piix.c
1846 +++ b/drivers/ata/ata_piix.c
1847 @@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
1848 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1849 /* SATA Controller IDE (PCH) */
1850 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1851 + /* SATA Controller IDE (CPT) */
1852 + { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1853 + /* SATA Controller IDE (CPT) */
1854 + { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
1855 + /* SATA Controller IDE (CPT) */
1856 + { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1857 + /* SATA Controller IDE (CPT) */
1858 + { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
1859 { } /* terminate list */
1860 };
1861
1862 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1863 index 6728328..2401c9c 100644
1864 --- a/drivers/ata/libata-core.c
1865 +++ b/drivers/ata/libata-core.c
1866 @@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1867 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
1868 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
1869
1870 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
1871 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
1872 +
1873 /* devices which puke on READ_NATIVE_MAX */
1874 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
1875 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
1876 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
1877 index be7c395..ad64750 100644
1878 --- a/drivers/ata/pata_via.c
1879 +++ b/drivers/ata/pata_via.c
1880 @@ -697,6 +697,7 @@ static const struct pci_device_id via[] = {
1881 { PCI_VDEVICE(VIA, 0x3164), },
1882 { PCI_VDEVICE(VIA, 0x5324), },
1883 { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
1884 + { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
1885
1886 { },
1887 };
1888 diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
1889 index 3999a5f..8a713f1 100644
1890 --- a/drivers/char/agp/intel-agp.c
1891 +++ b/drivers/char/agp/intel-agp.c
1892 @@ -8,6 +8,7 @@
1893 #include <linux/kernel.h>
1894 #include <linux/pagemap.h>
1895 #include <linux/agp_backend.h>
1896 +#include <asm/smp.h>
1897 #include "agp.h"
1898
1899 /*
1900 @@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
1901 intel_i830_fini_flush();
1902 }
1903
1904 -static void
1905 -do_wbinvd(void *null)
1906 -{
1907 - wbinvd();
1908 -}
1909 -
1910 /* The chipset_flush interface needs to get data that has already been
1911 * flushed out of the CPU all the way out to main memory, because the GPU
1912 * doesn't snoop those buffers.
1913 @@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
1914
1915 memset(pg, 0, 1024);
1916
1917 - if (cpu_has_clflush) {
1918 + if (cpu_has_clflush)
1919 clflush_cache_range(pg, 1024);
1920 - } else {
1921 - if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
1922 - printk(KERN_ERR "Timed out waiting for cache flush.\n");
1923 - }
1924 + else if (wbinvd_on_all_cpus() != 0)
1925 + printk(KERN_ERR "Timed out waiting for cache flush.\n");
1926 }
1927
1928 /* The intel i830 automatically initializes the agp aperture during POST.
1929 diff --git a/drivers/char/raw.c b/drivers/char/raw.c
1930 index 64acd05..9abc3a1 100644
1931 --- a/drivers/char/raw.c
1932 +++ b/drivers/char/raw.c
1933 @@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
1934 .aio_read = generic_file_aio_read,
1935 .write = do_sync_write,
1936 .aio_write = blkdev_aio_write,
1937 + .fsync = block_fsync,
1938 .open = raw_open,
1939 .release= raw_release,
1940 .ioctl = raw_ioctl,
1941 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
1942 index dcb9083..76253cf 100644
1943 --- a/drivers/char/tty_io.c
1944 +++ b/drivers/char/tty_io.c
1945 @@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
1946 list_del_init(&tty->tty_files);
1947 file_list_unlock();
1948
1949 + put_pid(tty->pgrp);
1950 + put_pid(tty->session);
1951 free_tty_struct(tty);
1952 }
1953
1954 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1955 index 7d0f00a..99907c3 100644
1956 --- a/drivers/gpu/drm/drm_crtc_helper.c
1957 +++ b/drivers/gpu/drm/drm_crtc_helper.c
1958 @@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
1959 if (connector->status == connector_status_disconnected) {
1960 DRM_DEBUG_KMS("%s is disconnected\n",
1961 drm_get_connector_name(connector));
1962 + drm_mode_connector_update_edid_property(connector, NULL);
1963 goto prune;
1964 }
1965
1966 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1967 index ab6c973..bfd0e4a 100644
1968 --- a/drivers/gpu/drm/drm_edid.c
1969 +++ b/drivers/gpu/drm/drm_edid.c
1970 @@ -85,6 +85,8 @@ static struct edid_quirk {
1971
1972 /* Envision Peripherals, Inc. EN-7100e */
1973 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
1974 + /* Envision EN2028 */
1975 + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
1976
1977 /* Funai Electronics PM36B */
1978 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
1979 @@ -707,15 +709,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
1980 mode->vsync_end = mode->vsync_start + vsync_pulse_width;
1981 mode->vtotal = mode->vdisplay + vblank;
1982
1983 - /* perform the basic check for the detailed timing */
1984 - if (mode->hsync_end > mode->htotal ||
1985 - mode->vsync_end > mode->vtotal) {
1986 - drm_mode_destroy(dev, mode);
1987 - DRM_DEBUG_KMS("Incorrect detailed timing. "
1988 - "Sync is beyond the blank.\n");
1989 - return NULL;
1990 - }
1991 -
1992 /* Some EDIDs have bogus h/vtotal values */
1993 if (mode->hsync_end > mode->htotal)
1994 mode->htotal = mode->hsync_end + 1;
1995 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
1996 index 08d14df..4804872 100644
1997 --- a/drivers/gpu/drm/drm_fops.c
1998 +++ b/drivers/gpu/drm/drm_fops.c
1999 @@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
2000 spin_unlock(&dev->count_lock);
2001 }
2002 out:
2003 - mutex_lock(&dev->struct_mutex);
2004 - if (minor->type == DRM_MINOR_LEGACY) {
2005 - BUG_ON((dev->dev_mapping != NULL) &&
2006 - (dev->dev_mapping != inode->i_mapping));
2007 - if (dev->dev_mapping == NULL)
2008 - dev->dev_mapping = inode->i_mapping;
2009 + if (!retcode) {
2010 + mutex_lock(&dev->struct_mutex);
2011 + if (minor->type == DRM_MINOR_LEGACY) {
2012 + if (dev->dev_mapping == NULL)
2013 + dev->dev_mapping = inode->i_mapping;
2014 + else if (dev->dev_mapping != inode->i_mapping)
2015 + retcode = -ENODEV;
2016 + }
2017 + mutex_unlock(&dev->struct_mutex);
2018 }
2019 - mutex_unlock(&dev->struct_mutex);
2020
2021 return retcode;
2022 }
2023 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
2024 index 93031a7..1238bc9 100644
2025 --- a/drivers/gpu/drm/i915/intel_lvds.c
2026 +++ b/drivers/gpu/drm/i915/intel_lvds.c
2027 @@ -899,6 +899,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
2028 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
2029 },
2030 },
2031 + {
2032 + .callback = intel_no_lvds_dmi_callback,
2033 + .ident = "Clientron U800",
2034 + .matches = {
2035 + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
2036 + DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
2037 + },
2038 + },
2039
2040 { } /* terminating entry */
2041 };
2042 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
2043 index d75788f..b1f929d 100644
2044 --- a/drivers/gpu/drm/radeon/atom.c
2045 +++ b/drivers/gpu/drm/radeon/atom.c
2046 @@ -881,11 +881,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
2047 uint8_t attr = U8((*ptr)++), shift;
2048 uint32_t saved, dst;
2049 int dptr = *ptr;
2050 + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2051 SDEBUG(" dst: ");
2052 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2053 + /* op needs the full dst value */
2054 + dst = saved;
2055 shift = atom_get_src(ctx, attr, ptr);
2056 SDEBUG(" shift: %d\n", shift);
2057 dst <<= shift;
2058 + dst &= atom_arg_mask[dst_align];
2059 + dst >>= atom_arg_shift[dst_align];
2060 SDEBUG(" dst: ");
2061 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2062 }
2063 @@ -895,11 +900,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
2064 uint8_t attr = U8((*ptr)++), shift;
2065 uint32_t saved, dst;
2066 int dptr = *ptr;
2067 + uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
2068 SDEBUG(" dst: ");
2069 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
2070 + /* op needs the full dst value */
2071 + dst = saved;
2072 shift = atom_get_src(ctx, attr, ptr);
2073 SDEBUG(" shift: %d\n", shift);
2074 dst >>= shift;
2075 + dst &= atom_arg_mask[dst_align];
2076 + dst >>= atom_arg_shift[dst_align];
2077 SDEBUG(" dst: ");
2078 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
2079 }
2080 diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
2081 index 43b55a0..5bdfaf2 100644
2082 --- a/drivers/gpu/drm/radeon/r300.c
2083 +++ b/drivers/gpu/drm/radeon/r300.c
2084 @@ -364,11 +364,12 @@ void r300_gpu_init(struct radeon_device *rdev)
2085
2086 r100_hdp_reset(rdev);
2087 /* FIXME: rv380 one pipes ? */
2088 - if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
2089 + if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
2090 + (rdev->family == CHIP_R350)) {
2091 /* r300,r350 */
2092 rdev->num_gb_pipes = 2;
2093 } else {
2094 - /* rv350,rv370,rv380 */
2095 + /* rv350,rv370,rv380,r300 AD */
2096 rdev->num_gb_pipes = 1;
2097 }
2098 rdev->num_z_pipes = 1;
2099 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2100 index e7b1944..81b832e 100644
2101 --- a/drivers/gpu/drm/radeon/radeon_combios.c
2102 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
2103 @@ -670,7 +670,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
2104 dac = RBIOS8(dac_info + 0x3) & 0xf;
2105 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
2106 }
2107 - found = 1;
2108 + /* if the values are all zeros, use the table */
2109 + if (p_dac->ps2_pdac_adj)
2110 + found = 1;
2111 }
2112
2113 out:
2114 @@ -812,7 +814,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2115 bg = RBIOS8(dac_info + 0x10) & 0xf;
2116 dac = RBIOS8(dac_info + 0x11) & 0xf;
2117 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2118 - found = 1;
2119 + /* if the values are all zeros, use the table */
2120 + if (tv_dac->ps2_tvdac_adj)
2121 + found = 1;
2122 } else if (rev > 1) {
2123 bg = RBIOS8(dac_info + 0xc) & 0xf;
2124 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
2125 @@ -825,7 +829,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2126 bg = RBIOS8(dac_info + 0xe) & 0xf;
2127 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
2128 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
2129 - found = 1;
2130 + /* if the values are all zeros, use the table */
2131 + if (tv_dac->ps2_tvdac_adj)
2132 + found = 1;
2133 }
2134 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
2135 }
2136 @@ -842,7 +848,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2137 (bg << 16) | (dac << 20);
2138 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2139 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2140 - found = 1;
2141 + /* if the values are all zeros, use the table */
2142 + if (tv_dac->ps2_tvdac_adj)
2143 + found = 1;
2144 } else {
2145 bg = RBIOS8(dac_info + 0x4) & 0xf;
2146 dac = RBIOS8(dac_info + 0x5) & 0xf;
2147 @@ -850,7 +858,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
2148 (bg << 16) | (dac << 20);
2149 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
2150 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
2151 - found = 1;
2152 + /* if the values are all zeros, use the table */
2153 + if (tv_dac->ps2_tvdac_adj)
2154 + found = 1;
2155 }
2156 } else {
2157 DRM_INFO("No TV DAC info found in BIOS\n");
2158 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2159 index 65f8194..2bdfbcd 100644
2160 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2161 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2162 @@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2163 {
2164 struct drm_device *dev = connector->dev;
2165 struct drm_connector *conflict;
2166 + struct radeon_connector *radeon_conflict;
2167 int i;
2168
2169 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
2170 if (conflict == connector)
2171 continue;
2172
2173 + radeon_conflict = to_radeon_connector(conflict);
2174 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
2175 if (conflict->encoder_ids[i] == 0)
2176 break;
2177 @@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
2178 if (conflict->status != connector_status_connected)
2179 continue;
2180
2181 + if (radeon_conflict->use_digital)
2182 + continue;
2183 +
2184 if (priority == true) {
2185 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
2186 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
2187 @@ -315,7 +320,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
2188 radeon_encoder = to_radeon_encoder(encoder);
2189 if (!radeon_encoder->enc_priv)
2190 return 0;
2191 - if (rdev->is_atom_bios) {
2192 + if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
2193 struct radeon_encoder_atom_dac *dac_int;
2194 dac_int = radeon_encoder->enc_priv;
2195 dac_int->tv_std = val;
2196 diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
2197 index 06123ba..f129bbb 100644
2198 --- a/drivers/gpu/drm/radeon/radeon_cp.c
2199 +++ b/drivers/gpu/drm/radeon/radeon_cp.c
2200 @@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
2201 return -EBUSY;
2202 }
2203
2204 -static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2205 +static void radeon_init_pipes(struct drm_device *dev)
2206 {
2207 + drm_radeon_private_t *dev_priv = dev->dev_private;
2208 uint32_t gb_tile_config, gb_pipe_sel = 0;
2209
2210 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
2211 @@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
2212 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
2213 } else {
2214 /* R3xx */
2215 - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
2216 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
2217 + dev->pdev->device != 0x4144) ||
2218 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
2219 dev_priv->num_gb_pipes = 2;
2220 } else {
2221 - /* R3Vxx */
2222 + /* RV3xx/R300 AD */
2223 dev_priv->num_gb_pipes = 1;
2224 }
2225 }
2226 @@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
2227
2228 /* setup the raster pipes */
2229 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
2230 - radeon_init_pipes(dev_priv);
2231 + radeon_init_pipes(dev);
2232
2233 /* Reset the CP ring */
2234 radeon_do_cp_reset(dev_priv);
2235 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
2236 index e9d0850..9933c2c 100644
2237 --- a/drivers/gpu/drm/radeon/radeon_cs.c
2238 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
2239 @@ -193,11 +193,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2240 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
2241 }
2242 radeon_bo_list_unreserve(&parser->validated);
2243 - for (i = 0; i < parser->nrelocs; i++) {
2244 - if (parser->relocs[i].gobj) {
2245 - mutex_lock(&parser->rdev->ddev->struct_mutex);
2246 - drm_gem_object_unreference(parser->relocs[i].gobj);
2247 - mutex_unlock(&parser->rdev->ddev->struct_mutex);
2248 + if (parser->relocs != NULL) {
2249 + for (i = 0; i < parser->nrelocs; i++) {
2250 + if (parser->relocs[i].gobj) {
2251 + mutex_lock(&parser->rdev->ddev->struct_mutex);
2252 + drm_gem_object_unreference(parser->relocs[i].gobj);
2253 + mutex_unlock(&parser->rdev->ddev->struct_mutex);
2254 + }
2255 }
2256 }
2257 kfree(parser->track);
2258 @@ -246,7 +248,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2259 }
2260 r = radeon_cs_parser_relocs(&parser);
2261 if (r) {
2262 - DRM_ERROR("Failed to parse relocation !\n");
2263 + if (r != -ERESTARTSYS)
2264 + DRM_ERROR("Failed to parse relocation %d!\n", r);
2265 radeon_cs_parser_fini(&parser, r);
2266 mutex_unlock(&rdev->cs_mutex);
2267 return r;
2268 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2269 index 768b150..509ba3f 100644
2270 --- a/drivers/gpu/drm/radeon/radeon_device.c
2271 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2272 @@ -655,6 +655,14 @@ int radeon_device_init(struct radeon_device *rdev,
2273 return r;
2274 radeon_check_arguments(rdev);
2275
1276 + /* all of the newer IGP chips have an internal gart.
2277 + * However some rs4xx report as AGP, so remove that here.
2278 + */
2279 + if ((rdev->family >= CHIP_RS400) &&
2280 + (rdev->flags & RADEON_IS_IGP)) {
2281 + rdev->flags &= ~RADEON_IS_AGP;
2282 + }
2283 +
2284 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
2285 radeon_agp_disable(rdev);
2286 }
2287 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
2288 index 3c91724..7626bd5 100644
2289 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
2290 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
2291 @@ -1276,8 +1276,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2292 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
2293 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
2294 atombios_dac_setup(encoder, ATOM_ENABLE);
2295 - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2296 - atombios_tv_setup(encoder, ATOM_ENABLE);
2297 + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
2298 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
2299 + atombios_tv_setup(encoder, ATOM_ENABLE);
2300 + else
2301 + atombios_tv_setup(encoder, ATOM_DISABLE);
2302 + }
2303 break;
2304 }
2305 atombios_apply_encoder_quirks(encoder, adjusted_mode);
2306 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2307 index 417684d..f2ed27c 100644
2308 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2309 +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
2310 @@ -57,6 +57,10 @@
2311 #define NTSC_TV_PLL_N_14 693
2312 #define NTSC_TV_PLL_P_14 7
2313
2314 +#define PAL_TV_PLL_M_14 19
2315 +#define PAL_TV_PLL_N_14 353
2316 +#define PAL_TV_PLL_P_14 5
2317 +
2318 #define VERT_LEAD_IN_LINES 2
2319 #define FRAC_BITS 0xe
2320 #define FRAC_MASK 0x3fff
2321 @@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
2322 630627, /* defRestart */
2323 347, /* crtcPLL_N */
2324 14, /* crtcPLL_M */
2325 - 8, /* crtcPLL_postDiv */
2326 + 8, /* crtcPLL_postDiv */
2327 1022, /* pixToTV */
2328 },
2329 + { /* PAL timing for 14 Mhz ref clk */
2330 + 800, /* horResolution */
2331 + 600, /* verResolution */
2332 + TV_STD_PAL, /* standard */
2333 + 1131, /* horTotal */
2334 + 742, /* verTotal */
2335 + 813, /* horStart */
2336 + 840, /* horSyncStart */
2337 + 633, /* verSyncStart */
2338 + 708369, /* defRestart */
2339 + 211, /* crtcPLL_N */
2340 + 9, /* crtcPLL_M */
2341 + 8, /* crtcPLL_postDiv */
2342 + 759, /* pixToTV */
2343 + },
2344 };
2345
2346 #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
2347 @@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
2348 if (pll->reference_freq == 2700)
2349 const_ptr = &available_tv_modes[1];
2350 else
2351 - const_ptr = &available_tv_modes[1]; /* FIX ME */
2352 + const_ptr = &available_tv_modes[3];
2353 }
2354 return const_ptr;
2355 }
2356 @@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
2357 n = PAL_TV_PLL_N_27;
2358 p = PAL_TV_PLL_P_27;
2359 } else {
2360 - m = PAL_TV_PLL_M_27;
2361 - n = PAL_TV_PLL_N_27;
2362 - p = PAL_TV_PLL_P_27;
2363 + m = PAL_TV_PLL_M_14;
2364 + n = PAL_TV_PLL_N_14;
2365 + p = PAL_TV_PLL_P_14;
2366 }
2367 }
2368
2369 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
2370 index c381856..a27c09f 100644
2371 --- a/drivers/gpu/drm/radeon/rs600.c
2372 +++ b/drivers/gpu/drm/radeon/rs600.c
2373 @@ -175,7 +175,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
2374 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2375
2376 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
2377 - tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
2378 + tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
2379 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
2380
2381 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
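
The one-character fix above is easy to miss: the two setter macros build masks for different bit positions, so ANDing them always yields zero and neither invalidate bit was ever set. A self-contained illustration with invented bit positions (the real masks live in the driver's register headers):

	#define S_INVALIDATE_ALL_L1_TLBS(x)	((x) << 0)	/* hypothetical bit 0 */
	#define S_INVALIDATE_L2_CACHE(x)	((x) << 1)	/* hypothetical bit 1 */

	tmp |= S_INVALIDATE_ALL_L1_TLBS(1) & S_INVALIDATE_L2_CACHE(1);	/* 0x1 & 0x2 == 0, a no-op */
	tmp |= S_INVALIDATE_ALL_L1_TLBS(1) | S_INVALIDATE_L2_CACHE(1);	/* 0x1 | 0x2 == 0x3, both set */
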
2382 diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2383 index cab13e8..62416e6 100644
2384 --- a/drivers/hid/hid-gyration.c
2385 +++ b/drivers/hid/hid-gyration.c
2386 @@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2387 static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2388 struct hid_usage *usage, __s32 value)
2389 {
2390 - struct input_dev *input = field->hidinput->input;
2391 +
2392 + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
2393 + return 0;
2394
2395 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2396 (usage->hid & 0xff) == 0x82) {
2397 + struct input_dev *input = field->hidinput->input;
2398 input_event(input, usage->type, usage->code, 1);
2399 input_sync(input);
2400 input_event(input, usage->type, usage->code, 0);
2401 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
2402 index 864a371..fbc997e 100644
2403 --- a/drivers/hwmon/sht15.c
2404 +++ b/drivers/hwmon/sht15.c
2405 @@ -302,13 +302,13 @@ error_ret:
2406 **/
2407 static inline int sht15_calc_temp(struct sht15_data *data)
2408 {
2409 - int d1 = 0;
2410 + int d1 = temppoints[0].d1;
2411 int i;
2412
2413 - for (i = 1; i < ARRAY_SIZE(temppoints); i++)
2414 + for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
2415 /* Find pointer to interpolate */
2416 if (data->supply_uV > temppoints[i - 1].vdd) {
2417 - d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
2418 + d1 = (data->supply_uV - temppoints[i - 1].vdd)
2419 * (temppoints[i].d1 - temppoints[i - 1].d1)
2420 / (temppoints[i].vdd - temppoints[i - 1].vdd)
2421 + temppoints[i - 1].d1;
2422 @@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
2423 /* If a regulator is available, query what the supply voltage actually is!*/
2424 data->reg = regulator_get(data->dev, "vcc");
2425 if (!IS_ERR(data->reg)) {
2426 - data->supply_uV = regulator_get_voltage(data->reg);
2427 + int voltage;
2428 +
2429 + voltage = regulator_get_voltage(data->reg);
2430 + if (voltage)
2431 + data->supply_uV = voltage;
2432 +
2433 regulator_enable(data->reg);
2434 /* setup a notifier block to update this if another device
2435 * causes the voltage to change */
2436 diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
2437 index 5f318ce..cb9f95c 100644
2438 --- a/drivers/i2c/busses/Kconfig
2439 +++ b/drivers/i2c/busses/Kconfig
2440 @@ -77,7 +77,7 @@ config I2C_AMD8111
2441 will be called i2c-amd8111.
2442
2443 config I2C_I801
2444 - tristate "Intel 82801 (ICH)"
2445 + tristate "Intel 82801 (ICH/PCH)"
2446 depends on PCI
2447 help
2448 If you say yes to this option, support will be included for the Intel
2449 @@ -97,7 +97,8 @@ config I2C_I801
2450 ICH9
2451 Tolapai
2452 ICH10
2453 - PCH
2454 + 3400/5 Series (PCH)
2455 + Cougar Point (PCH)
2456
2457 This driver can also be built as a module. If so, the module
2458 will be called i2c-i801.
2459 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
2460 index 5574be2..e361da7 100644
2461 --- a/drivers/i2c/busses/i2c-i801.c
2462 +++ b/drivers/i2c/busses/i2c-i801.c
2463 @@ -41,7 +41,8 @@
2464 Tolapai 0x5032 32 hard yes yes yes
2465 ICH10 0x3a30 32 hard yes yes yes
2466 ICH10 0x3a60 32 hard yes yes yes
2467 - PCH 0x3b30 32 hard yes yes yes
2468 + 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
2469 + Cougar Point (PCH) 0x1c22 32 hard yes yes yes
2470
2471 Features supported by this driver:
2472 Software PEC no
2473 @@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
2474 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
2475 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
2476 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
2477 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
2478 { 0, }
2479 };
2480
2481 @@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
2482 case PCI_DEVICE_ID_INTEL_ICH10_4:
2483 case PCI_DEVICE_ID_INTEL_ICH10_5:
2484 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
2485 + case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
2486 i801_features |= FEATURE_I2C_BLOCK_READ;
2487 /* fall through */
2488 case PCI_DEVICE_ID_INTEL_82801DB_3:
2489 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2490 index 30bdf42..f8302c2 100644
2491 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2492 +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2493 @@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
2494 if (++priv->tx_outstanding == ipoib_sendq_size) {
2495 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
2496 tx->qp->qp_num);
2497 + if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
2498 + ipoib_warn(priv, "request notify on send CQ failed\n");
2499 netif_stop_queue(dev);
2500 }
2501 }
2502 diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
2503 index fbd3987..e8d65b3 100644
2504 --- a/drivers/input/sparse-keymap.c
2505 +++ b/drivers/input/sparse-keymap.c
2506 @@ -161,7 +161,7 @@ int sparse_keymap_setup(struct input_dev *dev,
2507 return 0;
2508
2509 err_out:
2510 - kfree(keymap);
2511 + kfree(map);
2512 return error;
2513
2514 }
2515 diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
2516 index 072f33b..e53ddc5 100644
2517 --- a/drivers/input/tablet/wacom_sys.c
2518 +++ b/drivers/input/tablet/wacom_sys.c
2519 @@ -644,13 +644,15 @@ static int wacom_resume(struct usb_interface *intf)
2520 int rv;
2521
2522 mutex_lock(&wacom->lock);
2523 - if (wacom->open) {
2524 +
2525 + /* switch to wacom mode first */
2526 + wacom_query_tablet_data(intf, features);
2527 +
2528 + if (wacom->open)
2529 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
2530 - /* switch to wacom mode if needed */
2531 - if (!wacom_retrieve_hid_descriptor(intf, features))
2532 - wacom_query_tablet_data(intf, features);
2533 - } else
2534 + else
2535 rv = 0;
2536 +
2537 mutex_unlock(&wacom->lock);
2538
2539 return rv;
2540 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
2541 index e3cf568..d7500e1 100644
2542 --- a/drivers/md/dm-ioctl.c
2543 +++ b/drivers/md/dm-ioctl.c
2544 @@ -285,7 +285,8 @@ retry:
2545 up_write(&_hash_lock);
2546 }
2547
2548 -static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
2549 +static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
2550 + const char *new)
2551 {
2552 char *new_name, *old_name;
2553 struct hash_cell *hc;
2554 @@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
2555 dm_table_put(table);
2556 }
2557
2558 - dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
2559 + if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
2560 + *flags |= DM_UEVENT_GENERATED_FLAG;
2561
2562 dm_put(hc->md);
2563 up_write(&_hash_lock);
2564 @@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
2565 __hash_remove(hc);
2566 up_write(&_hash_lock);
2567
2568 - dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
2569 + if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
2570 + param->flags |= DM_UEVENT_GENERATED_FLAG;
2571
2572 dm_put(md);
2573 - param->data_size = 0;
2574 return 0;
2575 }
2576
2577 @@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
2578 return r;
2579
2580 param->data_size = 0;
2581 - return dm_hash_rename(param->event_nr, param->name, new_name);
2582 +
2583 + return dm_hash_rename(param->event_nr, &param->flags, param->name,
2584 + new_name);
2585 }
2586
2587 static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
2588 @@ -899,8 +903,8 @@ static int do_resume(struct dm_ioctl *param)
2589
2590 if (dm_suspended_md(md)) {
2591 r = dm_resume(md);
2592 - if (!r)
2593 - dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
2594 + if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
2595 + param->flags |= DM_UEVENT_GENERATED_FLAG;
2596 }
2597
2598 if (old_map)
2599 @@ -1477,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
2600 {
2601 /* Always clear this flag */
2602 param->flags &= ~DM_BUFFER_FULL_FLAG;
2603 + param->flags &= ~DM_UEVENT_GENERATED_FLAG;
2604
2605 /* Ignores parameters */
2606 if (cmd == DM_REMOVE_ALL_CMD ||
2607 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2608 index fa786b9..fe8889e 100644
2609 --- a/drivers/md/dm.c
2610 +++ b/drivers/md/dm.c
2611 @@ -2618,18 +2618,19 @@ out:
2612 /*-----------------------------------------------------------------
2613 * Event notification.
2614 *---------------------------------------------------------------*/
2615 -void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2616 +int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2617 unsigned cookie)
2618 {
2619 char udev_cookie[DM_COOKIE_LENGTH];
2620 char *envp[] = { udev_cookie, NULL };
2621
2622 if (!cookie)
2623 - kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2624 + return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2625 else {
2626 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2627 DM_COOKIE_ENV_VAR_NAME, cookie);
2628 - kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
2629 + return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2630 + action, envp);
2631 }
2632 }
2633
2634 diff --git a/drivers/md/dm.h b/drivers/md/dm.h
2635 index 8dadaa5..bad1724 100644
2636 --- a/drivers/md/dm.h
2637 +++ b/drivers/md/dm.h
2638 @@ -125,8 +125,8 @@ void dm_stripe_exit(void);
2639 int dm_open_count(struct mapped_device *md);
2640 int dm_lock_for_deletion(struct mapped_device *md);
2641
2642 -void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2643 - unsigned cookie);
2644 +int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2645 + unsigned cookie);
2646
2647 int dm_io_init(void);
2648 void dm_io_exit(void);
2649 diff --git a/drivers/md/linear.c b/drivers/md/linear.c
2650 index 00435bd..001317b 100644
2651 --- a/drivers/md/linear.c
2652 +++ b/drivers/md/linear.c
2653 @@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
2654 disk_stack_limits(mddev->gendisk, rdev->bdev,
2655 rdev->data_offset << 9);
2656 /* as we don't honour merge_bvec_fn, we must never risk
2657 - * violating it, so limit ->max_sector to one PAGE, as
2658 - * a one page request is never in violation.
2659 + * violating it, so limit max_phys_segments to 1 lying within
2660 + * a single page.
2661 */
2662 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2663 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2664 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2665 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2666 + blk_queue_max_phys_segments(mddev->queue, 1);
2667 + blk_queue_segment_boundary(mddev->queue,
2668 + PAGE_CACHE_SIZE - 1);
2669 + }
2670
2671 conf->array_sectors += rdev->sectors;
2672 cnt++;
2673 diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
2674 index 32a662f..f9ee99f 100644
2675 --- a/drivers/md/multipath.c
2676 +++ b/drivers/md/multipath.c
2677 @@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2678 rdev->data_offset << 9);
2679
2680 /* as we don't honour merge_bvec_fn, we must never risk
2681 - * violating it, so limit ->max_sector to one PAGE, as
2682 - * a one page request is never in violation.
2683 + * violating it, so limit ->max_phys_segments to one, lying
2684 + * within a single page.
2685 * (Note: it is very unlikely that a device with
2686 * merge_bvec_fn will be involved in multipath.)
2687 */
2688 - if (q->merge_bvec_fn &&
2689 - queue_max_sectors(q) > (PAGE_SIZE>>9))
2690 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2691 + if (q->merge_bvec_fn) {
2692 + blk_queue_max_phys_segments(mddev->queue, 1);
2693 + blk_queue_segment_boundary(mddev->queue,
2694 + PAGE_CACHE_SIZE - 1);
2695 + }
2696
2697 conf->working_disks++;
2698 mddev->degraded--;
2699 @@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
2700 /* as we don't honour merge_bvec_fn, we must never risk
2701 * violating it, not that we ever expect a device with
2702 * a merge_bvec_fn to be involved in multipath */
2703 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2704 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2705 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2706 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2707 + blk_queue_max_phys_segments(mddev->queue, 1);
2708 + blk_queue_segment_boundary(mddev->queue,
2709 + PAGE_CACHE_SIZE - 1);
2710 + }
2711
2712 if (!test_bit(Faulty, &rdev->flags))
2713 conf->working_disks++;
2714 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
2715 index 77605cd..41ee9de 100644
2716 --- a/drivers/md/raid0.c
2717 +++ b/drivers/md/raid0.c
2718 @@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
2719 disk_stack_limits(mddev->gendisk, rdev1->bdev,
2720 rdev1->data_offset << 9);
2721 /* as we don't honour merge_bvec_fn, we must never risk
2722 - * violating it, so limit ->max_sector to one PAGE, as
2723 - * a one page request is never in violation.
2724 + * violating it, so limit ->max_phys_segments to 1, lying within
2725 + * a single page.
2726 */
2727
2728 - if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
2729 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2730 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2731 -
2732 + if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
2733 + blk_queue_max_phys_segments(mddev->queue, 1);
2734 + blk_queue_segment_boundary(mddev->queue,
2735 + PAGE_CACHE_SIZE - 1);
2736 + }
2737 if (!smallest || (rdev1->sectors < smallest->sectors))
2738 smallest = rdev1;
2739 cnt++;
2740 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2741 index d119b7b..047c468 100644
2742 --- a/drivers/md/raid10.c
2743 +++ b/drivers/md/raid10.c
2744 @@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2745
2746 disk_stack_limits(mddev->gendisk, rdev->bdev,
2747 rdev->data_offset << 9);
2748 - /* as we don't honour merge_bvec_fn, we must never risk
2749 - * violating it, so limit ->max_sector to one PAGE, as
2750 - * a one page request is never in violation.
2751 + /* as we don't honour merge_bvec_fn, we must
2752 + * never risk violating it, so limit
2753 + * ->max_phys_segments to one lying within a single
2754 + * page, as a one page request is never in
2755 + * violation.
2756 */
2757 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2758 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2759 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2760 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2761 + blk_queue_max_phys_segments(mddev->queue, 1);
2762 + blk_queue_segment_boundary(mddev->queue,
2763 + PAGE_CACHE_SIZE - 1);
2764 + }
2765
2766 p->head_position = 0;
2767 rdev->raid_disk = mirror;
2768 @@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
2769 disk_stack_limits(mddev->gendisk, rdev->bdev,
2770 rdev->data_offset << 9);
2771 /* as we don't honour merge_bvec_fn, we must never risk
2772 - * violating it, so limit ->max_sector to one PAGE, as
2773 - * a one page request is never in violation.
2774 + * violating it, so limit max_phys_segments to 1 lying
2775 + * within a single page.
2776 */
2777 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2778 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2779 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2780 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2781 + blk_queue_max_phys_segments(mddev->queue, 1);
2782 + blk_queue_segment_boundary(mddev->queue,
2783 + PAGE_CACHE_SIZE - 1);
2784 + }
2785
2786 disk->head_position = 0;
2787 }
2788 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
2789 index 57f149b..4d353d2 100644
2790 --- a/drivers/net/e1000e/netdev.c
2791 +++ b/drivers/net/e1000e/netdev.c
2792 @@ -660,6 +660,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
2793 i = 0;
2794 }
2795
2796 + if (i == tx_ring->next_to_use)
2797 + break;
2798 eop = tx_ring->buffer_info[i].next_to_watch;
2799 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2800 }
2801 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
2802 index 67d414b..3db85da 100644
2803 --- a/drivers/net/r8169.c
2804 +++ b/drivers/net/r8169.c
2805 @@ -3255,8 +3255,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
2806 unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
2807
2808 if (max_frame != 16383)
2809 - printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
2810 - "May lead to frame reception errors!\n");
2811 + printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
2812 + "NIC may lead to frame reception errors!\n");
2813
2814 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
2815 }
2816 diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
2817 index e0799d9..0387658 100644
2818 --- a/drivers/net/wireless/ath/ar9170/usb.c
2819 +++ b/drivers/net/wireless/ath/ar9170/usb.c
2820 @@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
2821 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
2822
2823 usb_fill_int_urb(urb, aru->udev,
2824 - usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
2825 + usb_sndintpipe(aru->udev, AR9170_EP_CMD),
2826 aru->common.cmdbuf, plen + 4,
2827 ar9170_usb_tx_urb_complete, NULL, 1);
2828
2829 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2830 index 33a1071..7b1eab4 100644
2831 --- a/drivers/net/wireless/ath/ath9k/main.c
2832 +++ b/drivers/net/wireless/ath/ath9k/main.c
2833 @@ -2721,8 +2721,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2834 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
2835 ath9k_set_wiphy_idle(aphy, idle);
2836
2837 - if (!idle && all_wiphys_idle)
2838 - enable_radio = true;
2839 + enable_radio = (!idle && all_wiphys_idle);
2840
2841 /*
2842 * After we unlock here its possible another wiphy
2843 diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
2844 index 64c12e1..0a00d42 100644
2845 --- a/drivers/net/wireless/b43/Kconfig
2846 +++ b/drivers/net/wireless/b43/Kconfig
2847 @@ -78,11 +78,11 @@ config B43_SDIO
2848
2849 If unsure, say N.
2850
2851 -# Data transfers to the device via PIO
2852 -# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
2853 +# Data transfers to the device via PIO. We want it as a fallback even
2854 +# if we can do DMA.
2855 config B43_PIO
2856 bool
2857 - depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
2858 + depends on B43
2859 select SSB_BLOCKIO
2860 default y
2861
2862 diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
2863 index 84772a2..5e83b6f 100644
2864 --- a/drivers/net/wireless/b43/Makefile
2865 +++ b/drivers/net/wireless/b43/Makefile
2866 @@ -12,7 +12,7 @@ b43-y += xmit.o
2867 b43-y += lo.o
2868 b43-y += wa.o
2869 b43-y += dma.o
2870 -b43-$(CONFIG_B43_PIO) += pio.o
2871 +b43-y += pio.o
2872 b43-y += rfkill.o
2873 b43-$(CONFIG_B43_LEDS) += leds.o
2874 b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
2875 diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
2876 index c484cc2..7df822e 100644
2877 --- a/drivers/net/wireless/b43/b43.h
2878 +++ b/drivers/net/wireless/b43/b43.h
2879 @@ -694,6 +694,7 @@ struct b43_wldev {
2880 bool radio_hw_enable; /* saved state of radio hardware enabled state */
2881 bool qos_enabled; /* TRUE, if QoS is used. */
2882 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
2883 + bool use_pio; /* TRUE if next init should use PIO */
2884
2885 /* PHY/Radio device. */
2886 struct b43_phy phy;
2887 @@ -822,11 +823,9 @@ struct b43_wl {
2888 /* The device LEDs. */
2889 struct b43_leds leds;
2890
2891 -#ifdef CONFIG_B43_PIO
2892 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
2893 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
2894 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
2895 -#endif /* CONFIG_B43_PIO */
2896 };
2897
2898 static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
2899 @@ -877,20 +876,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
2900
2901 static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
2902 {
2903 -#ifdef CONFIG_B43_PIO
2904 return dev->__using_pio_transfers;
2905 -#else
2906 - return 0;
2907 -#endif
2908 }
2909
2910 #ifdef CONFIG_B43_FORCE_PIO
2911 -# define B43_FORCE_PIO 1
2912 +# define B43_PIO_DEFAULT 1
2913 #else
2914 -# define B43_FORCE_PIO 0
2915 +# define B43_PIO_DEFAULT 0
2916 #endif
2917
2918 -
2919 /* Message printing */
2920 void b43info(struct b43_wl *wl, const char *fmt, ...)
2921 __attribute__ ((format(printf, 2, 3)));
2922 diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
2923 index 88d1fd0..615af22 100644
2924 --- a/drivers/net/wireless/b43/dma.c
2925 +++ b/drivers/net/wireless/b43/dma.c
2926 @@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
2927 b43_power_saving_ctl_bits(dev, 0);
2928 }
2929
2930 -#ifdef CONFIG_B43_PIO
2931 static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
2932 u16 mmio_base, bool enable)
2933 {
2934 @@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
2935 mmio_base = b43_dmacontroller_base(type, engine_index);
2936 direct_fifo_rx(dev, type, mmio_base, enable);
2937 }
2938 -#endif /* CONFIG_B43_PIO */
2939 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
2940 index 629c166..9eb4f5e 100644
2941 --- a/drivers/net/wireless/b43/main.c
2942 +++ b/drivers/net/wireless/b43/main.c
2943 @@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
2944 module_param_named(verbose, b43_modparam_verbose, int, 0644);
2945 MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
2946
2947 +int b43_modparam_pio = B43_PIO_DEFAULT;
2948 +module_param_named(pio, b43_modparam_pio, int, 0644);
2949 +MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
2950
2951 static const struct ssb_device_id b43_ssb_tbl[] = {
2952 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
2953 @@ -1790,8 +1793,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
2954 dma_reason[4], dma_reason[5]);
2955 b43err(dev->wl, "This device does not support DMA "
2956 "on your system. Please use PIO instead.\n");
2957 - b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
2958 - "your kernel configuration.\n");
2959 + /* Fall back to PIO transfers if we get fatal DMA errors! */
2960 + dev->use_pio = 1;
2961 + b43_controller_restart(dev, "DMA error");
2962 return;
2963 }
2964 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
2965 @@ -4358,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
2966
2967 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
2968 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
2969 - B43_FORCE_PIO) {
2970 + dev->use_pio) {
2971 dev->__using_pio_transfers = 1;
2972 err = b43_pio_init(dev);
2973 } else {
2974 @@ -4826,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
2975 if (!wldev)
2976 goto out;
2977
2978 + wldev->use_pio = b43_modparam_pio;
2979 wldev->dev = dev;
2980 wldev->wl = wl;
2981 b43_set_status(wldev, B43_STAT_UNINIT);
2982 diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
2983 index 7dd649c..7b3c42f 100644
2984 --- a/drivers/net/wireless/b43/pio.h
2985 +++ b/drivers/net/wireless/b43/pio.h
2986 @@ -55,8 +55,6 @@
2987 #define B43_PIO_MAX_NR_TXPACKETS 32
2988
2989
2990 -#ifdef CONFIG_B43_PIO
2991 -
2992 struct b43_pio_txpacket {
2993 /* Pointer to the TX queue we belong to. */
2994 struct b43_pio_txqueue *queue;
2995 @@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
2996 void b43_pio_tx_suspend(struct b43_wldev *dev);
2997 void b43_pio_tx_resume(struct b43_wldev *dev);
2998
2999 -
3000 -#else /* CONFIG_B43_PIO */
3001 -
3002 -
3003 -static inline int b43_pio_init(struct b43_wldev *dev)
3004 -{
3005 - return 0;
3006 -}
3007 -static inline void b43_pio_free(struct b43_wldev *dev)
3008 -{
3009 -}
3010 -static inline void b43_pio_stop(struct b43_wldev *dev)
3011 -{
3012 -}
3013 -static inline int b43_pio_tx(struct b43_wldev *dev,
3014 - struct sk_buff *skb)
3015 -{
3016 - return 0;
3017 -}
3018 -static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
3019 - const struct b43_txstatus *status)
3020 -{
3021 -}
3022 -static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
3023 - struct ieee80211_tx_queue_stats *stats)
3024 -{
3025 -}
3026 -static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
3027 -{
3028 -}
3029 -static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
3030 -{
3031 -}
3032 -static inline void b43_pio_tx_resume(struct b43_wldev *dev)
3033 -{
3034 -}
3035 -
3036 -#endif /* CONFIG_B43_PIO */
3037 #endif /* B43_PIO_H_ */
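
Taken together, the b43 changes above stop treating CONFIG_B43_PIO as a compile-time switch: pio.o is always built, the stub inlines are gone, and the DMA-versus-PIO choice becomes the runtime "pio" module parameter, with an automatic fall-back to PIO on fatal DMA errors (see the main.c hunk). Assuming the module is loaded by hand, forcing PIO now looks like:

	modprobe b43 pio=1	# 0=DMA (the default unless CONFIG_B43_FORCE_PIO is set), 1=PIO
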
3038 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
3039 index 3146281..3b4c5a4 100644
3040 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
3041 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
3042 @@ -581,6 +581,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
3043
3044 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
3045
3046 + /* reset to 0 to enable all the queues first */
3047 + priv->txq_ctx_active_msk = 0;
3048 /* Map each Tx/cmd queue to its corresponding fifo */
3049 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
3050 int ac = default_queue_to_tx_fifo[i];
3051 @@ -2008,7 +2010,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3052 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
3053 "%d index %d\n", scd_ssn , index);
3054 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3055 - iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
3056 + if (qc)
3057 + iwl_free_tfds_in_queue(priv, sta_id,
3058 + tid, freed);
3059
3060 if (priv->mac80211_registered &&
3061 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
3062 @@ -2035,13 +2039,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3063
3064 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3065 if (qc && likely(sta_id != IWL_INVALID_STATION))
3066 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3067 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
3068 + else if (sta_id == IWL_INVALID_STATION)
3069 + IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
3070
3071 if (priv->mac80211_registered &&
3072 (iwl_queue_space(&txq->q) > txq->q.low_mark))
3073 iwl_wake_queue(priv, txq_id);
3074 }
3075 -
3076 if (qc && likely(sta_id != IWL_INVALID_STATION))
3077 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
3078
3079 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
3080 index cffaae7..c610e5f 100644
3081 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
3082 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
3083 @@ -657,6 +657,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
3084
3085 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
3086
3087 + /* reset to 0 to enable all the queues first */
3088 + priv->txq_ctx_active_msk = 0;
3089 /* map qos queues to fifos one-to-one */
3090 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
3091 int ac = iwl5000_default_queue_to_tx_fifo[i];
3092 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
3093 index 1c9866d..5622a55 100644
3094 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
3095 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
3096 @@ -2461,7 +2461,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
3097 BIT(NL80211_IFTYPE_STATION) |
3098 BIT(NL80211_IFTYPE_ADHOC);
3099
3100 - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
3101 + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3102 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3103
3104 /*
3105 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
3106 index fa1c89b..8f1b850 100644
3107 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
3108 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
3109 @@ -404,21 +404,6 @@ EXPORT_SYMBOL(iwl_init_scan_params);
3110
3111 static int iwl_scan_initiate(struct iwl_priv *priv)
3112 {
3113 - if (!iwl_is_ready_rf(priv)) {
3114 - IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
3115 - return -EIO;
3116 - }
3117 -
3118 - if (test_bit(STATUS_SCANNING, &priv->status)) {
3119 - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
3120 - return -EAGAIN;
3121 - }
3122 -
3123 - if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3124 - IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
3125 - return -EAGAIN;
3126 - }
3127 -
3128 IWL_DEBUG_INFO(priv, "Starting scan...\n");
3129 set_bit(STATUS_SCANNING, &priv->status);
3130 priv->scan_start = jiffies;
3131 @@ -449,6 +434,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
3132 goto out_unlock;
3133 }
3134
3135 + if (test_bit(STATUS_SCANNING, &priv->status)) {
3136 + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
3137 + ret = -EAGAIN;
3138 + goto out_unlock;
3139 + }
3140 +
3141 + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3142 + IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
3143 + ret = -EAGAIN;
3144 + goto out_unlock;
3145 + }
3146 +
3147 /* We don't schedule scan within next_scan_jiffies period.
3148 * Avoid scanning during possible EAPOL exchange, return
3149 * success immediately.
3150 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
3151 index f297865..adbb3ea 100644
3152 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
3153 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
3154 @@ -1926,7 +1926,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
3155 {
3156 int i;
3157
3158 - for (i = 0; i < IWL_RATE_COUNT; i++) {
3159 + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3160 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
3161 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3162 rates[i].hw_value_short = i;
3163 @@ -3903,7 +3903,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3164 BIT(NL80211_IFTYPE_STATION) |
3165 BIT(NL80211_IFTYPE_ADHOC);
3166
3167 - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
3168 + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3169 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3170
3171 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3172 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3173 index 3245d33..c4fead1 100644
3174 --- a/drivers/pci/pci.c
3175 +++ b/drivers/pci/pci.c
3176 @@ -2612,6 +2612,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3177 return 0;
3178 }
3179
3180 +/* Some architectures require additional programming to enable VGA */
3181 +static arch_set_vga_state_t arch_set_vga_state;
3182 +
3183 +void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3184 +{
3185 + arch_set_vga_state = func; /* NULL disables */
3186 +}
3187 +
3188 +static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3189 + unsigned int command_bits, bool change_bridge)
3190 +{
3191 + if (arch_set_vga_state)
3192 + return arch_set_vga_state(dev, decode, command_bits,
3193 + change_bridge);
3194 + return 0;
3195 +}
3196 +
3197 /**
3198 * pci_set_vga_state - set VGA decode state on device and parents if requested
3199 * @dev: the PCI device
3200 @@ -2625,9 +2642,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
3201 struct pci_bus *bus;
3202 struct pci_dev *bridge;
3203 u16 cmd;
3204 + int rc;
3205
3206 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
3207
3208 + /* ARCH specific VGA enables */
3209 + rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
3210 + if (rc)
3211 + return rc;
3212 +
3213 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3214 if (decode == true)
3215 cmd |= command_bits;
3216 @@ -2874,4 +2897,3 @@ EXPORT_SYMBOL(pci_target_state);
3217 EXPORT_SYMBOL(pci_prepare_to_sleep);
3218 EXPORT_SYMBOL(pci_back_from_sleep);
3219 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3220 -
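pci_register_set_vga_state() gives an architecture a single hook that pci_set_vga_state() now calls before touching PCI_COMMAND. This patch adds the hook but no user of it, so the following is only a hedged sketch of how an arch might plug in; the myarch_* names are hypothetical:

    #include <linux/init.h>
    #include <linux/pci.h>

    /* program chipset-level VGA routing; return 0 on success */
    static int myarch_set_vga_state(struct pci_dev *pdev, bool decode,
                                    unsigned int command_bits,
                                    bool change_bridge)
    {
            /* arch-specific register pokes would go here */
            return 0;
    }

    static int __init myarch_vga_init(void)
    {
            pci_register_set_vga_state(myarch_set_vga_state);
            return 0;
    }
    arch_initcall(myarch_vga_init);

Passing NULL to pci_register_set_vga_state() disables the hook again, as the comment in the hunk notes.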
3221 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
3222 index c28a712..e6b67f2 100644
3223 --- a/drivers/scsi/libiscsi.c
3224 +++ b/drivers/scsi/libiscsi.c
3225 @@ -3027,14 +3027,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
3226 session->state = ISCSI_STATE_TERMINATE;
3227 else if (conn->stop_stage != STOP_CONN_RECOVER)
3228 session->state = ISCSI_STATE_IN_RECOVERY;
3229 +
3230 + old_stop_stage = conn->stop_stage;
3231 + conn->stop_stage = flag;
3232 spin_unlock_bh(&session->lock);
3233
3234 del_timer_sync(&conn->transport_timer);
3235 iscsi_suspend_tx(conn);
3236
3237 spin_lock_bh(&session->lock);
3238 - old_stop_stage = conn->stop_stage;
3239 - conn->stop_stage = flag;
3240 conn->c_stage = ISCSI_CONN_STOPPED;
3241 spin_unlock_bh(&session->lock);
3242
3243 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3244 index 34d4eb9..db6b071 100644
3245 --- a/drivers/usb/class/cdc-acm.c
3246 +++ b/drivers/usb/class/cdc-acm.c
3247 @@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
3248 {
3249 wb->use = 0;
3250 acm->transmitting--;
3251 + usb_autopm_put_interface_async(acm->control);
3252 }
3253
3254 /*
3255 @@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
3256 }
3257
3258 dbg("%s susp_count: %d", __func__, acm->susp_count);
3259 + usb_autopm_get_interface_async(acm->control);
3260 if (acm->susp_count) {
3261 - acm->delayed_wb = wb;
3262 - schedule_work(&acm->waker);
3263 + if (!acm->delayed_wb)
3264 + acm->delayed_wb = wb;
3265 + else
3266 + usb_autopm_put_interface_async(acm->control);
3267 spin_unlock_irqrestore(&acm->write_lock, flags);
3268 return 0; /* A white lie */
3269 }
3270 @@ -534,23 +538,6 @@ static void acm_softint(struct work_struct *work)
3271 tty_kref_put(tty);
3272 }
3273
3274 -static void acm_waker(struct work_struct *waker)
3275 -{
3276 - struct acm *acm = container_of(waker, struct acm, waker);
3277 - int rv;
3278 -
3279 - rv = usb_autopm_get_interface(acm->control);
3280 - if (rv < 0) {
3281 - dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
3282 - return;
3283 - }
3284 - if (acm->delayed_wb) {
3285 - acm_start_wb(acm, acm->delayed_wb);
3286 - acm->delayed_wb = NULL;
3287 - }
3288 - usb_autopm_put_interface(acm->control);
3289 -}
3290 -
3291 /*
3292 * TTY handlers
3293 */
3294 @@ -1178,7 +1165,6 @@ made_compressed_probe:
3295 acm->urb_task.func = acm_rx_tasklet;
3296 acm->urb_task.data = (unsigned long) acm;
3297 INIT_WORK(&acm->work, acm_softint);
3298 - INIT_WORK(&acm->waker, acm_waker);
3299 init_waitqueue_head(&acm->drain_wait);
3300 spin_lock_init(&acm->throttle_lock);
3301 spin_lock_init(&acm->write_lock);
3302 @@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
3303 tasklet_enable(&acm->urb_task);
3304
3305 cancel_work_sync(&acm->work);
3306 - cancel_work_sync(&acm->waker);
3307 }
3308
3309 static void acm_disconnect(struct usb_interface *intf)
3310 @@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
3311 static int acm_resume(struct usb_interface *intf)
3312 {
3313 struct acm *acm = usb_get_intfdata(intf);
3314 + struct acm_wb *wb;
3315 int rv = 0;
3316 int cnt;
3317
3318 @@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
3319 mutex_lock(&acm->mutex);
3320 if (acm->port.count) {
3321 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
3322 +
3323 + spin_lock_irq(&acm->write_lock);
3324 + if (acm->delayed_wb) {
3325 + wb = acm->delayed_wb;
3326 + acm->delayed_wb = NULL;
3327 + spin_unlock_irq(&acm->write_lock);
3328 + acm_start_wb(acm, wb);
3329 + } else {
3330 + spin_unlock_irq(&acm->write_lock);
3331 + }
3332 +
3333 + /*
3334 	 + * error checking is delayed because the
3335 	 + * write path must be attempted in any case
3336 + */
3337 if (rv < 0)
3338 goto err_out;
3339
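The thread running through all of these cdc-acm hunks is autopm reference balance: every usb_autopm_get_interface_async() taken in acm_write_start() must be matched by exactly one put, either in acm_write_done() when the URB completes, or immediately when a write arrives while another is already parked in delayed_wb. That balance is what lets the acm_waker work item be deleted. A simplified sketch of the pairing (not the driver's exact code):

    int write_start(struct acm *acm, struct acm_wb *wb)
    {
            usb_autopm_get_interface_async(acm->control);   /* +1 */
            if (acm->susp_count) {                          /* suspended */
                    if (!acm->delayed_wb)
                            acm->delayed_wb = wb;  /* the put happens after
                                                    * resume, when the wb is
                                                    * submitted and completes */
                    else
                            usb_autopm_put_interface_async(acm->control);
                                                   /* -1: duplicate dropped */
                    return 0;
            }
            return acm_start_wb(acm, wb);          /* -1 in acm_write_done() */
    }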
3340 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
3341 index c4a0ee8..519eb63 100644
3342 --- a/drivers/usb/class/cdc-acm.h
3343 +++ b/drivers/usb/class/cdc-acm.h
3344 @@ -112,7 +112,6 @@ struct acm {
3345 struct mutex mutex;
3346 struct usb_cdc_line_coding line; /* bits, stop, parity */
3347 struct work_struct work; /* work queue entry for line discipline waking up */
3348 - struct work_struct waker;
3349 wait_queue_head_t drain_wait; /* close processing */
3350 struct tasklet_struct urb_task; /* rx processing */
3351 spinlock_t throttle_lock; /* synchronize throtteling and read callback */
3352 diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
3353 index 2e78b07..9804ee9 100644
3354 --- a/drivers/video/backlight/mbp_nvidia_bl.c
3355 +++ b/drivers/video/backlight/mbp_nvidia_bl.c
3356 @@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
3357 static const struct dmi_system_id __initdata mbp_device_table[] = {
3358 {
3359 .callback = mbp_dmi_match,
3360 + .ident = "MacBook 1,1",
3361 + .matches = {
3362 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3363 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
3364 + },
3365 + .driver_data = (void *)&intel_chipset_data,
3366 + },
3367 + {
3368 + .callback = mbp_dmi_match,
3369 + .ident = "MacBook 2,1",
3370 + .matches = {
3371 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3372 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
3373 + },
3374 + .driver_data = (void *)&intel_chipset_data,
3375 + },
3376 + {
3377 + .callback = mbp_dmi_match,
3378 + .ident = "MacBook 3,1",
3379 + .matches = {
3380 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3381 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
3382 + },
3383 + .driver_data = (void *)&intel_chipset_data,
3384 + },
3385 + {
3386 + .callback = mbp_dmi_match,
3387 + .ident = "MacBook 4,1",
3388 + .matches = {
3389 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3390 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
3391 + },
3392 + .driver_data = (void *)&intel_chipset_data,
3393 + },
3394 + {
3395 + .callback = mbp_dmi_match,
3396 + .ident = "MacBook 4,2",
3397 + .matches = {
3398 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3399 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
3400 + },
3401 + .driver_data = (void *)&intel_chipset_data,
3402 + },
3403 + {
3404 + .callback = mbp_dmi_match,
3405 .ident = "MacBookPro 3,1",
3406 .matches = {
3407 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
3408 diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
3409 index 4cd5049..3803745 100644
3410 --- a/drivers/video/sunxvr500.c
3411 +++ b/drivers/video/sunxvr500.c
3412 @@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
3413 static int __devinit e3d_pci_register(struct pci_dev *pdev,
3414 const struct pci_device_id *ent)
3415 {
3416 + struct device_node *of_node;
3417 + const char *device_type;
3418 struct fb_info *info;
3419 struct e3d_info *ep;
3420 unsigned int line_length;
3421 int err;
3422
3423 + of_node = pci_device_to_OF_node(pdev);
3424 + if (!of_node) {
3425 + printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
3426 + pci_name(pdev));
3427 + return -ENODEV;
3428 + }
3429 +
3430 + device_type = of_get_property(of_node, "device_type", NULL);
3431 + if (!device_type) {
3432 + printk(KERN_INFO "e3d: Ignoring secondary output device "
3433 + "at %s\n", pci_name(pdev));
3434 + return -ENODEV;
3435 + }
3436 +
3437 err = pci_enable_device(pdev);
3438 if (err < 0) {
3439 printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
3440 @@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
3441 ep->info = info;
3442 ep->pdev = pdev;
3443 spin_lock_init(&ep->lock);
3444 - ep->of_node = pci_device_to_OF_node(pdev);
3445 - if (!ep->of_node) {
3446 - printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
3447 - pci_name(pdev));
3448 - err = -ENODEV;
3449 - goto err_release_fb;
3450 - }
3451 + ep->of_node = of_node;
3452
3453 /* Read the PCI base register of the frame buffer, which we
3454 * need in order to interpret the RAMDAC_VID_*FB* values in
3455 diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
3456 index a6c5674..0b91907 100644
3457 --- a/drivers/watchdog/hpwdt.c
3458 +++ b/drivers/watchdog/hpwdt.c
3459 @@ -443,7 +443,7 @@ static void hpwdt_ping(void)
3460 static int hpwdt_change_timer(int new_margin)
3461 {
3462 /* Arbitrary, can't find the card's limits */
3463 - if (new_margin < 30 || new_margin > 600) {
3464 + if (new_margin < 5 || new_margin > 600) {
3465 printk(KERN_WARNING
3466 "hpwdt: New value passed in is invalid: %d seconds.\n",
3467 new_margin);
3468 diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
3469 index 4bdb7f1..e2ebe08 100644
3470 --- a/drivers/watchdog/iTCO_wdt.c
3471 +++ b/drivers/watchdog/iTCO_wdt.c
3472 @@ -115,8 +115,37 @@ enum iTCO_chipsets {
3473 TCO_3420, /* 3420 */
3474 TCO_3450, /* 3450 */
3475 TCO_EP80579, /* EP80579 */
3476 - TCO_CPTD, /* CPT Desktop */
3477 - TCO_CPTM, /* CPT Mobile */
3478 + TCO_CPT1, /* Cougar Point */
3479 + TCO_CPT2, /* Cougar Point Desktop */
3480 + TCO_CPT3, /* Cougar Point Mobile */
3481 + TCO_CPT4, /* Cougar Point */
3482 + TCO_CPT5, /* Cougar Point */
3483 + TCO_CPT6, /* Cougar Point */
3484 + TCO_CPT7, /* Cougar Point */
3485 + TCO_CPT8, /* Cougar Point */
3486 + TCO_CPT9, /* Cougar Point */
3487 + TCO_CPT10, /* Cougar Point */
3488 + TCO_CPT11, /* Cougar Point */
3489 + TCO_CPT12, /* Cougar Point */
3490 + TCO_CPT13, /* Cougar Point */
3491 + TCO_CPT14, /* Cougar Point */
3492 + TCO_CPT15, /* Cougar Point */
3493 + TCO_CPT16, /* Cougar Point */
3494 + TCO_CPT17, /* Cougar Point */
3495 + TCO_CPT18, /* Cougar Point */
3496 + TCO_CPT19, /* Cougar Point */
3497 + TCO_CPT20, /* Cougar Point */
3498 + TCO_CPT21, /* Cougar Point */
3499 + TCO_CPT22, /* Cougar Point */
3500 + TCO_CPT23, /* Cougar Point */
3501 + TCO_CPT24, /* Cougar Point */
3502 + TCO_CPT25, /* Cougar Point */
3503 + TCO_CPT26, /* Cougar Point */
3504 + TCO_CPT27, /* Cougar Point */
3505 + TCO_CPT28, /* Cougar Point */
3506 + TCO_CPT29, /* Cougar Point */
3507 + TCO_CPT30, /* Cougar Point */
3508 + TCO_CPT31, /* Cougar Point */
3509 };
3510
3511 static struct {
3512 @@ -173,8 +202,37 @@ static struct {
3513 {"3420", 2},
3514 {"3450", 2},
3515 {"EP80579", 2},
3516 - {"CPT Desktop", 2},
3517 - {"CPT Mobile", 2},
3518 + {"Cougar Point", 2},
3519 + {"Cougar Point", 2},
3520 + {"Cougar Point", 2},
3521 + {"Cougar Point", 2},
3522 + {"Cougar Point", 2},
3523 + {"Cougar Point", 2},
3524 + {"Cougar Point", 2},
3525 + {"Cougar Point", 2},
3526 + {"Cougar Point", 2},
3527 + {"Cougar Point", 2},
3528 + {"Cougar Point", 2},
3529 + {"Cougar Point", 2},
3530 + {"Cougar Point", 2},
3531 + {"Cougar Point", 2},
3532 + {"Cougar Point", 2},
3533 + {"Cougar Point", 2},
3534 + {"Cougar Point", 2},
3535 + {"Cougar Point", 2},
3536 + {"Cougar Point", 2},
3537 + {"Cougar Point", 2},
3538 + {"Cougar Point", 2},
3539 + {"Cougar Point", 2},
3540 + {"Cougar Point", 2},
3541 + {"Cougar Point", 2},
3542 + {"Cougar Point", 2},
3543 + {"Cougar Point", 2},
3544 + {"Cougar Point", 2},
3545 + {"Cougar Point", 2},
3546 + {"Cougar Point", 2},
3547 + {"Cougar Point", 2},
3548 + {"Cougar Point", 2},
3549 {NULL, 0}
3550 };
3551
3552 @@ -259,8 +317,37 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
3553 { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
3554 { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
3555 { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
3556 - { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
3557 - { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
3558 + { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
3559 + { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
3560 + { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
3561 + { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
3562 + { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
3563 + { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
3564 + { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
3565 + { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
3566 + { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
3567 + { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
3568 + { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
3569 + { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
3570 + { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
3571 + { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
3572 + { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
3573 + { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
3574 + { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
3575 + { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
3576 + { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
3577 + { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
3578 + { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
3579 + { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
3580 + { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
3581 + { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
3582 + { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
3583 + { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
3584 + { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
3585 + { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
3586 + { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
3587 + { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
3588 + { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
3589 { 0, }, /* End of list */
3590 };
3591 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
3592 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
3593 index 74a0461..92f9590 100644
3594 --- a/fs/9p/vfs_file.c
3595 +++ b/fs/9p/vfs_file.c
3596 @@ -114,7 +114,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
3597 P9_DPRINTK(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
3598
3599 /* No mandatory locks */
3600 - if (__mandatory_lock(inode))
3601 + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
3602 return -ENOLCK;
3603
3604 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
3605 diff --git a/fs/block_dev.c b/fs/block_dev.c
3606 index d11d028..8db62b2 100644
3607 --- a/fs/block_dev.c
3608 +++ b/fs/block_dev.c
3609 @@ -404,7 +404,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
3610 * NULL first argument is nfsd_sync_dir() and that's not a directory.
3611 */
3612
3613 -static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3614 +int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3615 {
3616 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
3617 int error;
3618 @@ -418,6 +418,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
3619 error = 0;
3620 return error;
3621 }
3622 +EXPORT_SYMBOL(block_fsync);
3623
3624 /*
3625 * pseudo-fs
3626 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3627 index 941441d..4e6dbab 100644
3628 --- a/fs/cifs/cifssmb.c
3629 +++ b/fs/cifs/cifssmb.c
3630 @@ -1430,6 +1430,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
3631 __u32 bytes_sent;
3632 __u16 byte_count;
3633
3634 + *nbytes = 0;
3635 +
3636 /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
3637 if (tcon->ses == NULL)
3638 return -ECONNABORTED;
3639 @@ -1512,11 +1514,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
3640 cifs_stats_inc(&tcon->num_writes);
3641 if (rc) {
3642 cFYI(1, ("Send error in write = %d", rc));
3643 - *nbytes = 0;
3644 } else {
3645 *nbytes = le16_to_cpu(pSMBr->CountHigh);
3646 *nbytes = (*nbytes) << 16;
3647 *nbytes += le16_to_cpu(pSMBr->Count);
3648 +
3649 + /*
3650 	 + * Mask off the high 16 bits when the byte count returned by the
3651 	 + * server is greater than the byte count requested by the client. Some
3652 + * OS/2 servers are known to set incorrect CountHigh values.
3653 + */
3654 + if (*nbytes > count)
3655 + *nbytes &= 0xFFFF;
3656 }
3657
3658 cifs_buf_release(pSMB);
3659 @@ -1605,6 +1614,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
3660 *nbytes = le16_to_cpu(pSMBr->CountHigh);
3661 *nbytes = (*nbytes) << 16;
3662 *nbytes += le16_to_cpu(pSMBr->Count);
3663 +
3664 + /*
3665 	 + * Mask off the high 16 bits when the byte count returned by the
3666 	 + * server is greater than the byte count requested by the client. OS/2
3667 + * servers are known to set incorrect CountHigh values.
3668 + */
3669 + if (*nbytes > count)
3670 + *nbytes &= 0xFFFF;
3671 }
3672
3673 /* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
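Both CIFSSMBWrite hunks apply the same workaround: the SMB write response carries the byte count split across two 16-bit fields, and some OS/2 servers fill CountHigh with garbage, so the reassembled value is trusted only if it does not exceed what the client asked to write. A standalone illustration of the arithmetic:

    #include <stdio.h>

    static unsigned int fixup_nbytes(unsigned int count_high,
                                     unsigned int count_low,
                                     unsigned int requested)
    {
            unsigned int nbytes = (count_high << 16) + count_low;

            if (nbytes > requested)           /* CountHigh can't be right */
                    nbytes &= 0xFFFF;         /* keep the trustworthy low word */
            return nbytes;
    }

    int main(void)
    {
            /* bogus CountHigh=1 on a 4 KiB request: 65636 -> 100 */
            printf("%u\n", fixup_nbytes(1, 100, 4096));
            return 0;
    }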
3674 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
3675 index 4a430ab..23dc2af 100644
3676 --- a/fs/ecryptfs/inode.c
3677 +++ b/fs/ecryptfs/inode.c
3678 @@ -647,38 +647,17 @@ out_lock:
3679 return rc;
3680 }
3681
3682 -static int
3683 -ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3684 +static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
3685 + size_t *bufsiz)
3686 {
3687 + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
3688 char *lower_buf;
3689 - size_t lower_bufsiz;
3690 - struct dentry *lower_dentry;
3691 - struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
3692 - char *plaintext_name;
3693 - size_t plaintext_name_size;
3694 + size_t lower_bufsiz = PATH_MAX;
3695 mm_segment_t old_fs;
3696 int rc;
3697
3698 - lower_dentry = ecryptfs_dentry_to_lower(dentry);
3699 - if (!lower_dentry->d_inode->i_op->readlink) {
3700 - rc = -EINVAL;
3701 - goto out;
3702 - }
3703 - mount_crypt_stat = &ecryptfs_superblock_to_private(
3704 - dentry->d_sb)->mount_crypt_stat;
3705 - /*
3706 - * If the lower filename is encrypted, it will result in a significantly
3707 - * longer name. If needed, truncate the name after decode and decrypt.
3708 - */
3709 - if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
3710 - lower_bufsiz = PATH_MAX;
3711 - else
3712 - lower_bufsiz = bufsiz;
3713 - /* Released in this function */
3714 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
3715 - if (lower_buf == NULL) {
3716 - printk(KERN_ERR "%s: Out of memory whilst attempting to "
3717 - "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
3718 + if (!lower_buf) {
3719 rc = -ENOMEM;
3720 goto out;
3721 }
3722 @@ -688,29 +667,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3723 (char __user *)lower_buf,
3724 lower_bufsiz);
3725 set_fs(old_fs);
3726 - if (rc >= 0) {
3727 - rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
3728 - &plaintext_name_size,
3729 - dentry, lower_buf,
3730 - rc);
3731 - if (rc) {
3732 - printk(KERN_ERR "%s: Error attempting to decode and "
3733 - "decrypt filename; rc = [%d]\n", __func__,
3734 - rc);
3735 - goto out_free_lower_buf;
3736 - }
3737 - /* Check for bufsiz <= 0 done in sys_readlinkat() */
3738 - rc = copy_to_user(buf, plaintext_name,
3739 - min((size_t) bufsiz, plaintext_name_size));
3740 - if (rc)
3741 - rc = -EFAULT;
3742 - else
3743 - rc = plaintext_name_size;
3744 - kfree(plaintext_name);
3745 - fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
3746 - }
3747 -out_free_lower_buf:
3748 + if (rc < 0)
3749 + goto out;
3750 + lower_bufsiz = rc;
3751 + rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
3752 + lower_buf, lower_bufsiz);
3753 +out:
3754 kfree(lower_buf);
3755 + return rc;
3756 +}
3757 +
3758 +static int
3759 +ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
3760 +{
3761 + char *kbuf;
3762 + size_t kbufsiz, copied;
3763 + int rc;
3764 +
3765 + rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
3766 + if (rc)
3767 + goto out;
3768 + copied = min_t(size_t, bufsiz, kbufsiz);
3769 + rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
3770 + kfree(kbuf);
3771 + fsstack_copy_attr_atime(dentry->d_inode,
3772 + ecryptfs_dentry_to_lower(dentry)->d_inode);
3773 out:
3774 return rc;
3775 }
3776 @@ -1015,6 +996,28 @@ out:
3777 return rc;
3778 }
3779
3780 +int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
3781 + struct kstat *stat)
3782 +{
3783 + struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
3784 + int rc = 0;
3785 +
3786 + mount_crypt_stat = &ecryptfs_superblock_to_private(
3787 + dentry->d_sb)->mount_crypt_stat;
3788 + generic_fillattr(dentry->d_inode, stat);
3789 + if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
3790 + char *target;
3791 + size_t targetsiz;
3792 +
3793 + rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
3794 + if (!rc) {
3795 + kfree(target);
3796 + stat->size = targetsiz;
3797 + }
3798 + }
3799 + return rc;
3800 +}
3801 +
3802 int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
3803 struct kstat *stat)
3804 {
3805 @@ -1039,7 +1042,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
3806
3807 lower_dentry = ecryptfs_dentry_to_lower(dentry);
3808 if (!lower_dentry->d_inode->i_op->setxattr) {
3809 - rc = -ENOSYS;
3810 + rc = -EOPNOTSUPP;
3811 goto out;
3812 }
3813 mutex_lock(&lower_dentry->d_inode->i_mutex);
3814 @@ -1057,7 +1060,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
3815 int rc = 0;
3816
3817 if (!lower_dentry->d_inode->i_op->getxattr) {
3818 - rc = -ENOSYS;
3819 + rc = -EOPNOTSUPP;
3820 goto out;
3821 }
3822 mutex_lock(&lower_dentry->d_inode->i_mutex);
3823 @@ -1084,7 +1087,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
3824
3825 lower_dentry = ecryptfs_dentry_to_lower(dentry);
3826 if (!lower_dentry->d_inode->i_op->listxattr) {
3827 - rc = -ENOSYS;
3828 + rc = -EOPNOTSUPP;
3829 goto out;
3830 }
3831 mutex_lock(&lower_dentry->d_inode->i_mutex);
3832 @@ -1101,7 +1104,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
3833
3834 lower_dentry = ecryptfs_dentry_to_lower(dentry);
3835 if (!lower_dentry->d_inode->i_op->removexattr) {
3836 - rc = -ENOSYS;
3837 + rc = -EOPNOTSUPP;
3838 goto out;
3839 }
3840 mutex_lock(&lower_dentry->d_inode->i_mutex);
3841 @@ -1132,6 +1135,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
3842 .put_link = ecryptfs_put_link,
3843 .permission = ecryptfs_permission,
3844 .setattr = ecryptfs_setattr,
3845 + .getattr = ecryptfs_getattr_link,
3846 .setxattr = ecryptfs_setxattr,
3847 .getxattr = ecryptfs_getxattr,
3848 .listxattr = ecryptfs_listxattr,
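The readlink rework above follows a common idiom: compute the result into a kernel buffer first (ecryptfs_readlink_lower(), which ecryptfs_getattr_link() can then reuse for stat->size), and make clamping and copying to userspace the final step. The copy step in isolation looks like this sketch (kernel primitives assumed):

    static ssize_t copy_name_to_user(char __user *ubuf, size_t ubufsiz,
                                     const char *kbuf, size_t kbufsiz)
    {
            size_t copied = min_t(size_t, ubufsiz, kbufsiz);

            if (copy_to_user(ubuf, kbuf, copied))
                    return -EFAULT;
            return copied;   /* readlink() semantics: bytes stored, no NUL */
    }

Separating "produce the name" from "copy it out" is what removes the old duplicated decode-and-copy logic.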
3849 diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
3850 index b15a43a..1a037f7 100644
3851 --- a/fs/ecryptfs/super.c
3852 +++ b/fs/ecryptfs/super.c
3853 @@ -85,7 +85,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
3854 if (lower_dentry->d_inode) {
3855 fput(inode_info->lower_file);
3856 inode_info->lower_file = NULL;
3857 - d_drop(lower_dentry);
3858 }
3859 }
3860 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
3861 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3862 index 874d169..602d5ad 100644
3863 --- a/fs/ext4/ext4.h
3864 +++ b/fs/ext4/ext4.h
3865 @@ -139,8 +139,8 @@ typedef struct ext4_io_end {
3866 struct inode *inode; /* file being written to */
3867 unsigned int flag; /* unwritten or not */
3868 int error; /* I/O error code */
3869 - ext4_lblk_t offset; /* offset in the file */
3870 - size_t size; /* size of the extent */
3871 + loff_t offset; /* offset in the file */
3872 + ssize_t size; /* size of the extent */
3873 struct work_struct work; /* data work queue */
3874 } ext4_io_end_t;
3875
3876 @@ -1744,7 +1744,7 @@ extern void ext4_ext_release(struct super_block *);
3877 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
3878 loff_t len);
3879 extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3880 - loff_t len);
3881 + ssize_t len);
3882 extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
3883 sector_t block, unsigned int max_blocks,
3884 struct buffer_head *bh, int flags);
3885 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3886 index 765a482..c568779 100644
3887 --- a/fs/ext4/extents.c
3888 +++ b/fs/ext4/extents.c
3889 @@ -3603,7 +3603,7 @@ retry:
3890 * Returns 0 on success.
3891 */
3892 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3893 - loff_t len)
3894 + ssize_t len)
3895 {
3896 handle_t *handle;
3897 ext4_lblk_t block;
3898 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3899 index e119524..2059c34 100644
3900 --- a/fs/ext4/inode.c
3901 +++ b/fs/ext4/inode.c
3902 @@ -3551,7 +3551,7 @@ static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
3903 {
3904 struct inode *inode = io->inode;
3905 loff_t offset = io->offset;
3906 - size_t size = io->size;
3907 + ssize_t size = io->size;
3908 int ret = 0;
3909
3910 ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
3911 diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
3912 index f565f24..72646e2 100644
3913 --- a/fs/fat/namei_vfat.c
3914 +++ b/fs/fat/namei_vfat.c
3915 @@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3916 {
3917 struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
3918 wchar_t *ip, *ext_start, *end, *name_start;
3919 - unsigned char base[9], ext[4], buf[8], *p;
3920 + unsigned char base[9], ext[4], buf[5], *p;
3921 unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
3922 int chl, chi;
3923 int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
3924 @@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3925 return 0;
3926 }
3927
3928 - i = jiffies & 0xffff;
3929 + i = jiffies;
3930 sz = (jiffies >> 16) & 0x7;
3931 if (baselen > 2) {
3932 baselen = numtail2_baselen;
3933 @@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
3934 name_res[baselen + 4] = '~';
3935 name_res[baselen + 5] = '1' + sz;
3936 while (1) {
3937 - sprintf(buf, "%04X", i);
3938 + snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
3939 memcpy(&name_res[baselen], buf, 4);
3940 if (vfat_find_form(dir, name_res) < 0)
3941 break;
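The numeric-tail fix in namei_vfat.c combines three things: keep the full counter in i, shrink buf to exactly the four hex digits plus NUL it needs, and make the print both bounded and masked. With the old sprintf(buf, "%04X", i), "%04X" pads to at least four digits but prints all significant ones, so any value above 0xffff overruns the buffer. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            char buf[5];                  /* "XXXX" + NUL, nothing more */
            unsigned int i = 0x12345;     /* counter grown past 16 bits */

            /* sprintf(buf, "%04X", i) would emit 5 digits + NUL = 6 bytes;
             * bounding the write and masking the value keeps it at 4 digits */
            snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
            printf("%s\n", buf);          /* -> 2345 */
            return 0;
    }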
3942 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
3943 index ee77713..bd39abc 100644
3944 --- a/fs/nfs/client.c
3945 +++ b/fs/nfs/client.c
3946 @@ -1293,7 +1293,8 @@ static int nfs4_init_server(struct nfs_server *server,
3947
3948 /* Initialise the client representation from the mount data */
3949 server->flags = data->flags;
3950 - server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
3951 + server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
3952 + NFS_CAP_POSIX_LOCK;
3953 server->options = data->options;
3954
3955 /* Get a client record */
3956 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3957 index 8b5382e..af6948d 100644
3958 --- a/fs/nfs/dir.c
3959 +++ b/fs/nfs/dir.c
3960 @@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
3961 res = NULL;
3962 goto out;
3963 /* This turned out not to be a regular file */
3964 + case -EISDIR:
3965 case -ENOTDIR:
3966 goto no_open;
3967 case -ELOOP:
3968 if (!(nd->intent.open.flags & O_NOFOLLOW))
3969 goto no_open;
3970 - /* case -EISDIR: */
3971 /* case -EINVAL: */
3972 default:
3973 goto out;
3974 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3975 index 375f0fa..ecf6602 100644
3976 --- a/fs/nfs/nfs4proc.c
3977 +++ b/fs/nfs/nfs4proc.c
3978 @@ -1520,6 +1520,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
3979 nfs_post_op_update_inode(dir, o_res->dir_attr);
3980 } else
3981 nfs_refresh_inode(dir, o_res->dir_attr);
3982 + if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
3983 + server->caps &= ~NFS_CAP_POSIX_LOCK;
3984 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
3985 status = _nfs4_proc_open_confirm(data);
3986 if (status != 0)
3987 @@ -1660,7 +1662,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
3988 status = PTR_ERR(state);
3989 if (IS_ERR(state))
3990 goto err_opendata_put;
3991 - if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
3992 + if (server->caps & NFS_CAP_POSIX_LOCK)
3993 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3994 nfs4_opendata_put(opendata);
3995 nfs4_put_state_owner(sp);
3996 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
3997 index a8587e9..bbf72d8 100644
3998 --- a/fs/nfsd/nfs4xdr.c
3999 +++ b/fs/nfsd/nfs4xdr.c
4000 @@ -2121,9 +2121,15 @@ out_acl:
4001 * and this is the root of a cross-mounted filesystem.
4002 */
4003 if (ignore_crossmnt == 0 &&
4004 - exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
4005 - err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
4006 - exp->ex_path.mnt->mnt_mountpoint, &stat);
4007 + dentry == exp->ex_path.mnt->mnt_root) {
4008 + struct path path = exp->ex_path;
4009 + path_get(&path);
4010 + while (follow_up(&path)) {
4011 + if (path.dentry != path.mnt->mnt_root)
4012 + break;
4013 + }
4014 + err = vfs_getattr(path.mnt, path.dentry, &stat);
4015 + path_put(&path);
4016 if (err)
4017 goto out_nfserr;
4018 }
4019 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
4020 index 0501974..8ccf0f8 100644
4021 --- a/fs/ocfs2/acl.c
4022 +++ b/fs/ocfs2/acl.c
4023 @@ -30,6 +30,8 @@
4024 #include "alloc.h"
4025 #include "dlmglue.h"
4026 #include "file.h"
4027 +#include "inode.h"
4028 +#include "journal.h"
4029 #include "ocfs2_fs.h"
4030
4031 #include "xattr.h"
4032 @@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
4033 }
4034
4035 /*
4036 	 + * Helper function to set i_mode in memory and on disk. Some call paths
4037 	 + * will not have di_bh or a journal handle to pass, in which case it
4038 	 + + * will create its own.
4039 + */
4040 +static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
4041 + handle_t *handle, umode_t new_mode)
4042 +{
4043 + int ret, commit_handle = 0;
4044 + struct ocfs2_dinode *di;
4045 +
4046 + if (di_bh == NULL) {
4047 + ret = ocfs2_read_inode_block(inode, &di_bh);
4048 + if (ret) {
4049 + mlog_errno(ret);
4050 + goto out;
4051 + }
4052 + } else
4053 + get_bh(di_bh);
4054 +
4055 + if (handle == NULL) {
4056 + handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
4057 + OCFS2_INODE_UPDATE_CREDITS);
4058 + if (IS_ERR(handle)) {
4059 + ret = PTR_ERR(handle);
4060 + mlog_errno(ret);
4061 + goto out_brelse;
4062 + }
4063 +
4064 + commit_handle = 1;
4065 + }
4066 +
4067 + di = (struct ocfs2_dinode *)di_bh->b_data;
4068 + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
4069 + OCFS2_JOURNAL_ACCESS_WRITE);
4070 + if (ret) {
4071 + mlog_errno(ret);
4072 + goto out_commit;
4073 + }
4074 +
4075 + inode->i_mode = new_mode;
4076 + di->i_mode = cpu_to_le16(inode->i_mode);
4077 +
4078 + ocfs2_journal_dirty(handle, di_bh);
4079 +
4080 +out_commit:
4081 + if (commit_handle)
4082 + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
4083 +out_brelse:
4084 + brelse(di_bh);
4085 +out:
4086 + return ret;
4087 +}
4088 +
4089 +/*
4090 * Set the access or default ACL of an inode.
4091 */
4092 static int ocfs2_set_acl(handle_t *handle,
4093 @@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle,
4094 if (ret < 0)
4095 return ret;
4096 else {
4097 - inode->i_mode = mode;
4098 if (ret == 0)
4099 acl = NULL;
4100 +
4101 + ret = ocfs2_acl_set_mode(inode, di_bh,
4102 + handle, mode);
4103 + if (ret)
4104 + return ret;
4105 +
4106 }
4107 }
4108 break;
4109 @@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
4110 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4111 struct posix_acl *acl = NULL;
4112 int ret = 0;
4113 + mode_t mode;
4114
4115 if (!S_ISLNK(inode->i_mode)) {
4116 if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
4117 @@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle,
4118 if (IS_ERR(acl))
4119 return PTR_ERR(acl);
4120 }
4121 - if (!acl)
4122 - inode->i_mode &= ~current_umask();
4123 + if (!acl) {
4124 + mode = inode->i_mode & ~current_umask();
4125 + ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
4126 + if (ret) {
4127 + mlog_errno(ret);
4128 + goto cleanup;
4129 + }
4130 + }
4131 }
4132 if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
4133 struct posix_acl *clone;
4134 - mode_t mode;
4135
4136 if (S_ISDIR(inode->i_mode)) {
4137 ret = ocfs2_set_acl(handle, inode, di_bh,
4138 @@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle,
4139 mode = inode->i_mode;
4140 ret = posix_acl_create_masq(clone, &mode);
4141 if (ret >= 0) {
4142 - inode->i_mode = mode;
4143 + ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
4144 if (ret > 0) {
4145 ret = ocfs2_set_acl(handle, inode,
4146 di_bh, ACL_TYPE_ACCESS,
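ocfs2_acl_set_mode() uses a borrow-or-create convention for both optional arguments: a caller-supplied di_bh is borrowed (the get_bh() balances the unconditional brelse() on exit), a caller-supplied handle is used as-is, and anything missing is created and released locally. The general shape of the pattern, with illustrative names:

    int do_update(struct ctx *ctx, struct res *r)
    {
            int created = 0, ret;

            if (!r) {
                    r = res_create(ctx);     /* caller gave us nothing */
                    if (IS_ERR(r))
                            return PTR_ERR(r);
                    created = 1;
            }

            ret = apply_update(ctx, r);

            if (created)                     /* release only what we made */
                    res_destroy(r);
            return ret;
    }

The point of the helper in this patch is that i_mode changes now always go through the journal, instead of being set only in memory.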
4147 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
4148 index c30b644..79b5dac 100644
4149 --- a/fs/ocfs2/suballoc.c
4150 +++ b/fs/ocfs2/suballoc.c
4151 @@ -152,7 +152,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
4152
4153 #define do_error(fmt, ...) \
4154 do{ \
4155 - if (clean_error) \
4156 + if (resize) \
4157 mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
4158 else \
4159 ocfs2_error(sb, fmt, ##__VA_ARGS__); \
4160 @@ -160,7 +160,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
4161
4162 static int ocfs2_validate_gd_self(struct super_block *sb,
4163 struct buffer_head *bh,
4164 - int clean_error)
4165 + int resize)
4166 {
4167 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
4168
4169 @@ -211,7 +211,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
4170 static int ocfs2_validate_gd_parent(struct super_block *sb,
4171 struct ocfs2_dinode *di,
4172 struct buffer_head *bh,
4173 - int clean_error)
4174 + int resize)
4175 {
4176 unsigned int max_bits;
4177 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
4178 @@ -233,8 +233,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
4179 return -EINVAL;
4180 }
4181
4182 - if (le16_to_cpu(gd->bg_chain) >=
4183 - le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
4184 	 + /* In resize we may encounter the case bg_chain == cl_next_free_rec. */
4185 + if ((le16_to_cpu(gd->bg_chain) >
4186 + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
4187 + ((le16_to_cpu(gd->bg_chain) ==
4188 + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
4189 do_error("Group descriptor #%llu has bad chain %u",
4190 (unsigned long long)bh->b_blocknr,
4191 le16_to_cpu(gd->bg_chain));
4192 diff --git a/fs/proc/base.c b/fs/proc/base.c
4193 index 58324c2..3cd449d 100644
4194 --- a/fs/proc/base.c
4195 +++ b/fs/proc/base.c
4196 @@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = {
4197 unsigned long badness(struct task_struct *p, unsigned long uptime);
4198 static int proc_oom_score(struct task_struct *task, char *buffer)
4199 {
4200 - unsigned long points;
4201 + unsigned long points = 0;
4202 struct timespec uptime;
4203
4204 do_posix_clock_monotonic_gettime(&uptime);
4205 read_lock(&tasklist_lock);
4206 - points = badness(task->group_leader, uptime.tv_sec);
4207 + if (pid_alive(task))
4208 + points = badness(task, uptime.tv_sec);
4209 read_unlock(&tasklist_lock);
4210 return sprintf(buffer, "%lu\n", points);
4211 	 }
4212 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
4213 index 6e722c1..6c9da00 100644
4214 --- a/fs/quota/dquot.c
4215 +++ b/fs/quota/dquot.c
4216 @@ -2321,34 +2321,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
4217 if (di->dqb_valid & QIF_SPACE) {
4218 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
4219 check_blim = 1;
4220 - __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
4221 + set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
4222 }
4223 if (di->dqb_valid & QIF_BLIMITS) {
4224 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
4225 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
4226 check_blim = 1;
4227 - __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
4228 + set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
4229 }
4230 if (di->dqb_valid & QIF_INODES) {
4231 dm->dqb_curinodes = di->dqb_curinodes;
4232 check_ilim = 1;
4233 - __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
4234 + set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
4235 }
4236 if (di->dqb_valid & QIF_ILIMITS) {
4237 dm->dqb_isoftlimit = di->dqb_isoftlimit;
4238 dm->dqb_ihardlimit = di->dqb_ihardlimit;
4239 check_ilim = 1;
4240 - __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
4241 + set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
4242 }
4243 if (di->dqb_valid & QIF_BTIME) {
4244 dm->dqb_btime = di->dqb_btime;
4245 check_blim = 1;
4246 - __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
4247 + set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
4248 }
4249 if (di->dqb_valid & QIF_ITIME) {
4250 dm->dqb_itime = di->dqb_itime;
4251 check_ilim = 1;
4252 - __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
4253 + set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
4254 }
4255
4256 if (check_blim) {
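The only change in this dquot.c hunk is __set_bit() becoming set_bit(). dq_flags is also modified from contexts that do not hold dq_data_lock, so the read-modify-write must be atomic; the double-underscore variant compiles to a plain load/or/store and can lose a concurrent update. What the non-atomic form boils down to:

    static inline void nonatomic_set_bit(unsigned long mask,
                                         unsigned long *addr)
    {
            unsigned long word = *addr;   /* load                           */
            word |= mask;                 /* modify                         */
            *addr = word;                 /* store: an atomic set_bit() on
                                           * another CPU landing between the
                                           * load and this store is silently
                                           * overwritten                    */
    }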
4257 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
4258 index b4a7dd0..33bc410 100644
4259 --- a/fs/reiserfs/super.c
4260 +++ b/fs/reiserfs/super.c
4261 @@ -1619,10 +1619,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
4262 save_mount_options(s, data);
4263
4264 sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
4265 - if (!sbi) {
4266 - errval = -ENOMEM;
4267 - goto error_alloc;
4268 - }
4269 + if (!sbi)
4270 + return -ENOMEM;
4271 s->s_fs_info = sbi;
4272 /* Set default values for options: non-aggressive tails, RO on errors */
4273 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
4274 @@ -1879,12 +1877,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
4275 return (0);
4276
4277 error:
4278 - reiserfs_write_unlock(s);
4279 -error_alloc:
4280 if (jinit_done) { /* kill the commit thread, free journal ram */
4281 journal_release_error(NULL, s);
4282 }
4283
4284 + reiserfs_write_unlock(s);
4285 +
4286 reiserfs_free_bitmap_cache(s);
4287 if (SB_BUFFER_WITH_SB(s))
4288 brelse(SB_BUFFER_WITH_SB(s));
4289 diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
4290 index 66abe36..1c65a2b 100644
4291 --- a/fs/xfs/linux-2.6/xfs_aops.c
4292 +++ b/fs/xfs/linux-2.6/xfs_aops.c
4293 @@ -163,14 +163,17 @@ xfs_ioend_new_eof(
4294 }
4295
4296 /*
4297 - * Update on-disk file size now that data has been written to disk.
4298 - * The current in-memory file size is i_size. If a write is beyond
4299 - * eof i_new_size will be the intended file size until i_size is
4300 - * updated. If this write does not extend all the way to the valid
4301 - * file size then restrict this update to the end of the write.
4302 + * Update on-disk file size now that data has been written to disk. The
4303 + * current in-memory file size is i_size. If a write is beyond eof i_new_size
4304 + * will be the intended file size until i_size is updated. If this write does
4305 + * not extend all the way to the valid file size then restrict this update to
4306 + * the end of the write.
4307 + *
4308 + * This function does not block as blocking on the inode lock in IO completion
4309 	 + * can lead to IO completion order dependency deadlocks. If it can't get the
4310 + * inode ilock it will return EAGAIN. Callers must handle this.
4311 */
4312 -
4313 -STATIC void
4314 +STATIC int
4315 xfs_setfilesize(
4316 xfs_ioend_t *ioend)
4317 {
4318 @@ -181,9 +184,11 @@ xfs_setfilesize(
4319 ASSERT(ioend->io_type != IOMAP_READ);
4320
4321 if (unlikely(ioend->io_error))
4322 - return;
4323 + return 0;
4324 +
4325 + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
4326 + return EAGAIN;
4327
4328 - xfs_ilock(ip, XFS_ILOCK_EXCL);
4329 isize = xfs_ioend_new_eof(ioend);
4330 if (isize) {
4331 ip->i_d.di_size = isize;
4332 @@ -191,6 +196,28 @@ xfs_setfilesize(
4333 }
4334
4335 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4336 + return 0;
4337 +}
4338 +
4339 +/*
4340 + * Schedule IO completion handling on a xfsdatad if this was
4341 	 + * Schedule IO completion handling on an xfsdatad if this was
4342 + * flush the workqueue.
4343 + */
4344 +STATIC void
4345 +xfs_finish_ioend(
4346 + xfs_ioend_t *ioend,
4347 + int wait)
4348 +{
4349 + if (atomic_dec_and_test(&ioend->io_remaining)) {
4350 + struct workqueue_struct *wq;
4351 +
4352 + wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
4353 + xfsconvertd_workqueue : xfsdatad_workqueue;
4354 + queue_work(wq, &ioend->io_work);
4355 + if (wait)
4356 + flush_workqueue(wq);
4357 + }
4358 }
4359
4360 /*
4361 @@ -198,11 +225,11 @@ xfs_setfilesize(
4362 */
4363 STATIC void
4364 xfs_end_io(
4365 - struct work_struct *work)
4366 + struct work_struct *work)
4367 {
4368 - xfs_ioend_t *ioend =
4369 - container_of(work, xfs_ioend_t, io_work);
4370 - struct xfs_inode *ip = XFS_I(ioend->io_inode);
4371 + xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
4372 + struct xfs_inode *ip = XFS_I(ioend->io_inode);
4373 + int error;
4374
4375 /*
4376 * For unwritten extents we need to issue transactions to convert a
4377 @@ -210,7 +237,6 @@ xfs_end_io(
4378 */
4379 if (ioend->io_type == IOMAP_UNWRITTEN &&
4380 likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
4381 - int error;
4382
4383 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
4384 ioend->io_size);
4385 @@ -222,30 +248,23 @@ xfs_end_io(
4386 * We might have to update the on-disk file size after extending
4387 * writes.
4388 */
4389 - if (ioend->io_type != IOMAP_READ)
4390 - xfs_setfilesize(ioend);
4391 - xfs_destroy_ioend(ioend);
4392 -}
4393 -
4394 -/*
4395 - * Schedule IO completion handling on a xfsdatad if this was
4396 - * the final hold on this ioend. If we are asked to wait,
4397 - * flush the workqueue.
4398 - */
4399 -STATIC void
4400 -xfs_finish_ioend(
4401 - xfs_ioend_t *ioend,
4402 - int wait)
4403 -{
4404 - if (atomic_dec_and_test(&ioend->io_remaining)) {
4405 - struct workqueue_struct *wq;
4406 -
4407 - wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
4408 - xfsconvertd_workqueue : xfsdatad_workqueue;
4409 - queue_work(wq, &ioend->io_work);
4410 - if (wait)
4411 - flush_workqueue(wq);
4412 + if (ioend->io_type != IOMAP_READ) {
4413 + error = xfs_setfilesize(ioend);
4414 + ASSERT(!error || error == EAGAIN);
4415 }
4416 +
4417 + /*
4418 + * If we didn't complete processing of the ioend, requeue it to the
4419 + * tail of the workqueue for another attempt later. Otherwise destroy
4420 + * it.
4421 + */
4422 + if (error == EAGAIN) {
4423 + atomic_inc(&ioend->io_remaining);
4424 + xfs_finish_ioend(ioend, 0);
4425 + /* ensure we don't spin on blocked ioends */
4426 + delay(1);
4427 + } else
4428 + xfs_destroy_ioend(ioend);
4429 }
4430
4431 /*
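The xfs_aops.c restructuring encodes a rule for IO completion handlers: never block on the inode lock there, because completion-order dependencies can deadlock. xfs_setfilesize() therefore trylocks and reports EAGAIN, and xfs_end_io() requeues the ioend instead of waiting. The retry skeleton, reduced to its moving parts (illustrative names, kernel primitives assumed):

    void end_io(struct ioend *io)
    {
            int error = 0;

            if (io->type != IO_READ)
                    error = update_ondisk_size(io);  /* EAGAIN if the inode
                                                      * lock is contended   */
            if (error == EAGAIN) {
                    atomic_inc(&io->remaining);      /* keep the ioend alive */
                    requeue_on_workqueue(io);        /* retry later          */
                    delay(1);                        /* don't busy-spin on a
                                                      * blocked ioend        */
            } else {
                    destroy_ioend(io);
            }
    }

Bumping the reference count before requeueing is essential: the final atomic_dec_and_test() in the queueing helper is what keeps the ioend from being both requeued and destroyed.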
4432 diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
4433 index 1f5e4bb..6b6b394 100644
4434 --- a/fs/xfs/linux-2.6/xfs_sync.c
4435 +++ b/fs/xfs/linux-2.6/xfs_sync.c
4436 @@ -613,7 +613,8 @@ xfssyncd(
4437 set_freezable();
4438 timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
4439 for (;;) {
4440 - timeleft = schedule_timeout_interruptible(timeleft);
4441 + if (list_empty(&mp->m_sync_list))
4442 + timeleft = schedule_timeout_interruptible(timeleft);
4443 /* swsusp */
4444 try_to_freeze();
4445 if (kthread_should_stop() && list_empty(&mp->m_sync_list))
4446 @@ -633,8 +634,7 @@ xfssyncd(
4447 list_add_tail(&mp->m_sync_work.w_list,
4448 &mp->m_sync_list);
4449 }
4450 - list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
4451 - list_move(&work->w_list, &tmp);
4452 + list_splice_init(&mp->m_sync_list, &tmp);
4453 spin_unlock(&mp->m_sync_lock);
4454
4455 list_for_each_entry_safe(work, n, &tmp, w_list) {
4456 @@ -693,12 +693,12 @@ xfs_inode_set_reclaim_tag(
4457 xfs_mount_t *mp = ip->i_mount;
4458 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
4459
4460 - read_lock(&pag->pag_ici_lock);
4461 + write_lock(&pag->pag_ici_lock);
4462 spin_lock(&ip->i_flags_lock);
4463 __xfs_inode_set_reclaim_tag(pag, ip);
4464 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
4465 spin_unlock(&ip->i_flags_lock);
4466 - read_unlock(&pag->pag_ici_lock);
4467 + write_unlock(&pag->pag_ici_lock);
4468 xfs_put_perag(mp, pag);
4469 }
4470
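Three independent fixes share xfs_sync.c: xfssyncd no longer sleeps when work is already queued, the reclaim-tag path takes pag_ici_lock for writing as the radix-tree tag update requires, and an O(n) per-entry list_move() loop becomes list_splice_init(), which detaches the whole list in O(1) so the lock is held for constant time. The drain idiom in isolation:

    LIST_HEAD(tmp);

    spin_lock(&mp->m_sync_lock);
    list_splice_init(&mp->m_sync_list, &tmp);  /* m_sync_list is now empty */
    spin_unlock(&mp->m_sync_lock);

    list_for_each_entry_safe(work, n, &tmp, w_list) {
            list_del(&work->w_list);
            /* ... run the sync work item unlocked ... */
    }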
4471 diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
4472 index 155e798..fd21160 100644
4473 --- a/fs/xfs/xfs_iget.c
4474 +++ b/fs/xfs/xfs_iget.c
4475 @@ -190,13 +190,12 @@ xfs_iget_cache_hit(
4476 trace_xfs_iget_reclaim(ip);
4477
4478 /*
4479 - * We need to set XFS_INEW atomically with clearing the
4480 - * reclaimable tag so that we do have an indicator of the
4481 - * inode still being initialized.
4482 + * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
4483 + * from stomping over us while we recycle the inode. We can't
4484 + * clear the radix tree reclaimable tag yet as it requires
4485 + * pag_ici_lock to be held exclusive.
4486 */
4487 - ip->i_flags |= XFS_INEW;
4488 - ip->i_flags &= ~XFS_IRECLAIMABLE;
4489 - __xfs_inode_clear_reclaim_tag(mp, pag, ip);
4490 + ip->i_flags |= XFS_IRECLAIM;
4491
4492 spin_unlock(&ip->i_flags_lock);
4493 read_unlock(&pag->pag_ici_lock);
4494 @@ -216,7 +215,15 @@ xfs_iget_cache_hit(
4495 trace_xfs_iget_reclaim(ip);
4496 goto out_error;
4497 }
4498 +
4499 + write_lock(&pag->pag_ici_lock);
4500 + spin_lock(&ip->i_flags_lock);
4501 + ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
4502 + ip->i_flags |= XFS_INEW;
4503 + __xfs_inode_clear_reclaim_tag(mp, pag, ip);
4504 inode->i_state = I_NEW;
4505 + spin_unlock(&ip->i_flags_lock);
4506 + write_unlock(&pag->pag_ici_lock);
4507 } else {
4508 /* If the VFS inode is being torn down, pause and try again. */
4509 if (!igrab(inode)) {
4510 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
4511 index e6f3b12..0cbdccc 100644
4512 --- a/include/drm/drm_pciids.h
4513 +++ b/include/drm/drm_pciids.h
4514 @@ -6,6 +6,7 @@
4515 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
4516 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4517 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4518 + {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
4519 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
4520 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
4521 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
4522 @@ -375,6 +376,7 @@
4523 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4524 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4525 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4526 + {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
4527 {0, 0, 0}
4528
4529 #define r128_PCI_IDS \
4530 diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
4531 index aa95508..2c445e1 100644
4532 --- a/include/linux/dm-ioctl.h
4533 +++ b/include/linux/dm-ioctl.h
4534 @@ -266,9 +266,9 @@ enum {
4535 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
4536
4537 #define DM_VERSION_MAJOR 4
4538 -#define DM_VERSION_MINOR 16
4539 +#define DM_VERSION_MINOR 17
4540 #define DM_VERSION_PATCHLEVEL 0
4541 -#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
4542 +#define DM_VERSION_EXTRA "-ioctl (2010-03-05)"
4543
4544 /* Status bits */
4545 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
4546 @@ -316,4 +316,9 @@ enum {
4547 */
4548 #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
4549
4550 +/*
4551 + * If set, a uevent was generated for which the caller may need to wait.
4552 + */
4553 +#define DM_UEVENT_GENERATED_FLAG (1 << 13) /* Out */
4554 +
4555 #endif /* _LINUX_DM_IOCTL_H */
4556 diff --git a/include/linux/freezer.h b/include/linux/freezer.h
4557 index 5a361f8..da7e52b 100644
4558 --- a/include/linux/freezer.h
4559 +++ b/include/linux/freezer.h
4560 @@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
4561 extern void cancel_freezing(struct task_struct *p);
4562
4563 #ifdef CONFIG_CGROUP_FREEZER
4564 -extern int cgroup_frozen(struct task_struct *task);
4565 +extern int cgroup_freezing_or_frozen(struct task_struct *task);
4566 #else /* !CONFIG_CGROUP_FREEZER */
4567 -static inline int cgroup_frozen(struct task_struct *task) { return 0; }
4568 +static inline int cgroup_freezing_or_frozen(struct task_struct *task)
4569 +{
4570 + return 0;
4571 +}
4572 #endif /* !CONFIG_CGROUP_FREEZER */
4573
4574 /*
4575 diff --git a/include/linux/fs.h b/include/linux/fs.h
4576 index f2f68ce..66b0705 100644
4577 --- a/include/linux/fs.h
4578 +++ b/include/linux/fs.h
4579 @@ -2214,6 +2214,7 @@ extern int generic_segment_checks(const struct iovec *iov,
4580 /* fs/block_dev.c */
4581 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
4582 unsigned long nr_segs, loff_t pos);
4583 +extern int block_fsync(struct file *filp, struct dentry *dentry, int datasync);
4584
4585 /* fs/splice.c */
4586 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
4587 diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
4588 index ece0b1c..e117b1a 100644
4589 --- a/include/linux/kfifo.h
4590 +++ b/include/linux/kfifo.h
4591 @@ -86,7 +86,8 @@ union { \
4592 */
4593 #define INIT_KFIFO(name) \
4594 name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
4595 - sizeof(struct kfifo), name##kfifo_buffer)
4596 + sizeof(struct kfifo), \
4597 + name##kfifo_buffer + sizeof(struct kfifo))
4598
4599 /**
4600 * DEFINE_KFIFO - macro to define and initialize a kfifo
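The INIT_KFIFO change is an off-by-a-header fix: DECLARE_KFIFO lays the struct kfifo header and the data area out in one buffer, so the data pointer handed to the initializer must start sizeof(struct kfifo) bytes past the base, not at the base itself. The layout being assumed, as a comment sketch:

    /* single allocation produced by DECLARE_KFIFO(name, size):
     *
     *   name##kfifo_buffer:  [ struct kfifo ][ data .................. ]
     *                         ^base           ^base + sizeof(struct kfifo)
     *
     * usable size  = sizeof(name##kfifo_buffer) - sizeof(struct kfifo)
     * data pointer = name##kfifo_buffer + sizeof(struct kfifo)
     */

Before the fix the size was already adjusted for the header but the pointer was not, so the fifo's data region overlapped its own bookkeeping.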
4601 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4602 index bd5a616..1fe293e 100644
4603 --- a/include/linux/kvm_host.h
4604 +++ b/include/linux/kvm_host.h
4605 @@ -53,7 +53,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
4606 */
4607 struct kvm_io_bus {
4608 int dev_count;
4609 -#define NR_IOBUS_DEVS 6
4610 +#define NR_IOBUS_DEVS 200
4611 struct kvm_io_device *devs[NR_IOBUS_DEVS];
4612 };
4613
4614 @@ -116,6 +116,11 @@ struct kvm_memory_slot {
4615 int user_alloc;
4616 };
4617
4618 +static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
4619 +{
4620 + return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
4621 +}
4622 +
4623 struct kvm_kernel_irq_routing_entry {
4624 u32 gsi;
4625 u32 type;
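kvm_dirty_bitmap_bytes() centralizes the open-coded ALIGN(npages, BITS_PER_LONG) / 8 computation that this patch removes from the ia64 and powerpc dirty-log paths: one dirty bit per guest page, rounded up to a whole number of longs, expressed in bytes. A standalone check of the arithmetic:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))
    #define ALIGN(x, a)   (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            unsigned long npages = 100;

            /* 100 pages -> ALIGN(100, 64) = 128 bits -> 16 bytes on LP64 */
            printf("%lu\n",
                   (unsigned long)(ALIGN(npages, BITS_PER_LONG) / 8));
            return 0;
    }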
4626 diff --git a/include/linux/module.h b/include/linux/module.h
4627 index 6cb1a3c..bd465d4 100644
4628 --- a/include/linux/module.h
4629 +++ b/include/linux/module.h
4630 @@ -457,7 +457,7 @@ void symbol_put_addr(void *addr);
4631 static inline local_t *__module_ref_addr(struct module *mod, int cpu)
4632 {
4633 #ifdef CONFIG_SMP
4634 - return (local_t *) (mod->refptr + per_cpu_offset(cpu));
4635 + return (local_t *) per_cpu_ptr(mod->refptr, cpu);
4636 #else
4637 return &mod->ref;
4638 #endif
4639 diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
4640 index 34fc6be..ebc4809 100644
4641 --- a/include/linux/nfs_fs_sb.h
4642 +++ b/include/linux/nfs_fs_sb.h
4643 @@ -176,6 +176,7 @@ struct nfs_server {
4644 #define NFS_CAP_ATIME (1U << 11)
4645 #define NFS_CAP_CTIME (1U << 12)
4646 #define NFS_CAP_MTIME (1U << 13)
4647 +#define NFS_CAP_POSIX_LOCK (1U << 14)
4648
4649
4650 /* maximum number of slots to use */
4651 diff --git a/include/linux/pci.h b/include/linux/pci.h
4652 index c1968f4..0afb527 100644
4653 --- a/include/linux/pci.h
4654 +++ b/include/linux/pci.h
4655 @@ -959,6 +959,11 @@ static inline int pci_proc_domain(struct pci_bus *bus)
4656 }
4657 #endif /* CONFIG_PCI_DOMAINS */
4658
4659 +/* some architectures require additional setup to direct VGA traffic */
4660 +typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
4661 + unsigned int command_bits, bool change_bridge);
4662 +extern void pci_register_set_vga_state(arch_set_vga_state_t func);
4663 +
4664 #else /* CONFIG_PCI is not enabled */
4665
4666 /*
4667 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4668 index cca8a04..0be8243 100644
4669 --- a/include/linux/pci_ids.h
4670 +++ b/include/linux/pci_ids.h
4671 @@ -2417,6 +2417,9 @@
4672 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
4673 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
4674 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
4675 +#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
4676 +#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
4677 +#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
4678 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
4679 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
4680 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
4681 diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
4682 index 59e9ef6..eb3f34d 100644
4683 --- a/kernel/cgroup_freezer.c
4684 +++ b/kernel/cgroup_freezer.c
4685 @@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
4686 struct freezer, css);
4687 }
4688
4689 -int cgroup_frozen(struct task_struct *task)
4690 +int cgroup_freezing_or_frozen(struct task_struct *task)
4691 {
4692 struct freezer *freezer;
4693 enum freezer_state state;
4694
4695 task_lock(task);
4696 freezer = task_freezer(task);
4697 - state = freezer->state;
4698 + if (!freezer->css.cgroup->parent)
4699 + state = CGROUP_THAWED; /* root cgroup can't be frozen */
4700 + else
4701 + state = freezer->state;
4702 task_unlock(task);
4703
4704 - return state == CGROUP_FROZEN;
4705 + return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
4706 }
4707
4708 /*
4709 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4710 index 69a3d7b..0b23ff7 100644
4711 --- a/kernel/irq/manage.c
4712 +++ b/kernel/irq/manage.c
4713 @@ -753,6 +753,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
4714 if (new->flags & IRQF_ONESHOT)
4715 desc->status |= IRQ_ONESHOT;
4716
4717 + /*
4718 + * Force MSI interrupts to run with interrupts
4719 	 + * disabled. Multi-vector cards can cause stack
4720 + * overflows due to nested interrupts when enough of
4721 + * them are directed to a core and fire at the same
4722 + * time.
4723 + */
4724 + if (desc->msi_desc)
4725 + new->flags |= IRQF_DISABLED;
4726 +
4727 if (!(desc->status & IRQ_NOAUTOEN)) {
4728 desc->depth = 0;
4729 desc->status &= ~IRQ_DISABLED;
4730 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
4731 index c62ec14..493a0ef 100644
4732 --- a/kernel/lockdep.c
4733 +++ b/kernel/lockdep.c
4734 @@ -600,9 +600,9 @@ static int static_obj(void *obj)
4735 * percpu var?
4736 */
4737 for_each_possible_cpu(i) {
4738 - start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
4739 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
4740 - + per_cpu_offset(i);
4741 + start = (unsigned long) per_cpu_ptr(&__per_cpu_start, i);
4742 + end = (unsigned long) per_cpu_ptr(&__per_cpu_start, i)
4743 + + PERCPU_ENOUGH_ROOM;
4744
4745 if ((addr >= start) && (addr < end))
4746 return 1;
4747 diff --git a/kernel/module.c b/kernel/module.c
4748 index f82386b..5b6ce39 100644
4749 --- a/kernel/module.c
4750 +++ b/kernel/module.c
4751 @@ -405,7 +405,7 @@ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
4752 int cpu;
4753
4754 for_each_possible_cpu(cpu)
4755 - memcpy(pcpudest + per_cpu_offset(cpu), from, size);
4756 + memcpy(per_cpu_ptr(pcpudest, cpu), from, size);
4757 }
4758
4759 #else /* ... !CONFIG_SMP */
4760 diff --git a/kernel/power/process.c b/kernel/power/process.c
4761 index 5ade1bd..de53015 100644
4762 --- a/kernel/power/process.c
4763 +++ b/kernel/power/process.c
4764 @@ -145,7 +145,7 @@ static void thaw_tasks(bool nosig_only)
4765 if (nosig_only && should_send_signal(p))
4766 continue;
4767
4768 - if (cgroup_frozen(p))
4769 + if (cgroup_freezing_or_frozen(p))
4770 continue;
4771
4772 thaw_process(p);
4773 diff --git a/kernel/sched.c b/kernel/sched.c
4774 index 7ca9345..da19c1e 100644
4775 --- a/kernel/sched.c
4776 +++ b/kernel/sched.c
4777 @@ -6717,7 +6717,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4778 int ret;
4779 cpumask_var_t mask;
4780
4781 - if (len < cpumask_size())
4782 + if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4783 + return -EINVAL;
4784 + if (len & (sizeof(unsigned long)-1))
4785 return -EINVAL;
4786
4787 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4788 @@ -6725,10 +6727,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4789
4790 ret = sched_getaffinity(pid, mask);
4791 if (ret == 0) {
4792 - if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
4793 + size_t retlen = min_t(size_t, len, cpumask_size());
4794 +
4795 + if (copy_to_user(user_mask_ptr, mask, retlen))
4796 ret = -EFAULT;
4797 else
4798 - ret = cpumask_size();
4799 + ret = retlen;
4800 }
4801 free_cpumask_var(mask);
4802
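The sched.c hunk relaxes the sched_getaffinity() syscall: instead of insisting on a buffer at least cpumask_size() bytes long, it accepts any length that is a multiple of sizeof(unsigned long) and wide enough to hold nr_cpu_ids bits, and it returns the number of bytes it actually copied. The raw syscall shows the new contract (the glibc wrapper hides the returned length):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        unsigned long mask[16] = { 0 };

        /* pid 0 = current task; sizeof(mask) no longer has to match the
         * kernel's internal cpumask_size(), it only has to cover
         * nr_cpu_ids bits and be a multiple of sizeof(unsigned long) */
        long n = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
        if (n < 0) {
            perror("sched_getaffinity");
            return 1;
        }
        printf("kernel copied %ld bytes; CPU0 %s allowed\n",
               n, (mask[0] & 1UL) ? "is" : "is not");
        return 0;
    }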
4803 diff --git a/mm/readahead.c b/mm/readahead.c
4804 index 337b20e..fe1a069 100644
4805 --- a/mm/readahead.c
4806 +++ b/mm/readahead.c
4807 @@ -502,7 +502,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
4808 return;
4809
4810 /* be dumb */
4811 - if (filp->f_mode & FMODE_RANDOM) {
4812 + if (filp && (filp->f_mode & FMODE_RANDOM)) {
4813 force_page_cache_readahead(mapping, filp, offset, req_size);
4814 return;
4815 }
4816 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
4817 index 6a43314..ba1fadb 100644
4818 --- a/net/mac80211/mesh.c
4819 +++ b/net/mac80211/mesh.c
4820 @@ -749,9 +749,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
4821
4822 switch (fc & IEEE80211_FCTL_STYPE) {
4823 case IEEE80211_STYPE_ACTION:
4824 - if (skb->len < IEEE80211_MIN_ACTION_SIZE)
4825 - return RX_DROP_MONITOR;
4826 - /* fall through */
4827 case IEEE80211_STYPE_PROBE_RESP:
4828 case IEEE80211_STYPE_BEACON:
4829 skb_queue_tail(&ifmsh->skb_queue, skb);
4830 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
4831 index d28acb6..4eed81b 100644
4832 --- a/net/mac80211/mesh_hwmp.c
4833 +++ b/net/mac80211/mesh_hwmp.c
4834 @@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
4835 if (SN_GT(mpath->sn, orig_sn) ||
4836 (mpath->sn == orig_sn &&
4837 action == MPATH_PREQ &&
4838 - new_metric > mpath->metric)) {
4839 + new_metric >= mpath->metric)) {
4840 process = false;
4841 fresh_info = false;
4842 }
4843 @@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
4844
4845 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
4846 cpu_to_le32(orig_sn), 0, target_addr,
4847 - cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
4848 + cpu_to_le32(target_sn), next_hop, hopcount,
4849 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
4850 0, sdata);
4851 rcu_read_unlock();
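The first mesh_hwmp hunk tightens HWMP's freshness rule: path information carried by a PREQ with the same sequence number is now ignored unless its metric is strictly better than the recorded one (`>=` where `>` used to be), and the second hunk sends the PREP using the next_hop address saved earlier rather than dereferencing mpath->next_hop at that point. The fixed rule in isolation; SN_GT is re-derived here from the usual wraparound serial-number comparison so the sketch stays self-contained:

    #include <stdio.h>
    #include <stdint.h>

    /* wraparound "serial number greater than", as mac80211's SN_GT */
    #define SN_GT(x, y) ((int32_t)((y) - (x)) < 0)

    /* stale if we hold a newer SN, or the same SN with a metric that is
     * not strictly better (the ">=" introduced by the hunk above) */
    static int preq_is_stale(uint32_t have_sn, uint32_t have_metric,
                             uint32_t got_sn, uint32_t got_metric)
    {
        return SN_GT(have_sn, got_sn) ||
               (have_sn == got_sn && got_metric >= have_metric);
    }

    int main(void)
    {
        printf("%d\n", preq_is_stale(10, 100, 10, 100)); /* 1: equal metric is stale now */
        printf("%d\n", preq_is_stale(10, 100, 10, 50));  /* 0: strictly better path */
        return 0;
    }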
4852 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4853 index da92cde..edfa036 100644
4854 --- a/net/mac80211/rx.c
4855 +++ b/net/mac80211/rx.c
4856 @@ -2355,6 +2355,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
4857 /* should never get here */
4858 WARN_ON(1);
4859 break;
4860 + case MESH_PLINK_CATEGORY:
4861 + case MESH_PATH_SEL_CATEGORY:
4862 + if (ieee80211_vif_is_mesh(&sdata->vif))
4863 + return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
4864 + break;
4865 }
4866
4867 return 1;
4868 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4869 index 70c79c3..1fdc0a5 100644
4870 --- a/net/mac80211/tx.c
4871 +++ b/net/mac80211/tx.c
4872 @@ -1945,6 +1945,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
4873 void ieee80211_tx_pending(unsigned long data)
4874 {
4875 struct ieee80211_local *local = (struct ieee80211_local *)data;
4876 + struct ieee80211_sub_if_data *sdata;
4877 unsigned long flags;
4878 int i;
4879 bool txok;
4880 @@ -1983,6 +1984,11 @@ void ieee80211_tx_pending(unsigned long data)
4881 if (!txok)
4882 break;
4883 }
4884 +
4885 + if (skb_queue_empty(&local->pending[i]))
4886 + list_for_each_entry_rcu(sdata, &local->interfaces, list)
4887 + netif_tx_wake_queue(
4888 + netdev_get_tx_queue(sdata->dev, i));
4889 }
4890 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
4891
4892 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
4893 index 3848140..27212e8 100644
4894 --- a/net/mac80211/util.c
4895 +++ b/net/mac80211/util.c
4896 @@ -280,13 +280,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
4897 /* someone still has this queue stopped */
4898 return;
4899
4900 - if (!skb_queue_empty(&local->pending[queue]))
4901 + if (skb_queue_empty(&local->pending[queue])) {
4902 + rcu_read_lock();
4903 + list_for_each_entry_rcu(sdata, &local->interfaces, list)
4904 + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
4905 + rcu_read_unlock();
4906 + } else
4907 tasklet_schedule(&local->tx_pending_tasklet);
4908 -
4909 - rcu_read_lock();
4910 - list_for_each_entry_rcu(sdata, &local->interfaces, list)
4911 - netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
4912 - rcu_read_unlock();
4913 }
4914
4915 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
4916 @@ -1145,6 +1145,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
4917 }
4918 }
4919
4920 + rcu_read_lock();
4921 + if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
4922 + list_for_each_entry_rcu(sta, &local->sta_list, list) {
4923 + ieee80211_sta_tear_down_BA_sessions(sta);
4924 + }
4925 + }
4926 + rcu_read_unlock();
4927 +
4928 /* add back keys */
4929 list_for_each_entry(sdata, &local->interfaces, list)
4930 if (netif_running(sdata->dev))
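The tx.c hunk and the first util.c hunk fix queue-wake ordering between mac80211's internal pending queues and the netdev queues: an interface's netdev queue is only woken once the pending queue for that hardware-queue index is empty; while frames are still buffered, the pending tasklet is scheduled to flush them first, and the tasklet itself now wakes the netdev queues once it empties a pending queue. Previously the netdev queues were woken unconditionally, so fresh frames could overtake buffered ones. The second util.c hunk independently tears down block-ack sessions in ieee80211_reconfig(), since restarted hardware has lost its aggregation state. The wake branch, reduced to a runnable toy:

    #include <stdio.h>

    static int pending_len = 3;     /* frames buffered for one queue index */
    static int netdev_awake = 0;

    static void tasklet_flush(void) { pending_len = 0; } /* stands in for the tx tasklet */

    /* sketch of the fixed __ieee80211_wake_queue() tail: drain first, wake after */
    static void wake_queue(void)
    {
        if (pending_len == 0)
            netdev_awake = 1;       /* netif_tx_wake_queue() per interface */
        else
            tasklet_flush();        /* tasklet_schedule(&tx_pending_tasklet) */
    }

    int main(void)
    {
        wake_queue();               /* frames pending: only flushes */
        wake_queue();               /* empty now: really wakes */
        printf("awake=%d pending=%d\n", netdev_awake, pending_len);
        return 0;
    }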
4931 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4932 index 9ace8eb..062a8b0 100644
4933 --- a/sound/pci/hda/hda_intel.c
4934 +++ b/sound/pci/hda/hda_intel.c
4935 @@ -125,6 +125,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
4936 "{Intel, ICH9},"
4937 "{Intel, ICH10},"
4938 "{Intel, PCH},"
4939 + "{Intel, CPT},"
4940 "{Intel, SCH},"
4941 "{ATI, SB450},"
4942 "{ATI, SB600},"
4943 @@ -449,6 +450,7 @@ struct azx {
4944 /* driver types */
4945 enum {
4946 AZX_DRIVER_ICH,
4947 + AZX_DRIVER_PCH,
4948 AZX_DRIVER_SCH,
4949 AZX_DRIVER_ATI,
4950 AZX_DRIVER_ATIHDMI,
4951 @@ -463,6 +465,7 @@ enum {
4952
4953 static char *driver_short_names[] __devinitdata = {
4954 [AZX_DRIVER_ICH] = "HDA Intel",
4955 + [AZX_DRIVER_PCH] = "HDA Intel PCH",
4956 [AZX_DRIVER_SCH] = "HDA Intel MID",
4957 [AZX_DRIVER_ATI] = "HDA ATI SB",
4958 [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
4959 @@ -1065,6 +1068,7 @@ static void azx_init_pci(struct azx *chip)
4960 0x01, NVIDIA_HDA_ENABLE_COHBIT);
4961 break;
4962 case AZX_DRIVER_SCH:
4963 + case AZX_DRIVER_PCH:
4964 pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
4965 if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
4966 pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
4967 @@ -2268,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
4968 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
4969 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
4970 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
4971 + SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
4972 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
4973 {}
4974 };
4975 @@ -2357,6 +2362,8 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
4976 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
4977 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
4978 SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
4979 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
4980 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
4981 {}
4982 };
4983
4984 @@ -2431,6 +2438,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
4985 if (bdl_pos_adj[dev] < 0) {
4986 switch (chip->driver_type) {
4987 case AZX_DRIVER_ICH:
4988 + case AZX_DRIVER_PCH:
4989 bdl_pos_adj[dev] = 1;
4990 break;
4991 default:
4992 @@ -2709,6 +2717,8 @@ static struct pci_device_id azx_ids[] = {
4993 { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
4994 /* PCH */
4995 { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
4996 + /* CPT */
4997 + { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
4998 /* SCH */
4999 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
5000 /* ATI SB 450/600 */
5001 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
5002 index 7069441..263bf3b 100644
5003 --- a/sound/pci/hda/patch_analog.c
5004 +++ b/sound/pci/hda/patch_analog.c
5005 @@ -1805,6 +1805,14 @@ static int patch_ad1981(struct hda_codec *codec)
5006 case AD1981_THINKPAD:
5007 spec->mixers[0] = ad1981_thinkpad_mixers;
5008 spec->input_mux = &ad1981_thinkpad_capture_source;
5009 + /* set the upper-limit for mixer amp to 0dB for avoiding the
5010 + * possible damage by overloading
5011 + */
5012 + snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
5013 + (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
5014 + (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
5015 + (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
5016 + (1 << AC_AMPCAP_MUTE_SHIFT));
5017 break;
5018 case AD1981_TOSHIBA:
5019 spec->mixers[0] = ad1981_hp_mixers;
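The patch_analog hunk caps the ThinkPad's mixer amplifier at 0 dB by overriding its advertised capabilities. In the HDA amp-caps encoding, the gain at step v is (v - offset) * (step_size + 1) * 0.25 dB over steps 0..num_steps, so advertising offset equal to num_steps (both 0x17 here) pins the top step at exactly 0 dB; with step_size 0x05 each step is 1.5 dB, for a usable range of -34.5 dB to 0 dB. Checking that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int offset = 0x17, num_steps = 0x17, step_size = 0x05;
        double db_per_step = (step_size + 1) * 0.25;   /* 1.5 dB per step */

        /* gain(v) = (v - offset) * db_per_step, v in [0, num_steps] */
        printf("range: %.1f dB .. %.1f dB\n",
               (0 - offset) * db_per_step,             /* -34.5 dB */
               (num_steps - offset) * db_per_step);    /*   0.0 dB */
        return 0;
    }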
5020 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5021 index a79f841..bd8a567 100644
5022 --- a/sound/pci/hda/patch_realtek.c
5023 +++ b/sound/pci/hda/patch_realtek.c
5024 @@ -9074,6 +9074,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
5025 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
5026
5027 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
5028 + SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
5029 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
5030 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
5031 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
5032 diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
5033 index a83d196..32f9853 100644
5034 --- a/sound/pci/mixart/mixart.c
5035 +++ b/sound/pci/mixart/mixart.c
5036 @@ -1161,13 +1161,15 @@ static long snd_mixart_BA0_read(struct snd_info_entry *entry, void *file_private
5037 unsigned long count, unsigned long pos)
5038 {
5039 struct mixart_mgr *mgr = entry->private_data;
5040 + unsigned long maxsize;
5041
5042 - count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5043 - if(count <= 0)
5044 + if (pos >= MIXART_BA0_SIZE)
5045 return 0;
5046 - if(pos + count > MIXART_BA0_SIZE)
5047 - count = (long)(MIXART_BA0_SIZE - pos);
5048 - if(copy_to_user_fromio(buf, MIXART_MEM( mgr, pos ), count))
5049 + maxsize = MIXART_BA0_SIZE - pos;
5050 + if (count > maxsize)
5051 + count = maxsize;
5052 + count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5053 + if (copy_to_user_fromio(buf, MIXART_MEM(mgr, pos), count))
5054 return -EFAULT;
5055 return count;
5056 }
5057 @@ -1180,13 +1182,15 @@ static long snd_mixart_BA1_read(struct snd_info_entry *entry, void *file_private
5058 unsigned long count, unsigned long pos)
5059 {
5060 struct mixart_mgr *mgr = entry->private_data;
5061 + unsigned long maxsize;
5062
5063 - count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5064 - if(count <= 0)
5065 + if (pos > MIXART_BA1_SIZE)
5066 return 0;
5067 - if(pos + count > MIXART_BA1_SIZE)
5068 - count = (long)(MIXART_BA1_SIZE - pos);
5069 - if(copy_to_user_fromio(buf, MIXART_REG( mgr, pos ), count))
5070 + maxsize = MIXART_BA1_SIZE - pos;
5071 + if (count > maxsize)
5072 + count = maxsize;
5073 + count = count & ~3; /* make sure the read size is a multiple of 4 bytes */
5074 + if (copy_to_user_fromio(buf, MIXART_REG(mgr, pos), count))
5075 return -EFAULT;
5076 return count;
5077 }
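Both mixart hunks replace checks that misbehave with unsigned arithmetic (`count <= 0` can only ever catch zero, and `pos + count` can wrap) with an overflow-safe clamp: reject a read starting at or past the end, cap count to the space remaining, then round down to a 4-byte multiple. Note that the BA1 hunk tests `pos >` where the BA0 hunk tests `pos >=`; a BA1 read starting exactly at the end therefore clamps to zero bytes rather than bailing out early, with the same zero-length result. The pattern in isolation, with region_size standing in for MIXART_BA0_SIZE/MIXART_BA1_SIZE:

    #include <stdio.h>
    #include <stddef.h>

    static size_t clamp_read(size_t pos, size_t count, size_t region_size)
    {
        size_t maxsize;

        if (pos >= region_size)     /* never computes pos + count */
            return 0;
        maxsize = region_size - pos;
        if (count > maxsize)
            count = maxsize;
        return count & ~(size_t)3;  /* multiple of 4 bytes */
    }

    int main(void)
    {
        printf("%zu\n", clamp_read(100, 50, 128));       /* 28 */
        printf("%zu\n", clamp_read(200, 50, 128));       /* 0: starts past the end */
        printf("%zu\n", clamp_read(0, (size_t)-1, 128)); /* 128: huge count clamped */
        return 0;
    }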
5078 diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
5079 index b2da478..c7cb207 100644
5080 --- a/sound/usb/usbmidi.c
5081 +++ b/sound/usb/usbmidi.c
5082 @@ -984,6 +984,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
5083 DEFINE_WAIT(wait);
5084 long timeout = msecs_to_jiffies(50);
5085
5086 + if (ep->umidi->disconnected)
5087 + return;
5088 /*
5089 * The substream buffer is empty, but some data might still be in the
5090 * currently active URBs, so we have to wait for those to complete.
5091 @@ -1121,14 +1123,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
5092 * Frees an output endpoint.
5093 * May be called when ep hasn't been initialized completely.
5094 */
5095 -static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
5096 +static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
5097 {
5098 unsigned int i;
5099
5100 for (i = 0; i < OUTPUT_URBS; ++i)
5101 - if (ep->urbs[i].urb)
5102 + if (ep->urbs[i].urb) {
5103 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
5104 ep->max_transfer);
5105 + ep->urbs[i].urb = NULL;
5106 + }
5107 +}
5108 +
5109 +static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
5110 +{
5111 + snd_usbmidi_out_endpoint_clear(ep);
5112 kfree(ep);
5113 }
5114
5115 @@ -1260,15 +1269,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
5116 usb_kill_urb(ep->out->urbs[j].urb);
5117 if (umidi->usb_protocol_ops->finish_out_endpoint)
5118 umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
5119 + ep->out->active_urbs = 0;
5120 + if (ep->out->drain_urbs) {
5121 + ep->out->drain_urbs = 0;
5122 + wake_up(&ep->out->drain_wait);
5123 + }
5124 }
5125 if (ep->in)
5126 for (j = 0; j < INPUT_URBS; ++j)
5127 usb_kill_urb(ep->in->urbs[j]);
5128 /* free endpoints here; later call can result in Oops */
5129 - if (ep->out) {
5130 - snd_usbmidi_out_endpoint_delete(ep->out);
5131 - ep->out = NULL;
5132 - }
5133 + if (ep->out)
5134 + snd_usbmidi_out_endpoint_clear(ep->out);
5135 if (ep->in) {
5136 snd_usbmidi_in_endpoint_delete(ep->in);
5137 ep->in = NULL;
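The usbmidi hunks split output-endpoint teardown into two phases: at disconnect time the URBs are killed and freed and any task sleeping in the drain wait is woken (output_drain also bails out immediately once disconnected), but the endpoint structure itself stays allocated, so a rawmidi substream that is still open cannot touch freed memory; the container is only kfree()d later by the normal delete path. The shape of that pattern in plain C:

    #include <stdlib.h>

    struct endpoint {
        void *urbs[4];              /* stand-ins for the per-URB buffers */
    };

    /* phase 1, safe at disconnect: release I/O resources only */
    static void endpoint_clear(struct endpoint *ep)
    {
        for (int i = 0; i < 4; i++) {
            free(ep->urbs[i]);
            ep->urbs[i] = NULL;     /* makes a second clear a no-op */
        }
    }

    /* phase 2, final teardown: clear again (harmless) and free the container */
    static void endpoint_delete(struct endpoint *ep)
    {
        endpoint_clear(ep);
        free(ep);
    }

    int main(void)
    {
        struct endpoint *ep = calloc(1, sizeof(*ep));
        for (int i = 0; i < 4; i++)
            ep->urbs[i] = malloc(16);
        endpoint_clear(ep);         /* disconnect: ep itself stays valid */
        endpoint_delete(ep);        /* later: container freed exactly once */
        return 0;
    }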
5138 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5139 index a944be3..9dd98cb 100644
5140 --- a/virt/kvm/kvm_main.c
5141 +++ b/virt/kvm/kvm_main.c
5142 @@ -636,7 +636,7 @@ skip_lpage:
5143
5144 /* Allocate page dirty bitmap if needed */
5145 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
5146 - unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
5147 + unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
5148
5149 new.dirty_bitmap = vmalloc(dirty_bytes);
5150 if (!new.dirty_bitmap)
5151 @@ -719,7 +719,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
5152 {
5153 struct kvm_memory_slot *memslot;
5154 int r, i;
5155 - int n;
5156 + unsigned long n;
5157 unsigned long any = 0;
5158
5159 r = -EINVAL;
5160 @@ -731,7 +731,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
5161 if (!memslot->dirty_bitmap)
5162 goto out;
5163
5164 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
5165 + n = kvm_dirty_bitmap_bytes(memslot);
5166
5167 for (i = 0; !any && i < n/sizeof(long); ++i)
5168 any = memslot->dirty_bitmap[i];
5169 @@ -1073,10 +1073,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
5170 memslot = gfn_to_memslot_unaliased(kvm, gfn);
5171 if (memslot && memslot->dirty_bitmap) {
5172 unsigned long rel_gfn = gfn - memslot->base_gfn;
5173 + unsigned long *p = memslot->dirty_bitmap +
5174 + rel_gfn / BITS_PER_LONG;
5175 + int offset = rel_gfn % BITS_PER_LONG;
5176
5177 /* avoid RMW */
5178 - if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
5179 - generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
5180 + if (!generic_test_le_bit(offset, p))
5181 + generic___set_le_bit(offset, p);
5182 }
5183 }
5184
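The kvm_main.c hunks, together with the ia64 and powerpc ones earlier in this patch, route every dirty-bitmap size computation through kvm_dirty_bitmap_bytes() and carry the byte count in an unsigned long, and mark_page_dirty() now addresses the containing long explicitly so the bit index handed to the little-endian bit helpers stays below BITS_PER_LONG even for very large memory slots. A sketch of the sizing and the word/offset split (native bit order here; the kernel uses the generic little-endian helpers):

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))

    /* sketch of kvm_dirty_bitmap_bytes(): one bit per page, rounded
     * up to whole longs */
    static unsigned long dirty_bitmap_bytes(unsigned long npages)
    {
        unsigned long longs = (npages + BITS_PER_LONG - 1) / BITS_PER_LONG;
        return longs * sizeof(unsigned long);
    }

    /* mark_page_dirty()-style addressing: word pointer plus a small
     * in-word offset, never a huge bit index */
    static void set_dirty(unsigned long *bitmap, unsigned long rel_gfn)
    {
        unsigned long *p = bitmap + rel_gfn / BITS_PER_LONG;
        int offset = rel_gfn % BITS_PER_LONG;

        if (!(*p & (1UL << offset)))    /* avoid the RMW when already set */
            *p |= 1UL << offset;
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0, 0 };
        set_dirty(bitmap, 1);
        set_dirty(bitmap, BITS_PER_LONG + 3);
        printf("%lu bytes for 100 pages; words: %#lx %#lx\n",
               dirty_bitmap_bytes(100), bitmap[0], bitmap[1]);
        return 0;
    }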