Magellan Linux

Contents of /trunk/kernel-alx/patches-3.4/0111-3.4.12-all-fixes.patch



Revision 1945
Wed Nov 14 15:23:43 2012 UTC by niro
File size: 280948 bytes
3.4.18-alx-r1
1 diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
2 index 286ec04..82dd174 100644
3 --- a/Documentation/sound/alsa/HD-Audio-Models.txt
4 +++ b/Documentation/sound/alsa/HD-Audio-Models.txt
5 @@ -47,6 +47,7 @@ ALC882/883/885/888/889
6 acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G
7 acer-aspire-8930g Acer Aspire 8330G/6935G
8 acer-aspire Acer Aspire others
9 + no-primary-hp VAIO Z workaround (for fixed speaker DAC)
10
11 ALC861/660
12 ==========
13 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
14 index 3bb7ffe..c2cbe4f 100644
15 --- a/arch/alpha/include/asm/atomic.h
16 +++ b/arch/alpha/include/asm/atomic.h
17 @@ -14,8 +14,8 @@
18 */
19
20
21 -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
22 -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
23 +#define ATOMIC_INIT(i) { (i) }
24 +#define ATOMIC64_INIT(i) { (i) }
25
26 #define atomic_read(v) (*(volatile int *)&(v)->counter)
27 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
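The alpha change above drops the compound-literal cast so ATOMIC_INIT()/ATOMIC64_INIT() expand to a plain brace initializer. A compound literal is not a constant expression in strict C, so the old form can be rejected when the macro is used to initialise an atomic embedded in a statically defined structure; the brace form works everywhere. A minimal standalone sketch of that use, with illustrative names rather than the kernel's actual types:

/* Standalone illustration, not kernel code. */
typedef struct { int counter; } my_atomic_t;

#define MY_ATOMIC_INIT_OLD(i) ( (my_atomic_t) { (i) } )  /* compound literal */
#define MY_ATOMIC_INIT_NEW(i) { (i) }                    /* plain initializer */

struct my_counter_set {
    my_atomic_t hits;
    my_atomic_t misses;
};

/* The brace form is a valid constant initializer for a static object. */
static struct my_counter_set stats = {
    .hits   = MY_ATOMIC_INIT_NEW(0),
    .misses = MY_ATOMIC_INIT_NEW(0),
};

int main(void)
{
    return stats.hits.counter + stats.misses.counter;
}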
28 diff --git a/arch/arm/Makefile b/arch/arm/Makefile
29 index 047a207..1d6402c 100644
30 --- a/arch/arm/Makefile
31 +++ b/arch/arm/Makefile
32 @@ -283,10 +283,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
33 zinstall uinstall install: vmlinux
34 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
35
36 -%.dtb:
37 +%.dtb: scripts
38 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
39
40 -dtbs:
41 +dtbs: scripts
42 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
43
44 # We use MRPROPER_FILES and CLEAN_FILES now
45 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
46 index dc7e8ce..87278fc 100644
47 --- a/arch/arm/boot/compressed/head.S
48 +++ b/arch/arm/boot/compressed/head.S
49 @@ -648,6 +648,7 @@ __armv7_mmu_cache_on:
50 mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
51 #endif
52 mrc p15, 0, r0, c1, c0, 0 @ read control reg
53 + bic r0, r0, #1 << 28 @ clear SCTLR.TRE
54 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
55 orr r0, r0, #0x003c @ write buffer
56 #ifdef CONFIG_MMU
57 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
58 index 03fb936..5c8b3bf4 100644
59 --- a/arch/arm/include/asm/assembler.h
60 +++ b/arch/arm/include/asm/assembler.h
61 @@ -320,4 +320,12 @@
62 .size \name , . - \name
63 .endm
64
65 + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
66 +#ifndef CONFIG_CPU_USE_DOMAINS
67 + adds \tmp, \addr, #\size - 1
68 + sbcccs \tmp, \tmp, \limit
69 + bcs \bad
70 +#endif
71 + .endm
72 +
73 #endif /* __ASM_ASSEMBLER_H__ */
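The check_uaccess macro added above performs, before the actual load or store, a range check of the user pointer against the thread's address limit; the callers pass addr_limit - 1, so the limit is the last permitted byte, and the adds/sbcccs/bcs sequence also rejects addresses that wrap around the top of the address space. Roughly the following logic, rendered as a standalone C sketch (illustrative names, not kernel APIs):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Allow an access of `size` bytes at `addr` only if its last byte does
 * not lie beyond `limit` and the address computation does not wrap. */
static bool access_in_range(uintptr_t addr, size_t size, uintptr_t limit)
{
    uintptr_t last = addr + size - 1;

    if (last < addr)          /* wrapped past the top of the address space */
        return false;
    return last <= limit;     /* entirely below the current address limit */
}

int main(void)
{
    uintptr_t limit = 0xbfffffff;   /* e.g. a 3 GB user/kernel split */

    printf("%d\n", access_in_range(0xbffffffc, 4, limit));  /* 1: fits exactly */
    printf("%d\n", access_in_range(0xbffffffd, 4, limit));  /* 0: crosses the limit */
    printf("%d\n", access_in_range(UINTPTR_MAX, 4, limit)); /* 0: wraps around */
    return 0;
}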
74 diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
75 index 93226cf..b1479fd 100644
76 --- a/arch/arm/include/asm/mutex.h
77 +++ b/arch/arm/include/asm/mutex.h
78 @@ -7,121 +7,10 @@
79 */
80 #ifndef _ASM_MUTEX_H
81 #define _ASM_MUTEX_H
82 -
83 -#if __LINUX_ARM_ARCH__ < 6
84 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
85 -# include <asm-generic/mutex-xchg.h>
86 -#else
87 -
88 /*
89 - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
90 - * atomic decrement (it is not a reliable atomic decrement but it satisfies
91 - * the defined semantics for our purpose, while being smaller and faster
92 - * than a real atomic decrement or atomic swap. The idea is to attempt
93 - * decrementing the lock value only once. If once decremented it isn't zero,
94 - * or if its store-back fails due to a dispute on the exclusive store, we
95 - * simply bail out immediately through the slow path where the lock will be
96 - * reattempted until it succeeds.
97 + * On pre-ARMv6 hardware this results in a swp-based implementation,
98 + * which is the most efficient. For ARMv6+, we emit a pair of exclusive
99 + * accesses instead.
100 */
101 -static inline void
102 -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
103 -{
104 - int __ex_flag, __res;
105 -
106 - __asm__ (
107 -
108 - "ldrex %0, [%2] \n\t"
109 - "sub %0, %0, #1 \n\t"
110 - "strex %1, %0, [%2] "
111 -
112 - : "=&r" (__res), "=&r" (__ex_flag)
113 - : "r" (&(count)->counter)
114 - : "cc","memory" );
115 -
116 - __res |= __ex_flag;
117 - if (unlikely(__res != 0))
118 - fail_fn(count);
119 -}
120 -
121 -static inline int
122 -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
123 -{
124 - int __ex_flag, __res;
125 -
126 - __asm__ (
127 -
128 - "ldrex %0, [%2] \n\t"
129 - "sub %0, %0, #1 \n\t"
130 - "strex %1, %0, [%2] "
131 -
132 - : "=&r" (__res), "=&r" (__ex_flag)
133 - : "r" (&(count)->counter)
134 - : "cc","memory" );
135 -
136 - __res |= __ex_flag;
137 - if (unlikely(__res != 0))
138 - __res = fail_fn(count);
139 - return __res;
140 -}
141 -
142 -/*
143 - * Same trick is used for the unlock fast path. However the original value,
144 - * rather than the result, is used to test for success in order to have
145 - * better generated assembly.
146 - */
147 -static inline void
148 -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
149 -{
150 - int __ex_flag, __res, __orig;
151 -
152 - __asm__ (
153 -
154 - "ldrex %0, [%3] \n\t"
155 - "add %1, %0, #1 \n\t"
156 - "strex %2, %1, [%3] "
157 -
158 - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
159 - : "r" (&(count)->counter)
160 - : "cc","memory" );
161 -
162 - __orig |= __ex_flag;
163 - if (unlikely(__orig != 0))
164 - fail_fn(count);
165 -}
166 -
167 -/*
168 - * If the unlock was done on a contended lock, or if the unlock simply fails
169 - * then the mutex remains locked.
170 - */
171 -#define __mutex_slowpath_needs_to_unlock() 1
172 -
173 -/*
174 - * For __mutex_fastpath_trylock we use another construct which could be
175 - * described as a "single value cmpxchg".
176 - *
177 - * This provides the needed trylock semantics like cmpxchg would, but it is
178 - * lighter and less generic than a true cmpxchg implementation.
179 - */
180 -static inline int
181 -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
182 -{
183 - int __ex_flag, __res, __orig;
184 -
185 - __asm__ (
186 -
187 - "1: ldrex %0, [%3] \n\t"
188 - "subs %1, %0, #1 \n\t"
189 - "strexeq %2, %1, [%3] \n\t"
190 - "movlt %0, #0 \n\t"
191 - "cmpeq %2, #0 \n\t"
192 - "bgt 1b "
193 -
194 - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
195 - : "r" (&count->counter)
196 - : "cc", "memory" );
197 -
198 - return __orig;
199 -}
200 -
201 -#endif
202 +#include <asm-generic/mutex-xchg.h>
203 #endif
204 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
205 index 71f6536..0a070e9 100644
206 --- a/arch/arm/include/asm/uaccess.h
207 +++ b/arch/arm/include/asm/uaccess.h
208 @@ -101,28 +101,39 @@ extern int __get_user_1(void *);
209 extern int __get_user_2(void *);
210 extern int __get_user_4(void *);
211
212 -#define __get_user_x(__r2,__p,__e,__s,__i...) \
213 +#define __GUP_CLOBBER_1 "lr", "cc"
214 +#ifdef CONFIG_CPU_USE_DOMAINS
215 +#define __GUP_CLOBBER_2 "ip", "lr", "cc"
216 +#else
217 +#define __GUP_CLOBBER_2 "lr", "cc"
218 +#endif
219 +#define __GUP_CLOBBER_4 "lr", "cc"
220 +
221 +#define __get_user_x(__r2,__p,__e,__l,__s) \
222 __asm__ __volatile__ ( \
223 __asmeq("%0", "r0") __asmeq("%1", "r2") \
224 + __asmeq("%3", "r1") \
225 "bl __get_user_" #__s \
226 : "=&r" (__e), "=r" (__r2) \
227 - : "0" (__p) \
228 - : __i, "cc")
229 + : "0" (__p), "r" (__l) \
230 + : __GUP_CLOBBER_##__s)
231
232 #define get_user(x,p) \
233 ({ \
234 + unsigned long __limit = current_thread_info()->addr_limit - 1; \
235 register const typeof(*(p)) __user *__p asm("r0") = (p);\
236 register unsigned long __r2 asm("r2"); \
237 + register unsigned long __l asm("r1") = __limit; \
238 register int __e asm("r0"); \
239 switch (sizeof(*(__p))) { \
240 case 1: \
241 - __get_user_x(__r2, __p, __e, 1, "lr"); \
242 - break; \
243 + __get_user_x(__r2, __p, __e, __l, 1); \
244 + break; \
245 case 2: \
246 - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
247 + __get_user_x(__r2, __p, __e, __l, 2); \
248 break; \
249 case 4: \
250 - __get_user_x(__r2, __p, __e, 4, "lr"); \
251 + __get_user_x(__r2, __p, __e, __l, 4); \
252 break; \
253 default: __e = __get_user_bad(); break; \
254 } \
255 @@ -135,31 +146,34 @@ extern int __put_user_2(void *, unsigned int);
256 extern int __put_user_4(void *, unsigned int);
257 extern int __put_user_8(void *, unsigned long long);
258
259 -#define __put_user_x(__r2,__p,__e,__s) \
260 +#define __put_user_x(__r2,__p,__e,__l,__s) \
261 __asm__ __volatile__ ( \
262 __asmeq("%0", "r0") __asmeq("%2", "r2") \
263 + __asmeq("%3", "r1") \
264 "bl __put_user_" #__s \
265 : "=&r" (__e) \
266 - : "0" (__p), "r" (__r2) \
267 + : "0" (__p), "r" (__r2), "r" (__l) \
268 : "ip", "lr", "cc")
269
270 #define put_user(x,p) \
271 ({ \
272 + unsigned long __limit = current_thread_info()->addr_limit - 1; \
273 register const typeof(*(p)) __r2 asm("r2") = (x); \
274 register const typeof(*(p)) __user *__p asm("r0") = (p);\
275 + register unsigned long __l asm("r1") = __limit; \
276 register int __e asm("r0"); \
277 switch (sizeof(*(__p))) { \
278 case 1: \
279 - __put_user_x(__r2, __p, __e, 1); \
280 + __put_user_x(__r2, __p, __e, __l, 1); \
281 break; \
282 case 2: \
283 - __put_user_x(__r2, __p, __e, 2); \
284 + __put_user_x(__r2, __p, __e, __l, 2); \
285 break; \
286 case 4: \
287 - __put_user_x(__r2, __p, __e, 4); \
288 + __put_user_x(__r2, __p, __e, __l, 4); \
289 break; \
290 case 8: \
291 - __put_user_x(__r2, __p, __e, 8); \
292 + __put_user_x(__r2, __p, __e, __l, 8); \
293 break; \
294 default: __e = __put_user_bad(); break; \
295 } \
296 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
297 index ba386bd..18d39ea 100644
298 --- a/arch/arm/kernel/hw_breakpoint.c
299 +++ b/arch/arm/kernel/hw_breakpoint.c
300 @@ -159,6 +159,12 @@ static int debug_arch_supported(void)
301 arch >= ARM_DEBUG_ARCH_V7_1;
302 }
303
304 +/* Can we determine the watchpoint access type from the fsr? */
305 +static int debug_exception_updates_fsr(void)
306 +{
307 + return 0;
308 +}
309 +
310 /* Determine number of WRP registers available. */
311 static int get_num_wrp_resources(void)
312 {
313 @@ -619,18 +625,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
314 info->address &= ~alignment_mask;
315 info->ctrl.len <<= offset;
316
317 - /*
318 - * Currently we rely on an overflow handler to take
319 - * care of single-stepping the breakpoint when it fires.
320 - * In the case of userspace breakpoints on a core with V7 debug,
321 - * we can use the mismatch feature as a poor-man's hardware
322 - * single-step, but this only works for per-task breakpoints.
323 - */
324 - if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
325 - !core_has_mismatch_brps() || !bp->hw.bp_target)) {
326 - pr_warning("overflow handler required but none found\n");
327 - ret = -EINVAL;
328 + if (!bp->overflow_handler) {
329 + /*
330 + * Mismatch breakpoints are required for single-stepping
331 + * breakpoints.
332 + */
333 + if (!core_has_mismatch_brps())
334 + return -EINVAL;
335 +
336 + /* We don't allow mismatch breakpoints in kernel space. */
337 + if (arch_check_bp_in_kernelspace(bp))
338 + return -EPERM;
339 +
340 + /*
341 + * Per-cpu breakpoints are not supported by our stepping
342 + * mechanism.
343 + */
344 + if (!bp->hw.bp_target)
345 + return -EINVAL;
346 +
347 + /*
348 + * We only support specific access types if the fsr
349 + * reports them.
350 + */
351 + if (!debug_exception_updates_fsr() &&
352 + (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
353 + info->ctrl.type == ARM_BREAKPOINT_STORE))
354 + return -EINVAL;
355 }
356 +
357 out:
358 return ret;
359 }
360 @@ -706,10 +729,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
361 goto unlock;
362
363 /* Check that the access type matches. */
364 - access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
365 - HW_BREAKPOINT_R;
366 - if (!(access & hw_breakpoint_type(wp)))
367 - goto unlock;
368 + if (debug_exception_updates_fsr()) {
369 + access = (fsr & ARM_FSR_ACCESS_MASK) ?
370 + HW_BREAKPOINT_W : HW_BREAKPOINT_R;
371 + if (!(access & hw_breakpoint_type(wp)))
372 + goto unlock;
373 + }
374
375 /* We have a winner. */
376 info->trigger = addr;
377 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
378 index a8ad1e3..a53a5a3 100644
379 --- a/arch/arm/kernel/traps.c
380 +++ b/arch/arm/kernel/traps.c
381 @@ -388,20 +388,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
382 #endif
383 instr = *(u32 *) pc;
384 } else if (thumb_mode(regs)) {
385 - get_user(instr, (u16 __user *)pc);
386 + if (get_user(instr, (u16 __user *)pc))
387 + goto die_sig;
388 if (is_wide_instruction(instr)) {
389 unsigned int instr2;
390 - get_user(instr2, (u16 __user *)pc+1);
391 + if (get_user(instr2, (u16 __user *)pc+1))
392 + goto die_sig;
393 instr <<= 16;
394 instr |= instr2;
395 }
396 - } else {
397 - get_user(instr, (u32 __user *)pc);
398 + } else if (get_user(instr, (u32 __user *)pc)) {
399 + goto die_sig;
400 }
401
402 if (call_undef_hook(regs, instr) == 0)
403 return;
404
405 +die_sig:
406 #ifdef CONFIG_DEBUG_USER
407 if (user_debug & UDBG_UNDEFINED) {
408 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
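The traps.c hunk above makes do_undefinstr() check the return value of get_user() instead of decoding a possibly uninitialised instruction word when the fault handler could not read the faulting PC. A small standalone mock-up of the pattern being enforced (mock_get_user is illustrative, not the kernel accessor):

#include <stdio.h>

static int mock_get_user(unsigned int *dest, const unsigned int *uaddr)
{
    if (!uaddr)          /* pretend a NULL pointer faults */
        return -14;      /* -EFAULT */
    *dest = *uaddr;
    return 0;
}

static int decode_instruction(const unsigned int *pc)
{
    unsigned int instr;

    /* Old pattern: ignoring the result leaves `instr` uninitialised on fault.
     * New pattern: bail out (raise the signal) as soon as the read fails. */
    if (mock_get_user(&instr, pc))
        return -1;

    printf("instr = %#x\n", instr);
    return 0;
}

int main(void)
{
    unsigned int word = 0xe1a00000;  /* ARM "mov r0, r0" */

    decode_instruction(&word);  /* succeeds */
    decode_instruction(NULL);   /* fault path */
    return 0;
}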
409 diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
410 index 11093a7..9b06bb4 100644
411 --- a/arch/arm/lib/getuser.S
412 +++ b/arch/arm/lib/getuser.S
413 @@ -16,8 +16,9 @@
414 * __get_user_X
415 *
416 * Inputs: r0 contains the address
417 + * r1 contains the address limit, which must be preserved
418 * Outputs: r0 is the error code
419 - * r2, r3 contains the zero-extended value
420 + * r2 contains the zero-extended value
421 * lr corrupted
422 *
423 * No other registers must be altered. (see <asm/uaccess.h>
424 @@ -27,33 +28,39 @@
425 * Note also that it is intended that __get_user_bad is not global.
426 */
427 #include <linux/linkage.h>
428 +#include <asm/assembler.h>
429 #include <asm/errno.h>
430 #include <asm/domain.h>
431
432 ENTRY(__get_user_1)
433 + check_uaccess r0, 1, r1, r2, __get_user_bad
434 1: TUSER(ldrb) r2, [r0]
435 mov r0, #0
436 mov pc, lr
437 ENDPROC(__get_user_1)
438
439 ENTRY(__get_user_2)
440 -#ifdef CONFIG_THUMB2_KERNEL
441 -2: TUSER(ldrb) r2, [r0]
442 -3: TUSER(ldrb) r3, [r0, #1]
443 + check_uaccess r0, 2, r1, r2, __get_user_bad
444 +#ifdef CONFIG_CPU_USE_DOMAINS
445 +rb .req ip
446 +2: ldrbt r2, [r0], #1
447 +3: ldrbt rb, [r0], #0
448 #else
449 -2: TUSER(ldrb) r2, [r0], #1
450 -3: TUSER(ldrb) r3, [r0]
451 +rb .req r0
452 +2: ldrb r2, [r0]
453 +3: ldrb rb, [r0, #1]
454 #endif
455 #ifndef __ARMEB__
456 - orr r2, r2, r3, lsl #8
457 + orr r2, r2, rb, lsl #8
458 #else
459 - orr r2, r3, r2, lsl #8
460 + orr r2, rb, r2, lsl #8
461 #endif
462 mov r0, #0
463 mov pc, lr
464 ENDPROC(__get_user_2)
465
466 ENTRY(__get_user_4)
467 + check_uaccess r0, 4, r1, r2, __get_user_bad
468 4: TUSER(ldr) r2, [r0]
469 mov r0, #0
470 mov pc, lr
471 diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
472 index 7db2599..3d73dcb 100644
473 --- a/arch/arm/lib/putuser.S
474 +++ b/arch/arm/lib/putuser.S
475 @@ -16,6 +16,7 @@
476 * __put_user_X
477 *
478 * Inputs: r0 contains the address
479 + * r1 contains the address limit, which must be preserved
480 * r2, r3 contains the value
481 * Outputs: r0 is the error code
482 * lr corrupted
483 @@ -27,16 +28,19 @@
484 * Note also that it is intended that __put_user_bad is not global.
485 */
486 #include <linux/linkage.h>
487 +#include <asm/assembler.h>
488 #include <asm/errno.h>
489 #include <asm/domain.h>
490
491 ENTRY(__put_user_1)
492 + check_uaccess r0, 1, r1, ip, __put_user_bad
493 1: TUSER(strb) r2, [r0]
494 mov r0, #0
495 mov pc, lr
496 ENDPROC(__put_user_1)
497
498 ENTRY(__put_user_2)
499 + check_uaccess r0, 2, r1, ip, __put_user_bad
500 mov ip, r2, lsr #8
501 #ifdef CONFIG_THUMB2_KERNEL
502 #ifndef __ARMEB__
503 @@ -60,12 +64,14 @@ ENTRY(__put_user_2)
504 ENDPROC(__put_user_2)
505
506 ENTRY(__put_user_4)
507 + check_uaccess r0, 4, r1, ip, __put_user_bad
508 4: TUSER(str) r2, [r0]
509 mov r0, #0
510 mov pc, lr
511 ENDPROC(__put_user_4)
512
513 ENTRY(__put_user_8)
514 + check_uaccess r0, 8, r1, ip, __put_user_bad
515 #ifdef CONFIG_THUMB2_KERNEL
516 5: TUSER(str) r2, [r0]
517 6: TUSER(str) r3, [r0, #4]
518 diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
519 index 27f4a61..d3f25c9 100644
520 --- a/arch/arm/mm/mm.h
521 +++ b/arch/arm/mm/mm.h
522 @@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
523 /* permanent static mappings from iotable_init() */
524 #define VM_ARM_STATIC_MAPPING 0x40000000
525
526 +/* empty mapping */
527 +#define VM_ARM_EMPTY_MAPPING 0x20000000
528 +
529 /* mapping type (attributes) for permanent static mappings */
530 #define VM_ARM_MTYPE(mt) ((mt) << 20)
531 #define VM_ARM_MTYPE_MASK (0x1f << 20)
532 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
533 index 75f9f9d..7d41919 100644
534 --- a/arch/arm/mm/mmu.c
535 +++ b/arch/arm/mm/mmu.c
536 @@ -805,7 +805,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
537 vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
538 vm->addr = (void *)addr;
539 vm->size = SECTION_SIZE;
540 - vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
541 + vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
542 vm->caller = pmd_empty_section_gap;
543 vm_area_add_early(vm);
544 }
545 @@ -818,7 +818,7 @@ static void __init fill_pmd_gaps(void)
546
547 /* we're still single threaded hence no lock needed here */
548 for (vm = vmlist; vm; vm = vm->next) {
549 - if (!(vm->flags & VM_ARM_STATIC_MAPPING))
550 + if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
551 continue;
552 addr = (unsigned long)vm->addr;
553 if (addr < next)
554 diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
555 index 33aadbc..dcfd573 100644
556 --- a/arch/mips/mm/gup.c
557 +++ b/arch/mips/mm/gup.c
558 @@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
559 do {
560 VM_BUG_ON(compound_head(page) != head);
561 pages[*nr] = page;
562 + if (PageTail(page))
563 + get_huge_page_tail(page);
564 (*nr)++;
565 page++;
566 refs++;
567 diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
568 index 978330c..f80887f 100644
569 --- a/arch/powerpc/platforms/85xx/p1022_ds.c
570 +++ b/arch/powerpc/platforms/85xx/p1022_ds.c
571 @@ -208,6 +208,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
572 u8 __iomem *lbc_lcs0_ba = NULL;
573 u8 __iomem *lbc_lcs1_ba = NULL;
574 phys_addr_t cs0_addr, cs1_addr;
575 + u32 br0, or0, br1, or1;
576 const __be32 *iprop;
577 unsigned int num_laws;
578 u8 b;
579 @@ -256,11 +257,70 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
580 }
581 num_laws = be32_to_cpup(iprop);
582
583 - cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
584 - cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
585 + /*
586 + * Indirect mode requires both BR0 and BR1 to be set to "GPCM",
587 + * otherwise writes to these addresses won't actually appear on the
588 + * local bus, and so the PIXIS won't see them.
589 + *
590 + * In FCM mode, writes go to the NAND controller, which does not pass
591 + * them to the localbus directly. So we force BR0 and BR1 into GPCM
592 + * mode, since we don't care about what's behind the localbus any
593 + * more.
594 + */
595 + br0 = in_be32(&lbc->bank[0].br);
596 + br1 = in_be32(&lbc->bank[1].br);
597 + or0 = in_be32(&lbc->bank[0].or);
598 + or1 = in_be32(&lbc->bank[1].or);
599 +
600 + /* Make sure CS0 and CS1 are programmed */
601 + if (!(br0 & BR_V) || !(br1 & BR_V)) {
602 + pr_err("p1022ds: CS0 and/or CS1 is not programmed\n");
603 + goto exit;
604 + }
605 +
606 + /*
607 + * Use the existing BRx/ORx values if it's already GPCM. Otherwise,
608 + * force the values to simple 32KB GPCM windows with the most
609 + * conservative timing.
610 + */
611 + if ((br0 & BR_MSEL) != BR_MS_GPCM) {
612 + br0 = (br0 & BR_BA) | BR_V;
613 + or0 = 0xFFFF8000 | 0xFF7;
614 + out_be32(&lbc->bank[0].br, br0);
615 + out_be32(&lbc->bank[0].or, or0);
616 + }
617 + if ((br1 & BR_MSEL) != BR_MS_GPCM) {
618 + br1 = (br1 & BR_BA) | BR_V;
619 + or1 = 0xFFFF8000 | 0xFF7;
620 + out_be32(&lbc->bank[1].br, br1);
621 + out_be32(&lbc->bank[1].or, or1);
622 + }
623 +
624 + cs0_addr = lbc_br_to_phys(ecm, num_laws, br0);
625 + if (!cs0_addr) {
626 + pr_err("p1022ds: could not determine physical address for CS0"
627 + " (BR0=%08x)\n", br0);
628 + goto exit;
629 + }
630 + cs1_addr = lbc_br_to_phys(ecm, num_laws, br1);
631 + if (!cs0_addr) {
632 + pr_err("p1022ds: could not determine physical address for CS1"
633 + " (BR1=%08x)\n", br1);
634 + goto exit;
635 + }
636
637 lbc_lcs0_ba = ioremap(cs0_addr, 1);
638 + if (!lbc_lcs0_ba) {
639 + pr_err("p1022ds: could not ioremap CS0 address %llx\n",
640 + (unsigned long long)cs0_addr);
641 + goto exit;
642 + }
643 lbc_lcs1_ba = ioremap(cs1_addr, 1);
644 + if (!lbc_lcs1_ba) {
645 + pr_err("p1022ds: could not ioremap CS1 address %llx\n",
646 + (unsigned long long)cs1_addr);
647 + goto exit;
648 + }
649
650 /* Make sure we're in indirect mode first. */
651 if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
652 @@ -435,6 +495,8 @@ static void __init disable_one_node(struct device_node *np, struct property *new
653 prom_update_property(np, new, old);
654 else
655 prom_add_property(np, new);
656 +
657 + pr_info("p1022ds: disabling %s node\n", np->full_name);
658 }
659
660 /* TRUE if there is a "video=fslfb" command-line parameter. */
661 @@ -499,28 +561,46 @@ static void __init p1022_ds_setup_arch(void)
662 diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
663
664 /*
665 - * Disable the NOR flash node if there is video=fslfb... command-line
666 - * parameter. When the DIU is active, NOR flash is unavailable, so we
667 - * have to disable the node before the MTD driver loads.
668 + * Disable the NOR and NAND flash nodes if there is video=fslfb...
669 + * command-line parameter. When the DIU is active, the localbus is
670 + * unavailable, so we have to disable these nodes before the MTD
671 + * driver loads.
672 */
673 if (fslfb) {
674 struct device_node *np =
675 of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
676
677 if (np) {
678 - np = of_find_compatible_node(np, NULL, "cfi-flash");
679 - if (np) {
680 + struct device_node *np2;
681 +
682 + of_node_get(np);
683 + np2 = of_find_compatible_node(np, NULL, "cfi-flash");
684 + if (np2) {
685 static struct property nor_status = {
686 .name = "status",
687 .value = "disabled",
688 .length = sizeof("disabled"),
689 };
690
691 - pr_info("p1022ds: disabling %s node",
692 - np->full_name);
693 - disable_one_node(np, &nor_status);
694 - of_node_put(np);
695 + disable_one_node(np2, &nor_status);
696 + of_node_put(np2);
697 + }
698 +
699 + of_node_get(np);
700 + np2 = of_find_compatible_node(np, NULL,
701 + "fsl,elbc-fcm-nand");
702 + if (np2) {
703 + static struct property nand_status = {
704 + .name = "status",
705 + .value = "disabled",
706 + .length = sizeof("disabled"),
707 + };
708 +
709 + disable_one_node(np2, &nand_status);
710 + of_node_put(np2);
711 }
712 +
713 + of_node_put(np);
714 }
715
716 }
717 diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
718 index 2297be4..abe8722 100644
719 --- a/arch/s390/oprofile/init.c
720 +++ b/arch/s390/oprofile/init.c
721 @@ -171,7 +171,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
722 if (*offset)
723 return -EINVAL;
724 retval = oprofilefs_ulong_from_user(&val, buf, count);
725 - if (retval)
726 + if (retval <= 0)
727 return retval;
728 if (val < oprofile_min_interval)
729 oprofile_hw_interval = oprofile_min_interval;
730 @@ -214,7 +214,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
731 return -EINVAL;
732
733 retval = oprofilefs_ulong_from_user(&val, buf, count);
734 - if (retval)
735 + if (retval <= 0)
736 return retval;
737 if (val != 0)
738 return -EINVAL;
739 @@ -245,7 +245,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
740 return -EINVAL;
741
742 retval = oprofilefs_ulong_from_user(&val, buf, count);
743 - if (retval)
744 + if (retval <= 0)
745 return retval;
746
747 if (val != 0 && val != 1)
748 @@ -280,7 +280,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
749 return -EINVAL;
750
751 retval = oprofilefs_ulong_from_user(&val, buf, count);
752 - if (retval)
753 + if (retval <= 0)
754 return retval;
755
756 if (val != 0 && val != 1)
757 @@ -319,7 +319,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
758 return -EINVAL;
759
760 retval = oprofilefs_ulong_from_user(&val, buf, count);
761 - if (retval)
762 + if (retval <= 0)
763 return retval;
764
765 if (val != 0 && val != 1)
766 diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
767 index c34f96c..1bd321d 100644
768 --- a/arch/x86/include/asm/xen/page.h
769 +++ b/arch/x86/include/asm/xen/page.h
770 @@ -50,7 +50,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
771
772 extern int m2p_add_override(unsigned long mfn, struct page *page,
773 struct gnttab_map_grant_ref *kmap_op);
774 -extern int m2p_remove_override(struct page *page, bool clear_pte);
775 +extern int m2p_remove_override(struct page *page,
776 + struct gnttab_map_grant_ref *kmap_op);
777 extern struct page *m2p_find_override(unsigned long mfn);
778 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
779
780 diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
781 index d0e6e40..5dd467b 100644
782 --- a/arch/x86/pci/fixup.c
783 +++ b/arch/x86/pci/fixup.c
784 @@ -519,3 +519,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
785 }
786 }
787 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
788 +
789 +/*
790 + * Twinhead H12Y needs us to block out a region otherwise we map devices
791 + * there and any access kills the box.
792 + *
793 + * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
794 + *
795 + * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
796 + */
797 +static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
798 +{
799 + if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
800 + pr_info("Reserving memory on Twinhead H12Y\n");
801 + request_mem_region(0xFFB00000, 0x100000, "twinhead");
802 + }
803 +}
804 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
805 diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
806 index 00a0385..3ace817 100644
807 --- a/arch/x86/xen/p2m.c
808 +++ b/arch/x86/xen/p2m.c
809 @@ -714,9 +714,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
810
811 xen_mc_issue(PARAVIRT_LAZY_MMU);
812 }
813 - /* let's use dev_bus_addr to record the old mfn instead */
814 - kmap_op->dev_bus_addr = page->index;
815 - page->index = (unsigned long) kmap_op;
816 }
817 spin_lock_irqsave(&m2p_override_lock, flags);
818 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
819 @@ -743,7 +740,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
820 return 0;
821 }
822 EXPORT_SYMBOL_GPL(m2p_add_override);
823 -int m2p_remove_override(struct page *page, bool clear_pte)
824 +int m2p_remove_override(struct page *page,
825 + struct gnttab_map_grant_ref *kmap_op)
826 {
827 unsigned long flags;
828 unsigned long mfn;
829 @@ -773,10 +771,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
830 WARN_ON(!PagePrivate(page));
831 ClearPagePrivate(page);
832
833 - if (clear_pte) {
834 - struct gnttab_map_grant_ref *map_op =
835 - (struct gnttab_map_grant_ref *) page->index;
836 - set_phys_to_machine(pfn, map_op->dev_bus_addr);
837 + set_phys_to_machine(pfn, page->index);
838 + if (kmap_op != NULL) {
839 if (!PageHighMem(page)) {
840 struct multicall_space mcs;
841 struct gnttab_unmap_grant_ref *unmap_op;
842 @@ -788,13 +784,13 @@ int m2p_remove_override(struct page *page, bool clear_pte)
843 * issued. In this case handle is going to -1 because
844 * it hasn't been modified yet.
845 */
846 - if (map_op->handle == -1)
847 + if (kmap_op->handle == -1)
848 xen_mc_flush();
849 /*
850 - * Now if map_op->handle is negative it means that the
851 + * Now if kmap_op->handle is negative it means that the
852 * hypercall actually returned an error.
853 */
854 - if (map_op->handle == GNTST_general_error) {
855 + if (kmap_op->handle == GNTST_general_error) {
856 printk(KERN_WARNING "m2p_remove_override: "
857 "pfn %lx mfn %lx, failed to modify kernel mappings",
858 pfn, mfn);
859 @@ -804,8 +800,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
860 mcs = xen_mc_entry(
861 sizeof(struct gnttab_unmap_grant_ref));
862 unmap_op = mcs.args;
863 - unmap_op->host_addr = map_op->host_addr;
864 - unmap_op->handle = map_op->handle;
865 + unmap_op->host_addr = kmap_op->host_addr;
866 + unmap_op->handle = kmap_op->handle;
867 unmap_op->dev_bus_addr = 0;
868
869 MULTI_grant_table_op(mcs.mc,
870 @@ -816,10 +812,9 @@ int m2p_remove_override(struct page *page, bool clear_pte)
871 set_pte_at(&init_mm, address, ptep,
872 pfn_pte(pfn, PAGE_KERNEL));
873 __flush_tlb_single(address);
874 - map_op->host_addr = 0;
875 + kmap_op->host_addr = 0;
876 }
877 - } else
878 - set_phys_to_machine(pfn, page->index);
879 + }
880
881 /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
882 * somewhere in this domain, even before being added to the
883 diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
884 index 99de967..017d48a 100644
885 --- a/arch/x86/xen/setup.c
886 +++ b/arch/x86/xen/setup.c
887 @@ -17,6 +17,7 @@
888 #include <asm/e820.h>
889 #include <asm/setup.h>
890 #include <asm/acpi.h>
891 +#include <asm/numa.h>
892 #include <asm/xen/hypervisor.h>
893 #include <asm/xen/hypercall.h>
894
895 @@ -431,4 +432,7 @@ void __init xen_arch_setup(void)
896 disable_cpufreq();
897 WARN_ON(set_pm_idle_to_default());
898 fiddle_vdso();
899 +#ifdef CONFIG_NUMA
900 + numa_off = 1;
901 +#endif
902 }
903 diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
904 index 0500f71..2adef53 100644
905 --- a/drivers/acpi/power.c
906 +++ b/drivers/acpi/power.c
907 @@ -103,6 +103,7 @@ struct acpi_power_resource {
908
909 /* List of devices relying on this power resource */
910 struct acpi_power_resource_device *devices;
911 + struct mutex devices_lock;
912 };
913
914 static struct list_head acpi_power_resource_list;
915 @@ -221,7 +222,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device)
916
917 static int __acpi_power_on(struct acpi_power_resource *resource)
918 {
919 - struct acpi_power_resource_device *device_list = resource->devices;
920 acpi_status status = AE_OK;
921
922 status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
923 @@ -234,19 +234,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
924 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
925 resource->name));
926
927 - while (device_list) {
928 - acpi_power_on_device(device_list->device);
929 -
930 - device_list = device_list->next;
931 - }
932 -
933 return 0;
934 }
935
936 static int acpi_power_on(acpi_handle handle)
937 {
938 int result = 0;
939 + bool resume_device = false;
940 struct acpi_power_resource *resource = NULL;
941 + struct acpi_power_resource_device *device_list;
942
943 result = acpi_power_get_context(handle, &resource);
944 if (result)
945 @@ -262,10 +258,25 @@ static int acpi_power_on(acpi_handle handle)
946 result = __acpi_power_on(resource);
947 if (result)
948 resource->ref_count--;
949 + else
950 + resume_device = true;
951 }
952
953 mutex_unlock(&resource->resource_lock);
954
955 + if (!resume_device)
956 + return result;
957 +
958 + mutex_lock(&resource->devices_lock);
959 +
960 + device_list = resource->devices;
961 + while (device_list) {
962 + acpi_power_on_device(device_list->device);
963 + device_list = device_list->next;
964 + }
965 +
966 + mutex_unlock(&resource->devices_lock);
967 +
968 return result;
969 }
970
971 @@ -351,7 +362,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
972 if (acpi_power_get_context(res_handle, &resource))
973 return;
974
975 - mutex_lock(&resource->resource_lock);
976 + mutex_lock(&resource->devices_lock);
977 prev = NULL;
978 curr = resource->devices;
979 while (curr) {
980 @@ -368,7 +379,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
981 prev = curr;
982 curr = curr->next;
983 }
984 - mutex_unlock(&resource->resource_lock);
985 + mutex_unlock(&resource->devices_lock);
986 }
987
988 /* Unlink dev from all power resources in _PR0 */
989 @@ -409,10 +420,10 @@ static int __acpi_power_resource_register_device(
990
991 power_resource_device->device = powered_device;
992
993 - mutex_lock(&resource->resource_lock);
994 + mutex_lock(&resource->devices_lock);
995 power_resource_device->next = resource->devices;
996 resource->devices = power_resource_device;
997 - mutex_unlock(&resource->resource_lock);
998 + mutex_unlock(&resource->devices_lock);
999
1000 return 0;
1001 }
1002 @@ -457,7 +468,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
1003 return ret;
1004
1005 no_power_resource:
1006 - printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
1007 + printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
1008 return -ENODEV;
1009 }
1010
1011 @@ -715,6 +726,7 @@ static int acpi_power_add(struct acpi_device *device)
1012
1013 resource->device = device;
1014 mutex_init(&resource->resource_lock);
1015 + mutex_init(&resource->devices_lock);
1016 strcpy(resource->name, device->pnp.bus_id);
1017 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
1018 strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
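The ACPI power.c change above introduces a separate devices_lock so the list of dependent devices is no longer walked, and device resume callbacks are no longer invoked, while resource_lock is held. The general shape of that split, sketched with POSIX threads (illustrative types and simplified refcounting, not the ACPI driver's actual structures):

#include <pthread.h>
#include <stdio.h>

struct dependent { struct dependent *next; const char *name; };

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t devices_lock  = PTHREAD_MUTEX_INITIALIZER;
static struct dependent *devices;
static int ref_count;

static void power_on(void)
{
    int turned_on = 0;

    pthread_mutex_lock(&resource_lock);
    if (ref_count++ == 0)
        turned_on = 1;          /* the real driver evaluates _ON here */
    pthread_mutex_unlock(&resource_lock);

    if (!turned_on)
        return;

    /* Resume dependants without holding the power-state lock. */
    pthread_mutex_lock(&devices_lock);
    for (struct dependent *d = devices; d; d = d->next)
        printf("resuming %s\n", d->name);
    pthread_mutex_unlock(&devices_lock);
}

static void register_dependent(struct dependent *d)
{
    pthread_mutex_lock(&devices_lock);
    d->next = devices;
    devices = d;
    pthread_mutex_unlock(&devices_lock);
}

int main(void)
{
    struct dependent disk = { .name = "disk" };

    register_dependent(&disk);
    power_on();
    return 0;
}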
1019 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1020 index ebaf67e..93cbc44 100644
1021 --- a/drivers/ata/ahci.c
1022 +++ b/drivers/ata/ahci.c
1023 @@ -396,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
1024 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
1025 { PCI_DEVICE(0x1b4b, 0x917a),
1026 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
1027 + { PCI_DEVICE(0x1b4b, 0x9192),
1028 + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
1029 { PCI_DEVICE(0x1b4b, 0x91a3),
1030 .driver_data = board_ahci_yes_fbs },
1031
1032 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1033 index d31ee55..cf4837f 100644
1034 --- a/drivers/ata/libata-core.c
1035 +++ b/drivers/ata/libata-core.c
1036 @@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
1037
1038 /* Devices which aren't very happy with higher link speeds */
1039 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
1040 + { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
1041
1042 /*
1043 * Devices which choke on SETXFER. Applies only if both the
1044 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
1045 index bd0f394..bb82b18 100644
1046 --- a/drivers/base/power/runtime.c
1047 +++ b/drivers/base/power/runtime.c
1048 @@ -430,7 +430,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
1049 goto repeat;
1050 }
1051
1052 - dev->power.deferred_resume = false;
1053 if (dev->power.no_callbacks)
1054 goto no_callback; /* Assume success. */
1055
1056 @@ -506,6 +505,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
1057 wake_up_all(&dev->power.wait_queue);
1058
1059 if (dev->power.deferred_resume) {
1060 + dev->power.deferred_resume = false;
1061 rpm_resume(dev, 0);
1062 retval = -EAGAIN;
1063 goto out;
1064 @@ -652,6 +652,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
1065 || dev->parent->power.runtime_status == RPM_ACTIVE) {
1066 atomic_inc(&dev->parent->power.child_count);
1067 spin_unlock(&dev->parent->power.lock);
1068 + retval = 1;
1069 goto no_callback; /* Assume success. */
1070 }
1071 spin_unlock(&dev->parent->power.lock);
1072 @@ -735,7 +736,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
1073 }
1074 wake_up_all(&dev->power.wait_queue);
1075
1076 - if (!retval)
1077 + if (retval >= 0)
1078 rpm_idle(dev, RPM_ASYNC);
1079
1080 out:
1081 diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
1082 index 38aa6dd..da33111 100644
1083 --- a/drivers/block/cciss_scsi.c
1084 +++ b/drivers/block/cciss_scsi.c
1085 @@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
1086 }
1087 break;
1088 case CMD_PROTOCOL_ERR:
1089 + cmd->result = DID_ERROR << 16;
1090 dev_warn(&h->pdev->dev,
1091 "%p has protocol error\n", c);
1092 break;
1093 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1094 index 061427a..3c4c225 100644
1095 --- a/drivers/block/nbd.c
1096 +++ b/drivers/block/nbd.c
1097 @@ -445,6 +445,14 @@ static void nbd_clear_que(struct nbd_device *nbd)
1098 req->errors++;
1099 nbd_end_request(req);
1100 }
1101 +
1102 + while (!list_empty(&nbd->waiting_queue)) {
1103 + req = list_entry(nbd->waiting_queue.next, struct request,
1104 + queuelist);
1105 + list_del_init(&req->queuelist);
1106 + req->errors++;
1107 + nbd_end_request(req);
1108 + }
1109 }
1110
1111
1112 @@ -594,6 +602,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1113 nbd->file = NULL;
1114 nbd_clear_que(nbd);
1115 BUG_ON(!list_empty(&nbd->queue_head));
1116 + BUG_ON(!list_empty(&nbd->waiting_queue));
1117 if (file)
1118 fput(file);
1119 return 0;
1120 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
1121 index 73f196c..c6decb9 100644
1122 --- a/drivers/block/xen-blkback/blkback.c
1123 +++ b/drivers/block/xen-blkback/blkback.c
1124 @@ -337,7 +337,7 @@ static void xen_blkbk_unmap(struct pending_req *req)
1125 invcount++;
1126 }
1127
1128 - ret = gnttab_unmap_refs(unmap, pages, invcount, false);
1129 + ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
1130 BUG_ON(ret);
1131 }
1132
1133 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1134 index 9217121..29d31ff 100644
1135 --- a/drivers/bluetooth/btusb.c
1136 +++ b/drivers/bluetooth/btusb.c
1137 @@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
1138 /* Generic Bluetooth USB device */
1139 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
1140
1141 + /* Apple-specific (Broadcom) devices */
1142 + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
1143 +
1144 /* Broadcom SoftSailing reporting vendor specific */
1145 { USB_DEVICE(0x0a5c, 0x21e1) },
1146
1147 @@ -102,15 +105,14 @@ static struct usb_device_id btusb_table[] = {
1148
1149 /* Broadcom BCM20702A0 */
1150 { USB_DEVICE(0x0489, 0xe042) },
1151 - { USB_DEVICE(0x0a5c, 0x21e3) },
1152 - { USB_DEVICE(0x0a5c, 0x21e6) },
1153 - { USB_DEVICE(0x0a5c, 0x21e8) },
1154 - { USB_DEVICE(0x0a5c, 0x21f3) },
1155 { USB_DEVICE(0x413c, 0x8197) },
1156
1157 /* Foxconn - Hon Hai */
1158 { USB_DEVICE(0x0489, 0xe033) },
1159
1160 + /*Broadcom devices with vendor specific id */
1161 + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
1162 +
1163 { } /* Terminating entry */
1164 };
1165
1166 diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
1167 index c0e8164..1a40935 100644
1168 --- a/drivers/cpufreq/powernow-k8.c
1169 +++ b/drivers/cpufreq/powernow-k8.c
1170 @@ -35,7 +35,6 @@
1171 #include <linux/slab.h>
1172 #include <linux/string.h>
1173 #include <linux/cpumask.h>
1174 -#include <linux/sched.h> /* for current / set_cpus_allowed() */
1175 #include <linux/io.h>
1176 #include <linux/delay.h>
1177
1178 @@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
1179 return res;
1180 }
1181
1182 -/* Driver entry point to switch to the target frequency */
1183 -static int powernowk8_target(struct cpufreq_policy *pol,
1184 - unsigned targfreq, unsigned relation)
1185 +struct powernowk8_target_arg {
1186 + struct cpufreq_policy *pol;
1187 + unsigned targfreq;
1188 + unsigned relation;
1189 +};
1190 +
1191 +static long powernowk8_target_fn(void *arg)
1192 {
1193 - cpumask_var_t oldmask;
1194 + struct powernowk8_target_arg *pta = arg;
1195 + struct cpufreq_policy *pol = pta->pol;
1196 + unsigned targfreq = pta->targfreq;
1197 + unsigned relation = pta->relation;
1198 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1199 u32 checkfid;
1200 u32 checkvid;
1201 unsigned int newstate;
1202 - int ret = -EIO;
1203 + int ret;
1204
1205 if (!data)
1206 return -EINVAL;
1207 @@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1208 checkfid = data->currfid;
1209 checkvid = data->currvid;
1210
1211 - /* only run on specific CPU from here on. */
1212 - /* This is poor form: use a workqueue or smp_call_function_single */
1213 - if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
1214 - return -ENOMEM;
1215 -
1216 - cpumask_copy(oldmask, tsk_cpus_allowed(current));
1217 - set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
1218 -
1219 - if (smp_processor_id() != pol->cpu) {
1220 - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
1221 - goto err_out;
1222 - }
1223 -
1224 if (pending_bit_stuck()) {
1225 printk(KERN_ERR PFX "failing targ, change pending bit set\n");
1226 - goto err_out;
1227 + return -EIO;
1228 }
1229
1230 pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
1231 pol->cpu, targfreq, pol->min, pol->max, relation);
1232
1233 if (query_current_values_with_pending_wait(data))
1234 - goto err_out;
1235 + return -EIO;
1236
1237 if (cpu_family != CPU_HW_PSTATE) {
1238 pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
1239 @@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1240
1241 if (cpufreq_frequency_table_target(pol, data->powernow_table,
1242 targfreq, relation, &newstate))
1243 - goto err_out;
1244 + return -EIO;
1245
1246 mutex_lock(&fidvid_mutex);
1247
1248 @@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1249 ret = transition_frequency_fidvid(data, newstate);
1250 if (ret) {
1251 printk(KERN_ERR PFX "transition frequency failed\n");
1252 - ret = 1;
1253 mutex_unlock(&fidvid_mutex);
1254 - goto err_out;
1255 + return 1;
1256 }
1257 mutex_unlock(&fidvid_mutex);
1258
1259 @@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
1260 data->powernow_table[newstate].index);
1261 else
1262 pol->cur = find_khz_freq_from_fid(data->currfid);
1263 - ret = 0;
1264
1265 -err_out:
1266 - set_cpus_allowed_ptr(current, oldmask);
1267 - free_cpumask_var(oldmask);
1268 - return ret;
1269 + return 0;
1270 +}
1271 +
1272 +/* Driver entry point to switch to the target frequency */
1273 +static int powernowk8_target(struct cpufreq_policy *pol,
1274 + unsigned targfreq, unsigned relation)
1275 +{
1276 + struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
1277 + .relation = relation };
1278 +
1279 + /*
1280 + * Must run on @pol->cpu. cpufreq core is responsible for ensuring
1281 + * that we're bound to the current CPU and pol->cpu stays online.
1282 + */
1283 + if (smp_processor_id() == pol->cpu)
1284 + return powernowk8_target_fn(&pta);
1285 + else
1286 + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
1287 }
1288
1289 /* Driver entry point to verify the policy and range of frequencies */
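The powernow-k8 rework above replaces the old set_cpus_allowed_ptr() dance with work_on_cpu(), which queues the function on the target CPU's workqueue and waits for its return value. A kernel-style sketch of that calling pattern (builds only in-tree; the freq_request type and function names here are illustrative, not the driver's):

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/printk.h>

struct freq_request {
    unsigned int cpu;
    unsigned int target_khz;
};

static long do_transition(void *arg)
{
    struct freq_request *req = arg;

    /* Runs on req->cpu, so per-CPU MSR accesses hit the right core. */
    pr_debug("transition on cpu %u to %u kHz\n",
             smp_processor_id(), req->target_khz);
    return 0;
}

static int request_transition(struct freq_request *req)
{
    /* Already on the right CPU? Call directly; otherwise hop over. */
    if (smp_processor_id() == req->cpu)
        return do_transition(req);
    return work_on_cpu(req->cpu, do_transition, req);
}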
1290 diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
1291 index bf0d7e4..9ec3943 100644
1292 --- a/drivers/dma/at_hdmac.c
1293 +++ b/drivers/dma/at_hdmac.c
1294 @@ -664,7 +664,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1295 flags);
1296
1297 if (unlikely(!atslave || !sg_len)) {
1298 - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
1299 + dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1300 return NULL;
1301 }
1302
1303 @@ -691,6 +691,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1304
1305 mem = sg_dma_address(sg);
1306 len = sg_dma_len(sg);
1307 + if (unlikely(!len)) {
1308 + dev_dbg(chan2dev(chan),
1309 + "prep_slave_sg: sg(%d) data length is zero\n", i);
1310 + goto err;
1311 + }
1312 mem_width = 2;
1313 if (unlikely(mem & 3 || len & 3))
1314 mem_width = 0;
1315 @@ -726,6 +731,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1316
1317 mem = sg_dma_address(sg);
1318 len = sg_dma_len(sg);
1319 + if (unlikely(!len)) {
1320 + dev_dbg(chan2dev(chan),
1321 + "prep_slave_sg: sg(%d) data length is zero\n", i);
1322 + goto err;
1323 + }
1324 mem_width = 2;
1325 if (unlikely(mem & 3 || len & 3))
1326 mem_width = 0;
1327 @@ -759,6 +769,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1328
1329 err_desc_get:
1330 dev_err(chan2dev(chan), "not enough descriptors available\n");
1331 +err:
1332 atc_desc_put(atchan, first);
1333 return NULL;
1334 }
1335 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1336 index 8c44f17..758122f 100644
1337 --- a/drivers/dma/pl330.c
1338 +++ b/drivers/dma/pl330.c
1339 @@ -1568,17 +1568,19 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r)
1340 goto xfer_exit;
1341 }
1342
1343 - /* Prefer Secure Channel */
1344 - if (!_manager_ns(thrd))
1345 - r->cfg->nonsecure = 0;
1346 - else
1347 - r->cfg->nonsecure = 1;
1348
1349 /* Use last settings, if not provided */
1350 - if (r->cfg)
1351 + if (r->cfg) {
1352 + /* Prefer Secure Channel */
1353 + if (!_manager_ns(thrd))
1354 + r->cfg->nonsecure = 0;
1355 + else
1356 + r->cfg->nonsecure = 1;
1357 +
1358 ccr = _prepare_ccr(r->cfg);
1359 - else
1360 + } else {
1361 ccr = readl(regs + CC(thrd->id));
1362 + }
1363
1364 /* If this req doesn't have valid xfer settings */
1365 if (!_is_valid(ccr)) {
1366 @@ -2935,6 +2937,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
1367 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
1368
1369 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
1370 + if (!pdmac->peripherals) {
1371 + ret = -ENOMEM;
1372 + dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
1373 + goto probe_err5;
1374 + }
1375
1376 for (i = 0; i < num_chan; i++) {
1377 pch = &pdmac->peripherals[i];
1378 diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
1379 index 61c2d08..e42e4b8 100644
1380 --- a/drivers/gpio/gpio-lpc32xx.c
1381 +++ b/drivers/gpio/gpio-lpc32xx.c
1382 @@ -304,6 +304,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
1383 {
1384 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
1385
1386 + __set_gpio_level_p012(group, pin, value);
1387 __set_gpio_dir_p012(group, pin, 0);
1388
1389 return 0;
1390 @@ -314,6 +315,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
1391 {
1392 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
1393
1394 + __set_gpio_level_p3(group, pin, value);
1395 __set_gpio_dir_p3(group, pin, 0);
1396
1397 return 0;
1398 @@ -322,6 +324,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
1399 static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
1400 int value)
1401 {
1402 + struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
1403 +
1404 + __set_gpo_level_p3(group, pin, value);
1405 return 0;
1406 }
1407
1408 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1409 index c79870a..7e479a4 100644
1410 --- a/drivers/gpu/drm/drm_crtc.c
1411 +++ b/drivers/gpu/drm/drm_crtc.c
1412 @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1413 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1414 return -EINVAL;
1415
1416 - if (!req->flags)
1417 + if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
1418 return -EINVAL;
1419
1420 mutex_lock(&dev->mode_config.mutex);
1421 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1422 index 90b9793..342ffb7 100644
1423 --- a/drivers/gpu/drm/i915/intel_crt.c
1424 +++ b/drivers/gpu/drm/i915/intel_crt.c
1425 @@ -266,6 +266,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
1426 return ret;
1427 }
1428
1429 +static struct edid *intel_crt_get_edid(struct drm_connector *connector,
1430 + struct i2c_adapter *i2c)
1431 +{
1432 + struct edid *edid;
1433 +
1434 + edid = drm_get_edid(connector, i2c);
1435 +
1436 + if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
1437 + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
1438 + intel_gmbus_force_bit(i2c, true);
1439 + edid = drm_get_edid(connector, i2c);
1440 + intel_gmbus_force_bit(i2c, false);
1441 + }
1442 +
1443 + return edid;
1444 +}
1445 +
1446 +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
1447 +static int intel_crt_ddc_get_modes(struct drm_connector *connector,
1448 + struct i2c_adapter *adapter)
1449 +{
1450 + struct edid *edid;
1451 +
1452 + edid = intel_crt_get_edid(connector, adapter);
1453 + if (!edid)
1454 + return 0;
1455 +
1456 + return intel_connector_update_modes(connector, edid);
1457 +}
1458 +
1459 static bool intel_crt_detect_ddc(struct drm_connector *connector)
1460 {
1461 struct intel_crt *crt = intel_attached_crt(connector);
1462 @@ -279,7 +309,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
1463 struct edid *edid;
1464 bool is_digital = false;
1465
1466 - edid = drm_get_edid(connector,
1467 + edid = intel_crt_get_edid(connector,
1468 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1469 /*
1470 * This may be a DVI-I connector with a shared DDC
1471 @@ -477,13 +507,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
1472 struct drm_i915_private *dev_priv = dev->dev_private;
1473 int ret;
1474
1475 - ret = intel_ddc_get_modes(connector,
1476 + ret = intel_crt_ddc_get_modes(connector,
1477 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1478 if (ret || !IS_G4X(dev))
1479 return ret;
1480
1481 /* Try to probe digital port for output in DVI-I -> VGA mode. */
1482 - return intel_ddc_get_modes(connector,
1483 + return intel_crt_ddc_get_modes(connector,
1484 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
1485 }
1486
1487 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1488 index 3de3d9b..498bcbe 100644
1489 --- a/drivers/gpu/drm/i915/intel_display.c
1490 +++ b/drivers/gpu/drm/i915/intel_display.c
1491 @@ -1099,7 +1099,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1492 enum pipe pipe, int reg)
1493 {
1494 u32 val = I915_READ(reg);
1495 - WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1496 + WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1497 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1498 reg, pipe_name(pipe));
1499 }
1500 @@ -1116,13 +1116,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1501
1502 reg = PCH_ADPA;
1503 val = I915_READ(reg);
1504 - WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1505 + WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1506 "PCH VGA enabled on transcoder %c, should be disabled\n",
1507 pipe_name(pipe));
1508
1509 reg = PCH_LVDS;
1510 val = I915_READ(reg);
1511 - WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1512 + WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1513 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1514 pipe_name(pipe));
1515
1516 @@ -1487,7 +1487,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1517 enum pipe pipe, int reg)
1518 {
1519 u32 val = I915_READ(reg);
1520 - if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1521 + if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1522 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1523 reg, pipe);
1524 I915_WRITE(reg, val & ~PORT_ENABLE);
1525 @@ -1509,12 +1509,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1526
1527 reg = PCH_ADPA;
1528 val = I915_READ(reg);
1529 - if (adpa_pipe_enabled(dev_priv, val, pipe))
1530 + if (adpa_pipe_enabled(dev_priv, pipe, val))
1531 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1532
1533 reg = PCH_LVDS;
1534 val = I915_READ(reg);
1535 - if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1536 + if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1537 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1538 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1539 POSTING_READ(reg);
1540 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1541 index 715afa1..2cae72d 100644
1542 --- a/drivers/gpu/drm/i915/intel_drv.h
1543 +++ b/drivers/gpu/drm/i915/intel_drv.h
1544 @@ -288,6 +288,8 @@ struct intel_fbc_work {
1545 int interval;
1546 };
1547
1548 +int intel_connector_update_modes(struct drm_connector *connector,
1549 + struct edid *edid);
1550 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
1551 extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
1552
1553 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1554 index 2d7f47b..fb44e9d 100644
1555 --- a/drivers/gpu/drm/i915/intel_hdmi.c
1556 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1557 @@ -271,7 +271,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
1558 u32 temp;
1559 u32 enable_bits = SDVO_ENABLE;
1560
1561 - if (intel_hdmi->has_audio)
1562 + if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
1563 enable_bits |= SDVO_AUDIO_ENABLE;
1564
1565 temp = I915_READ(intel_hdmi->sdvox_reg);
1566 diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
1567 index d1928e7..9a2b270 100644
1568 --- a/drivers/gpu/drm/i915/intel_modes.c
1569 +++ b/drivers/gpu/drm/i915/intel_modes.c
1570 @@ -60,6 +60,25 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
1571 }
1572
1573 /**
1574 + * intel_connector_update_modes - update connector from edid
1575 + * @connector: DRM connector device to use
1576 + * @edid: previously read EDID information
1577 + */
1578 +int intel_connector_update_modes(struct drm_connector *connector,
1579 + struct edid *edid)
1580 +{
1581 + int ret;
1582 +
1583 + drm_mode_connector_update_edid_property(connector, edid);
1584 + ret = drm_add_edid_modes(connector, edid);
1585 + drm_edid_to_eld(connector, edid);
1586 + connector->display_info.raw_edid = NULL;
1587 + kfree(edid);
1588 +
1589 + return ret;
1590 +}
1591 +
1592 +/**
1593 * intel_ddc_get_modes - get modelist from monitor
1594 * @connector: DRM connector device to use
1595 * @adapter: i2c adapter
1596 @@ -70,18 +89,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
1597 struct i2c_adapter *adapter)
1598 {
1599 struct edid *edid;
1600 - int ret = 0;
1601
1602 edid = drm_get_edid(connector, adapter);
1603 - if (edid) {
1604 - drm_mode_connector_update_edid_property(connector, edid);
1605 - ret = drm_add_edid_modes(connector, edid);
1606 - drm_edid_to_eld(connector, edid);
1607 - connector->display_info.raw_edid = NULL;
1608 - kfree(edid);
1609 - }
1610 + if (!edid)
1611 + return 0;
1612
1613 - return ret;
1614 + return intel_connector_update_modes(connector, edid);
1615 }
1616
1617 static const struct drm_prop_enum_list force_audio_names[] = {
1618 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1619 index 12a9e5f..302d3d5 100644
1620 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1621 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1622 @@ -258,6 +258,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
1623 I915_WRITE_HEAD(ring, 0);
1624 ring->write_tail(ring, 0);
1625
1626 + /* Initialize the ring. */
1627 + I915_WRITE_START(ring, obj->gtt_offset);
1628 head = I915_READ_HEAD(ring) & HEAD_ADDR;
1629
1630 /* G45 ring initialization fails to reset head to zero */
1631 @@ -283,11 +285,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
1632 }
1633 }
1634
1635 - /* Initialize the ring. This must happen _after_ we've cleared the ring
1636 - * registers with the above sequence (the readback of the HEAD registers
1637 - * also enforces ordering), otherwise the hw might lose the new ring
1638 - * register values. */
1639 - I915_WRITE_START(ring, obj->gtt_offset);
1640 I915_WRITE_CTL(ring,
1641 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
1642 | RING_VALID);
1643 diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
1644 index a85e112..f233b8f 100644
1645 --- a/drivers/gpu/drm/nouveau/nouveau_display.c
1646 +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
1647 @@ -586,7 +586,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
1648 args->size = args->pitch * args->height;
1649 args->size = roundup(args->size, PAGE_SIZE);
1650
1651 - ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
1652 + ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
1653 if (ret)
1654 return ret;
1655
1656 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1657 index a53ca30..19f4082 100644
1658 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1659 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1660 @@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
1661 radeon_crtc->enabled = true;
1662 /* adjust pm to dpms changes BEFORE enabling crtcs */
1663 radeon_pm_compute_clocks(rdev);
1664 - /* disable crtc pair power gating before programming */
1665 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
1666 atombios_powergate_crtc(crtc, ATOM_DISABLE);
1667 atombios_enable_crtc(crtc, ATOM_ENABLE);
1668 @@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
1669 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
1670 atombios_enable_crtc(crtc, ATOM_DISABLE);
1671 radeon_crtc->enabled = false;
1672 - /* power gating is per-pair */
1673 - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
1674 - struct drm_crtc *other_crtc;
1675 - struct radeon_crtc *other_radeon_crtc;
1676 - list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
1677 - other_radeon_crtc = to_radeon_crtc(other_crtc);
1678 - if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
1679 - ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
1680 - ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
1681 - ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
1682 - ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
1683 - ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
1684 - /* if both crtcs in the pair are off, enable power gating */
1685 - if (other_radeon_crtc->enabled == false)
1686 - atombios_powergate_crtc(crtc, ATOM_ENABLE);
1687 - break;
1688 - }
1689 - }
1690 - }
1691 + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
1692 + atombios_powergate_crtc(crtc, ATOM_ENABLE);
1693 /* adjust pm to dpms changes AFTER disabling crtcs */
1694 radeon_pm_compute_clocks(rdev);
1695 break;
1696 @@ -444,11 +426,28 @@ union atom_enable_ss {
1697 static void atombios_crtc_program_ss(struct radeon_device *rdev,
1698 int enable,
1699 int pll_id,
1700 + int crtc_id,
1701 struct radeon_atom_ss *ss)
1702 {
1703 + unsigned i;
1704 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
1705 union atom_enable_ss args;
1706
1707 + if (!enable) {
1708 + for (i = 0; i < rdev->num_crtc; i++) {
1709 + if (rdev->mode_info.crtcs[i] &&
1710 + rdev->mode_info.crtcs[i]->enabled &&
1711 + i != crtc_id &&
1712 + pll_id == rdev->mode_info.crtcs[i]->pll_id) {
1713 + /* one other crtc is using this pll don't turn
1714 + * off spread spectrum as it might turn off
1715 + * display on active crtc
1716 + */
1717 + return;
1718 + }
1719 + }
1720 + }
1721 +
1722 memset(&args, 0, sizeof(args));
1723
1724 if (ASIC_IS_DCE5(rdev)) {
1725 @@ -1039,7 +1038,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1726 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
1727 &ref_div, &post_div);
1728
1729 - atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
1730 + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1731
1732 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1733 encoder_mode, radeon_encoder->encoder_id, mode->clock,
1734 @@ -1062,7 +1061,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1735 ss.step = step_size;
1736 }
1737
1738 - atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
1739 + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
1740 }
1741 }
1742
1743 @@ -1571,11 +1570,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
1744 ASIC_INTERNAL_SS_ON_DCPLL,
1745 rdev->clock.default_dispclk);
1746 if (ss_enabled)
1747 - atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
1748 + atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
1749 /* XXX: DCE5, make sure voltage, dispclk is high enough */
1750 atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
1751 if (ss_enabled)
1752 - atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
1753 + atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
1754 }
1755
1756 }
1757 @@ -1664,9 +1663,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1758 struct drm_device *dev = crtc->dev;
1759 struct radeon_device *rdev = dev->dev_private;
1760 struct radeon_atom_ss ss;
1761 + int i;
1762
1763 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1764
1765 + for (i = 0; i < rdev->num_crtc; i++) {
1766 + if (rdev->mode_info.crtcs[i] &&
1767 + rdev->mode_info.crtcs[i]->enabled &&
1768 + i != radeon_crtc->crtc_id &&
1769 + radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
1770 + /* one other crtc is using this pll don't turn
1771 + * off the pll
1772 + */
1773 + goto done;
1774 + }
1775 + }
1776 +
1777 switch (radeon_crtc->pll_id) {
1778 case ATOM_PPLL1:
1779 case ATOM_PPLL2:
1780 @@ -1683,6 +1695,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1781 default:
1782 break;
1783 }
1784 +done:
1785 radeon_crtc->pll_id = -1;
1786 }
1787
1788 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1789 index a3ae788..2d07fbf 100644
1790 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1791 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1792 @@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1793 struct drm_device *dev = encoder->dev;
1794 struct radeon_device *rdev = dev->dev_private;
1795 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1796 + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1797 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1798 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1799 struct radeon_connector *radeon_connector = NULL;
1800 struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
1801 @@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1802
1803 switch (mode) {
1804 case DRM_MODE_DPMS_ON:
1805 - /* some early dce3.2 boards have a bug in their transmitter control table */
1806 - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
1807 - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1808 - if (ASIC_IS_DCE6(rdev)) {
1809 - /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
1810 - * before reenabling encoder on DPMS ON, otherwise we never
1811 - * get picture
1812 - */
1813 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1814 + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1815 + if (!connector)
1816 + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1817 + else
1818 + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1819 +
1820 + /* setup and enable the encoder */
1821 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1822 + atombios_dig_encoder_setup(encoder,
1823 + ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1824 + dig->panel_mode);
1825 + if (ext_encoder) {
1826 + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1827 + atombios_external_encoder_setup(encoder, ext_encoder,
1828 + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1829 }
1830 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1831 - } else {
1832 + } else if (ASIC_IS_DCE4(rdev)) {
1833 + /* setup and enable the encoder */
1834 + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1835 + /* enable the transmitter */
1836 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1837 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1838 + } else {
1839 + /* setup and enable the encoder and transmitter */
1840 + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1841 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1842 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1843 + /* some early dce3.2 boards have a bug in their transmitter control table */
1844 + if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
1845 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1846 }
1847 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1848 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1849 @@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1850 case DRM_MODE_DPMS_STANDBY:
1851 case DRM_MODE_DPMS_SUSPEND:
1852 case DRM_MODE_DPMS_OFF:
1853 - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
1854 + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1855 + /* disable the transmitter */
1856 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1857 - else
1858 + } else if (ASIC_IS_DCE4(rdev)) {
1859 + /* disable the transmitter */
1860 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1861 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1862 + } else {
1863 + /* disable the encoder and transmitter */
1864 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1865 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1866 + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1867 + }
1868 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
1869 if (ASIC_IS_DCE4(rdev))
1870 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
1871 @@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1872 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1873 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1874 struct drm_encoder *test_encoder;
1875 - struct radeon_encoder_atom_dig *dig;
1876 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1877 uint32_t dig_enc_in_use = 0;
1878
1879 - /* DCE4/5 */
1880 - if (ASIC_IS_DCE4(rdev)) {
1881 - dig = radeon_encoder->enc_priv;
1882 - if (ASIC_IS_DCE41(rdev)) {
1883 + if (ASIC_IS_DCE6(rdev)) {
1884 + /* DCE6 */
1885 + switch (radeon_encoder->encoder_id) {
1886 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1887 + if (dig->linkb)
1888 + return 1;
1889 + else
1890 + return 0;
1891 + break;
1892 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1893 + if (dig->linkb)
1894 + return 3;
1895 + else
1896 + return 2;
1897 + break;
1898 + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1899 + if (dig->linkb)
1900 + return 5;
1901 + else
1902 + return 4;
1903 + break;
1904 + }
1905 + } else if (ASIC_IS_DCE4(rdev)) {
1906 + /* DCE4/5 */
1907 + if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
1908 /* ontario follows DCE4 */
1909 if (rdev->family == CHIP_PALM) {
1910 if (dig->linkb)
1911 @@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1912 struct drm_device *dev = encoder->dev;
1913 struct radeon_device *rdev = dev->dev_private;
1914 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1915 - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
1916
1917 radeon_encoder->pixel_clock = adjusted_mode->clock;
1918
1919 + /* need to call this here rather than in prepare() since we need some crtc info */
1920 + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1921 +
1922 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
1923 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1924 atombios_yuv_setup(encoder, true);
1925 @@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1926 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1927 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1928 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1929 - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
1930 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1931 - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1932 -
1933 - if (!connector)
1934 - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
1935 - else
1936 - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
1937 -
1938 - /* setup and enable the encoder */
1939 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1940 - atombios_dig_encoder_setup(encoder,
1941 - ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
1942 - dig->panel_mode);
1943 - } else if (ASIC_IS_DCE4(rdev)) {
1944 - /* disable the transmitter */
1945 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1946 - /* setup and enable the encoder */
1947 - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
1948 -
1949 - /* enable the transmitter */
1950 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1951 - } else {
1952 - /* disable the encoder and transmitter */
1953 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1954 - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
1955 -
1956 - /* setup and enable the encoder and transmitter */
1957 - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1958 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1959 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1960 - }
1961 + /* handled in dpms */
1962 break;
1963 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1964 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1965 @@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1966 break;
1967 }
1968
1969 - if (ext_encoder) {
1970 - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1971 - atombios_external_encoder_setup(encoder, ext_encoder,
1972 - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1973 - else
1974 - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1975 - }
1976 -
1977 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1978
1979 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
1980 @@ -2102,7 +2115,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1981 }
1982
1983 radeon_atom_output_lock(encoder, true);
1984 - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1985
1986 if (connector) {
1987 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1988 @@ -2123,6 +2135,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1989
1990 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1991 {
1992 + /* need to call this here as we need the crtc set up */
1993 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
1994 radeon_atom_output_lock(encoder, false);
1995 }
1996 @@ -2163,14 +2176,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1997 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1998 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1999 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
2000 - if (ASIC_IS_DCE4(rdev))
2001 - /* disable the transmitter */
2002 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2003 - else {
2004 - /* disable the encoder and transmitter */
2005 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
2006 - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
2007 - }
2008 + /* handled in dpms */
2009 break;
2010 case ENCODER_OBJECT_ID_INTERNAL_DDI:
2011 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
2012 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2013 index 138b952..66150f0 100644
2014 --- a/drivers/gpu/drm/radeon/radeon.h
2015 +++ b/drivers/gpu/drm/radeon/radeon.h
2016 @@ -138,21 +138,6 @@ struct radeon_device;
2017 /*
2018 * BIOS.
2019 */
2020 -#define ATRM_BIOS_PAGE 4096
2021 -
2022 -#if defined(CONFIG_VGA_SWITCHEROO)
2023 -bool radeon_atrm_supported(struct pci_dev *pdev);
2024 -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
2025 -#else
2026 -static inline bool radeon_atrm_supported(struct pci_dev *pdev)
2027 -{
2028 - return false;
2029 -}
2030 -
2031 -static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
2032 - return -EINVAL;
2033 -}
2034 -#endif
2035 bool radeon_get_bios(struct radeon_device *rdev);
2036
2037
2038 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2039 index b1e3820..5e30e12 100644
2040 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2041 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2042 @@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2043 }
2044
2045 /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
2046 - if ((dev->pdev->device == 0x9802) &&
2047 + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
2048 (dev->pdev->subsystem_vendor == 0x1734) &&
2049 (dev->pdev->subsystem_device == 0x11bd)) {
2050 if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
2051 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2052 index 98724fc..2a2cf0b 100644
2053 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2054 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2055 @@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
2056 /* handle for device - and atpx */
2057 acpi_handle dhandle;
2058 acpi_handle atpx_handle;
2059 - acpi_handle atrm_handle;
2060 } radeon_atpx_priv;
2061
2062 -/* retrieve the ROM in 4k blocks */
2063 -static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
2064 - int offset, int len)
2065 -{
2066 - acpi_status status;
2067 - union acpi_object atrm_arg_elements[2], *obj;
2068 - struct acpi_object_list atrm_arg;
2069 - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
2070 -
2071 - atrm_arg.count = 2;
2072 - atrm_arg.pointer = &atrm_arg_elements[0];
2073 -
2074 - atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
2075 - atrm_arg_elements[0].integer.value = offset;
2076 -
2077 - atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
2078 - atrm_arg_elements[1].integer.value = len;
2079 -
2080 - status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
2081 - if (ACPI_FAILURE(status)) {
2082 - printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
2083 - return -ENODEV;
2084 - }
2085 -
2086 - obj = (union acpi_object *)buffer.pointer;
2087 - memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
2088 - len = obj->buffer.length;
2089 - kfree(buffer.pointer);
2090 - return len;
2091 -}
2092 -
2093 -bool radeon_atrm_supported(struct pci_dev *pdev)
2094 -{
2095 - /* get the discrete ROM only via ATRM */
2096 - if (!radeon_atpx_priv.atpx_detected)
2097 - return false;
2098 -
2099 - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
2100 - return false;
2101 - return true;
2102 -}
2103 -
2104 -
2105 -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
2106 -{
2107 - return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
2108 -}
2109 -
2110 static int radeon_atpx_get_version(acpi_handle handle)
2111 {
2112 acpi_status status;
2113 @@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
2114
2115 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
2116 {
2117 - acpi_handle dhandle, atpx_handle, atrm_handle;
2118 + acpi_handle dhandle, atpx_handle;
2119 acpi_status status;
2120
2121 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
2122 @@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
2123 if (ACPI_FAILURE(status))
2124 return false;
2125
2126 - status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
2127 - if (ACPI_FAILURE(status))
2128 - return false;
2129 -
2130 radeon_atpx_priv.dhandle = dhandle;
2131 radeon_atpx_priv.atpx_handle = atpx_handle;
2132 - radeon_atpx_priv.atrm_handle = atrm_handle;
2133 return true;
2134 }
2135
2136 diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
2137 index 501f488..d306cc8 100644
2138 --- a/drivers/gpu/drm/radeon/radeon_bios.c
2139 +++ b/drivers/gpu/drm/radeon/radeon_bios.c
2140 @@ -32,6 +32,7 @@
2141
2142 #include <linux/vga_switcheroo.h>
2143 #include <linux/slab.h>
2144 +#include <linux/acpi.h>
2145 /*
2146 * BIOS.
2147 */
2148 @@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
2149 return true;
2150 }
2151
2152 +#ifdef CONFIG_ACPI
2153 /* ATRM is used to get the BIOS on the discrete cards in
2154 * dual-gpu systems.
2155 */
2156 +/* retrieve the ROM in 4k blocks */
2157 +#define ATRM_BIOS_PAGE 4096
2158 +/**
2159 + * radeon_atrm_call - fetch a chunk of the vbios
2160 + *
2161 + * @atrm_handle: acpi ATRM handle
2162 + * @bios: vbios image pointer
2163 + * @offset: offset of vbios image data to fetch
2164 + * @len: length of vbios image data to fetch
2165 + *
2166 + * Executes ATRM to fetch a chunk of the discrete
2167 + * vbios image on PX systems (all asics).
2168 + * Returns the length of the buffer fetched.
2169 + */
2170 +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
2171 + int offset, int len)
2172 +{
2173 + acpi_status status;
2174 + union acpi_object atrm_arg_elements[2], *obj;
2175 + struct acpi_object_list atrm_arg;
2176 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
2177 +
2178 + atrm_arg.count = 2;
2179 + atrm_arg.pointer = &atrm_arg_elements[0];
2180 +
2181 + atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
2182 + atrm_arg_elements[0].integer.value = offset;
2183 +
2184 + atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
2185 + atrm_arg_elements[1].integer.value = len;
2186 +
2187 + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
2188 + if (ACPI_FAILURE(status)) {
2189 + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
2190 + return -ENODEV;
2191 + }
2192 +
2193 + obj = (union acpi_object *)buffer.pointer;
2194 + memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
2195 + len = obj->buffer.length;
2196 + kfree(buffer.pointer);
2197 + return len;
2198 +}
2199 +
2200 static bool radeon_atrm_get_bios(struct radeon_device *rdev)
2201 {
2202 int ret;
2203 int size = 256 * 1024;
2204 int i;
2205 + struct pci_dev *pdev = NULL;
2206 + acpi_handle dhandle, atrm_handle;
2207 + acpi_status status;
2208 + bool found = false;
2209 +
2210 + /* ATRM is for the discrete card only */
2211 + if (rdev->flags & RADEON_IS_IGP)
2212 + return false;
2213 +
2214 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
2215 + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
2216 + if (!dhandle)
2217 + continue;
2218 +
2219 + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
2220 + if (!ACPI_FAILURE(status)) {
2221 + found = true;
2222 + break;
2223 + }
2224 + }
2225
2226 - if (!radeon_atrm_supported(rdev->pdev))
2227 + if (!found)
2228 return false;
2229
2230 rdev->bios = kmalloc(size, GFP_KERNEL);
2231 @@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
2232 }
2233
2234 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
2235 - ret = radeon_atrm_get_bios_chunk(rdev->bios,
2236 - (i * ATRM_BIOS_PAGE),
2237 - ATRM_BIOS_PAGE);
2238 + ret = radeon_atrm_call(atrm_handle,
2239 + rdev->bios,
2240 + (i * ATRM_BIOS_PAGE),
2241 + ATRM_BIOS_PAGE);
2242 if (ret < ATRM_BIOS_PAGE)
2243 break;
2244 }
2245 @@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
2246 }
2247 return true;
2248 }
2249 +#else
2250 +static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
2251 +{
2252 + return false;
2253 +}
2254 +#endif
2255
2256 static bool ni_read_disabled_bios(struct radeon_device *rdev)
2257 {
2258 @@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
2259 return legacy_read_disabled_bios(rdev);
2260 }
2261
2262 +#ifdef CONFIG_ACPI
2263 +static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
2264 +{
2265 + bool ret = false;
2266 + struct acpi_table_header *hdr;
2267 + acpi_size tbl_size;
2268 + UEFI_ACPI_VFCT *vfct;
2269 + GOP_VBIOS_CONTENT *vbios;
2270 + VFCT_IMAGE_HEADER *vhdr;
2271 +
2272 + if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
2273 + return false;
2274 + if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
2275 + DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
2276 + goto out_unmap;
2277 + }
2278 +
2279 + vfct = (UEFI_ACPI_VFCT *)hdr;
2280 + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
2281 + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
2282 + goto out_unmap;
2283 + }
2284 +
2285 + vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
2286 + vhdr = &vbios->VbiosHeader;
2287 + DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
2288 + vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
2289 + vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
2290 +
2291 + if (vhdr->PCIBus != rdev->pdev->bus->number ||
2292 + vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
2293 + vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
2294 + vhdr->VendorID != rdev->pdev->vendor ||
2295 + vhdr->DeviceID != rdev->pdev->device) {
2296 + DRM_INFO("ACPI VFCT table is not for this card\n");
2297 + goto out_unmap;
2298 + };
2299 +
2300 + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
2301 + DRM_ERROR("ACPI VFCT image truncated\n");
2302 + goto out_unmap;
2303 + }
2304 +
2305 + rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
2306 + ret = !!rdev->bios;
2307 +
2308 +out_unmap:
2309 + return ret;
2310 +}
2311 +#else
2312 +static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
2313 +{
2314 + return false;
2315 +}
2316 +#endif
2317
2318 bool radeon_get_bios(struct radeon_device *rdev)
2319 {
2320 @@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
2321
2322 r = radeon_atrm_get_bios(rdev);
2323 if (r == false)
2324 + r = radeon_acpi_vfct_bios(rdev);
2325 + if (r == false)
2326 r = igp_read_bios_from_vram(rdev);
2327 if (r == false)
2328 r = radeon_read_bios(rdev);
2329 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2330 index 5992502..de5e0b5 100644
2331 --- a/drivers/gpu/drm/radeon/radeon_device.c
2332 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2333 @@ -772,7 +772,7 @@ int radeon_device_init(struct radeon_device *rdev,
2334 if (rdev->flags & RADEON_IS_AGP)
2335 rdev->need_dma32 = true;
2336 if ((rdev->flags & RADEON_IS_PCI) &&
2337 - (rdev->family < CHIP_RS400))
2338 + (rdev->family <= CHIP_RS740))
2339 rdev->need_dma32 = true;
2340
2341 dma_bits = rdev->need_dma32 ? 32 : 40;
2342 diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
2343 index ba055e9..8d9dc44 100644
2344 --- a/drivers/gpu/drm/udl/udl_connector.c
2345 +++ b/drivers/gpu/drm/udl/udl_connector.c
2346 @@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
2347 static int udl_mode_valid(struct drm_connector *connector,
2348 struct drm_display_mode *mode)
2349 {
2350 + struct udl_device *udl = connector->dev->dev_private;
2351 + if (!udl->sku_pixel_limit)
2352 + return 0;
2353 +
2354 + if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
2355 + return MODE_VIRTUAL_Y;
2356 +
2357 return 0;
2358 }
2359
2360 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2361 index 7279b3e..3a4b15a 100644
2362 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2363 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2364 @@ -1159,6 +1159,11 @@ static struct drm_driver driver = {
2365 .open = vmw_driver_open,
2366 .preclose = vmw_preclose,
2367 .postclose = vmw_postclose,
2368 +
2369 + .dumb_create = vmw_dumb_create,
2370 + .dumb_map_offset = vmw_dumb_map_offset,
2371 + .dumb_destroy = vmw_dumb_destroy,
2372 +
2373 .fops = &vmwgfx_driver_fops,
2374 .name = VMWGFX_DRIVER_NAME,
2375 .desc = VMWGFX_DRIVER_DESC,
2376 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2377 index d0f2c07..29c984f 100644
2378 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2379 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2380 @@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
2381 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2382 struct drm_file *file_priv);
2383
2384 +int vmw_dumb_create(struct drm_file *file_priv,
2385 + struct drm_device *dev,
2386 + struct drm_mode_create_dumb *args);
2387 +
2388 +int vmw_dumb_map_offset(struct drm_file *file_priv,
2389 + struct drm_device *dev, uint32_t handle,
2390 + uint64_t *offset);
2391 +int vmw_dumb_destroy(struct drm_file *file_priv,
2392 + struct drm_device *dev,
2393 + uint32_t handle);
2394 /**
2395 * Overlay control - vmwgfx_overlay.c
2396 */
2397 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2398 index f2fb8f1..7e07433 100644
2399 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2400 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
2401 @@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
2402 }
2403
2404
2405 - event = kzalloc(sizeof(event->event), GFP_KERNEL);
2406 + event = kzalloc(sizeof(*event), GFP_KERNEL);
2407 if (unlikely(event == NULL)) {
2408 DRM_ERROR("Failed to allocate an event.\n");
2409 ret = -ENOMEM;
2410 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
2411 index a37abb5..059b32c 100644
2412 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
2413 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
2414 @@ -1917,3 +1917,76 @@ err_ref:
2415 vmw_resource_unreference(&res);
2416 return ret;
2417 }
2418 +
2419 +
2420 +int vmw_dumb_create(struct drm_file *file_priv,
2421 + struct drm_device *dev,
2422 + struct drm_mode_create_dumb *args)
2423 +{
2424 + struct vmw_private *dev_priv = vmw_priv(dev);
2425 + struct vmw_master *vmaster = vmw_master(file_priv->master);
2426 + struct vmw_user_dma_buffer *vmw_user_bo;
2427 + struct ttm_buffer_object *tmp;
2428 + int ret;
2429 +
2430 + args->pitch = args->width * ((args->bpp + 7) / 8);
2431 + args->size = args->pitch * args->height;
2432 +
2433 + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
2434 + if (vmw_user_bo == NULL)
2435 + return -ENOMEM;
2436 +
2437 + ret = ttm_read_lock(&vmaster->lock, true);
2438 + if (ret != 0) {
2439 + kfree(vmw_user_bo);
2440 + return ret;
2441 + }
2442 +
2443 + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
2444 + &vmw_vram_sys_placement, true,
2445 + &vmw_user_dmabuf_destroy);
2446 + if (ret != 0)
2447 + goto out_no_dmabuf;
2448 +
2449 + tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
2450 + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
2451 + &vmw_user_bo->base,
2452 + false,
2453 + ttm_buffer_type,
2454 + &vmw_user_dmabuf_release, NULL);
2455 + if (unlikely(ret != 0))
2456 + goto out_no_base_object;
2457 +
2458 + args->handle = vmw_user_bo->base.hash.key;
2459 +
2460 +out_no_base_object:
2461 + ttm_bo_unref(&tmp);
2462 +out_no_dmabuf:
2463 + ttm_read_unlock(&vmaster->lock);
2464 + return ret;
2465 +}
2466 +
2467 +int vmw_dumb_map_offset(struct drm_file *file_priv,
2468 + struct drm_device *dev, uint32_t handle,
2469 + uint64_t *offset)
2470 +{
2471 + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
2472 + struct vmw_dma_buffer *out_buf;
2473 + int ret;
2474 +
2475 + ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
2476 + if (ret != 0)
2477 + return -EINVAL;
2478 +
2479 + *offset = out_buf->base.addr_space_offset;
2480 + vmw_dmabuf_unreference(&out_buf);
2481 + return 0;
2482 +}
2483 +
2484 +int vmw_dumb_destroy(struct drm_file *file_priv,
2485 + struct drm_device *dev,
2486 + uint32_t handle)
2487 +{
2488 + return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
2489 + handle, TTM_REF_USAGE);
2490 +}
2491 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
2492 index d44ea58..88d2010 100644
2493 --- a/drivers/hid/hid-logitech-dj.c
2494 +++ b/drivers/hid/hid-logitech-dj.c
2495 @@ -185,6 +185,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
2496 static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
2497 size_t count,
2498 unsigned char report_type);
2499 +static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
2500
2501 static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
2502 struct dj_report *dj_report)
2503 @@ -225,6 +226,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
2504 if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
2505 SPFUNCTION_DEVICE_LIST_EMPTY) {
2506 dbg_hid("%s: device list is empty\n", __func__);
2507 + djrcv_dev->querying_devices = false;
2508 return;
2509 }
2510
2511 @@ -235,6 +237,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
2512 return;
2513 }
2514
2515 + if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
2516 + /* The device is already known. No need to reallocate it. */
2517 + dbg_hid("%s: device is already known\n", __func__);
2518 + return;
2519 + }
2520 +
2521 dj_hiddev = hid_allocate_device();
2522 if (IS_ERR(dj_hiddev)) {
2523 dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
2524 @@ -298,6 +306,7 @@ static void delayedwork_callback(struct work_struct *work)
2525 struct dj_report dj_report;
2526 unsigned long flags;
2527 int count;
2528 + int retval;
2529
2530 dbg_hid("%s\n", __func__);
2531
2532 @@ -330,6 +339,25 @@ static void delayedwork_callback(struct work_struct *work)
2533 logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
2534 break;
2535 default:
2536 + /* A normal report (i. e. not belonging to a pair/unpair notification)
2537 + * arriving here, means that the report arrived but we did not have a
2538 + * paired dj_device associated to the report's device_index, this
2539 + * means that the original "device paired" notification corresponding
2540 + * to this dj_device never arrived to this driver. The reason is that
2541 + * hid-core discards all packets coming from a device while probe() is
2542 + * executing. */
2543 + if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
2544 + /* ok, we don't know the device, just re-ask the
2545 + * receiver for the list of connected devices. */
2546 + retval = logi_dj_recv_query_paired_devices(djrcv_dev);
2547 + if (!retval) {
2548 + /* everything went fine, so just leave */
2549 + break;
2550 + }
2551 + dev_err(&djrcv_dev->hdev->dev,
2552 + "%s:logi_dj_recv_query_paired_devices "
2553 + "error:%d\n", __func__, retval);
2554 + }
2555 dbg_hid("%s: unexpected report type\n", __func__);
2556 }
2557 }
2558 @@ -360,6 +388,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
2559 if (!djdev) {
2560 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
2561 " is NULL, index %d\n", dj_report->device_index);
2562 + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
2563 +
2564 + if (schedule_work(&djrcv_dev->work) == 0) {
2565 + dbg_hid("%s: did not schedule the work item, was already "
2566 + "queued\n", __func__);
2567 + }
2568 return;
2569 }
2570
2571 @@ -390,6 +424,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
2572 if (dj_device == NULL) {
2573 dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
2574 " is NULL, index %d\n", dj_report->device_index);
2575 + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
2576 +
2577 + if (schedule_work(&djrcv_dev->work) == 0) {
2578 + dbg_hid("%s: did not schedule the work item, was already "
2579 + "queued\n", __func__);
2580 + }
2581 return;
2582 }
2583
2584 @@ -437,6 +477,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
2585 return logi_dj_recv_send_report(djrcv_dev, &dj_report);
2586 }
2587
2588 +
2589 static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
2590 unsigned timeout)
2591 {
2592 diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
2593 index fd28a5e..4a40003 100644
2594 --- a/drivers/hid/hid-logitech-dj.h
2595 +++ b/drivers/hid/hid-logitech-dj.h
2596 @@ -101,6 +101,7 @@ struct dj_receiver_dev {
2597 struct work_struct work;
2598 struct kfifo notif_fifo;
2599 spinlock_t lock;
2600 + bool querying_devices;
2601 };
2602
2603 struct dj_device {
2604 diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
2605 index f85ce70..9815f9c 100644
2606 --- a/drivers/hwmon/ad7314.c
2607 +++ b/drivers/hwmon/ad7314.c
2608 @@ -94,10 +94,18 @@ static ssize_t ad7314_show_temperature(struct device *dev,
2609 }
2610 }
2611
2612 +static ssize_t ad7314_show_name(struct device *dev,
2613 + struct device_attribute *devattr, char *buf)
2614 +{
2615 + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
2616 +}
2617 +
2618 +static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
2619 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
2620 ad7314_show_temperature, NULL, 0);
2621
2622 static struct attribute *ad7314_attributes[] = {
2623 + &dev_attr_name.attr,
2624 &sensor_dev_attr_temp1_input.dev_attr.attr,
2625 NULL,
2626 };
2627 diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
2628 index e65c6e4..7bf4ce3 100644
2629 --- a/drivers/hwmon/ads7871.c
2630 +++ b/drivers/hwmon/ads7871.c
2631 @@ -139,6 +139,12 @@ static ssize_t show_voltage(struct device *dev,
2632 }
2633 }
2634
2635 +static ssize_t ads7871_show_name(struct device *dev,
2636 + struct device_attribute *devattr, char *buf)
2637 +{
2638 + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
2639 +}
2640 +
2641 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
2642 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
2643 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
2644 @@ -148,6 +154,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
2645 static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
2646 static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
2647
2648 +static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
2649 +
2650 static struct attribute *ads7871_attributes[] = {
2651 &sensor_dev_attr_in0_input.dev_attr.attr,
2652 &sensor_dev_attr_in1_input.dev_attr.attr,
2653 @@ -157,6 +165,7 @@ static struct attribute *ads7871_attributes[] = {
2654 &sensor_dev_attr_in5_input.dev_attr.attr,
2655 &sensor_dev_attr_in6_input.dev_attr.attr,
2656 &sensor_dev_attr_in7_input.dev_attr.attr,
2657 + &dev_attr_name.attr,
2658 NULL
2659 };
2660
2661 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
2662 index e8e18ca..ac2d6cb 100644
2663 --- a/drivers/hwmon/fam15h_power.c
2664 +++ b/drivers/hwmon/fam15h_power.c
2665 @@ -128,12 +128,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
2666 * counter saturations resulting in bogus power readings.
2667 * We correct this value ourselves to cope with older BIOSes.
2668 */
2669 -static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
2670 +static const struct pci_device_id affected_device[] = {
2671 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
2672 { 0 }
2673 };
2674
2675 -static void __devinit tweak_runavg_range(struct pci_dev *pdev)
2676 +static void tweak_runavg_range(struct pci_dev *pdev)
2677 {
2678 u32 val;
2679
2680 @@ -157,6 +157,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev)
2681 REG_TDP_RUNNING_AVERAGE, val);
2682 }
2683
2684 +#ifdef CONFIG_PM
2685 +static int fam15h_power_resume(struct pci_dev *pdev)
2686 +{
2687 + tweak_runavg_range(pdev);
2688 + return 0;
2689 +}
2690 +#else
2691 +#define fam15h_power_resume NULL
2692 +#endif
2693 +
2694 static void __devinit fam15h_power_init_data(struct pci_dev *f4,
2695 struct fam15h_power_data *data)
2696 {
2697 @@ -255,6 +265,7 @@ static struct pci_driver fam15h_power_driver = {
2698 .id_table = fam15h_power_id_table,
2699 .probe = fam15h_power_probe,
2700 .remove = __devexit_p(fam15h_power_remove),
2701 + .resume = fam15h_power_resume,
2702 };
2703
2704 static int __init fam15h_power_init(void)
2705 diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
2706 index 0018c7d..1a174f0 100644
2707 --- a/drivers/hwmon/twl4030-madc-hwmon.c
2708 +++ b/drivers/hwmon/twl4030-madc-hwmon.c
2709 @@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
2710 struct device_attribute *devattr, char *buf)
2711 {
2712 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
2713 - struct twl4030_madc_request req;
2714 + struct twl4030_madc_request req = {
2715 + .channels = 1 << attr->index,
2716 + .method = TWL4030_MADC_SW2,
2717 + .type = TWL4030_MADC_WAIT,
2718 + };
2719 long val;
2720
2721 - req.channels = (1 << attr->index);
2722 - req.method = TWL4030_MADC_SW2;
2723 - req.func_cb = NULL;
2724 val = twl4030_madc_conversion(&req);
2725 if (val < 0)
2726 return val;
2727 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2728 index 6918773..d6cc77a 100644
2729 --- a/drivers/input/serio/i8042-x86ia64io.h
2730 +++ b/drivers/input/serio/i8042-x86ia64io.h
2731 @@ -335,6 +335,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
2732 },
2733 {
2734 .matches = {
2735 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
2736 + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
2737 + },
2738 + },
2739 + {
2740 + .matches = {
2741 DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
2742 DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
2743 },
2744 diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
2745 index 6777ca0..73ca321 100644
2746 --- a/drivers/iommu/intr_remapping.c
2747 +++ b/drivers/iommu/intr_remapping.c
2748 @@ -752,6 +752,7 @@ int __init parse_ioapics_under_ir(void)
2749 {
2750 struct dmar_drhd_unit *drhd;
2751 int ir_supported = 0;
2752 + int ioapic_idx;
2753
2754 for_each_drhd_unit(drhd) {
2755 struct intel_iommu *iommu = drhd->iommu;
2756 @@ -764,13 +765,20 @@ int __init parse_ioapics_under_ir(void)
2757 }
2758 }
2759
2760 - if (ir_supported && ir_ioapic_num != nr_ioapics) {
2761 - printk(KERN_WARNING
2762 - "Not all IO-APIC's listed under remapping hardware\n");
2763 - return -1;
2764 + if (!ir_supported)
2765 + return 0;
2766 +
2767 + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
2768 + int ioapic_id = mpc_ioapic_id(ioapic_idx);
2769 + if (!map_ioapic_to_ir(ioapic_id)) {
2770 + pr_err(FW_BUG "ioapic %d has no mapping iommu, "
2771 + "interrupt remapping will be disabled\n",
2772 + ioapic_id);
2773 + return -1;
2774 + }
2775 }
2776
2777 - return ir_supported;
2778 + return 1;
2779 }
2780
2781 int __init ir_dev_scope_init(void)
2782 diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
2783 index 5405ec6..baf2686 100644
2784 --- a/drivers/isdn/isdnloop/isdnloop.c
2785 +++ b/drivers/isdn/isdnloop/isdnloop.c
2786 @@ -16,7 +16,6 @@
2787 #include <linux/sched.h>
2788 #include "isdnloop.h"
2789
2790 -static char *revision = "$Revision: 1.11.6.7 $";
2791 static char *isdnloop_id = "loop0";
2792
2793 MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
2794 @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
2795 static int __init
2796 isdnloop_init(void)
2797 {
2798 - char *p;
2799 - char rev[10];
2800 -
2801 - if ((p = strchr(revision, ':'))) {
2802 - strcpy(rev, p + 1);
2803 - p = strchr(rev, '$');
2804 - *p = 0;
2805 - } else
2806 - strcpy(rev, " ??? ");
2807 - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
2808 -
2809 if (isdnloop_id)
2810 return (isdnloop_addcard(isdnloop_id));
2811
2812 diff --git a/drivers/md/md.c b/drivers/md/md.c
2813 index 9ee8ce3..529ce89 100644
2814 --- a/drivers/md/md.c
2815 +++ b/drivers/md/md.c
2816 @@ -1143,8 +1143,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
2817 ret = 0;
2818 }
2819 rdev->sectors = rdev->sb_start;
2820 - /* Limit to 4TB as metadata cannot record more than that */
2821 - if (rdev->sectors >= (2ULL << 32))
2822 + /* Limit to 4TB as metadata cannot record more than that.
2823 + * (not needed for Linear and RAID0 as metadata doesn't
2824 + * record this size)
2825 + */
2826 + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
2827 rdev->sectors = (2ULL << 32) - 2;
2828
2829 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
2830 @@ -1426,7 +1429,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2831 /* Limit to 4TB as metadata cannot record more than that.
2832 * 4TB == 2^32 KB, or 2*2^32 sectors.
2833 */
2834 - if (num_sectors >= (2ULL << 32))
2835 + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
2836 num_sectors = (2ULL << 32) - 2;
2837 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2838 rdev->sb_page);
2839 @@ -7417,6 +7420,8 @@ static int remove_and_add_spares(struct mddev *mddev)
2840 }
2841 }
2842 }
2843 + if (removed)
2844 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
2845 return spares;
2846 }
2847
2848 @@ -7430,9 +7435,11 @@ static void reap_sync_thread(struct mddev *mddev)
2849 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2850 /* success...*/
2851 /* activate any spares */
2852 - if (mddev->pers->spare_active(mddev))
2853 + if (mddev->pers->spare_active(mddev)) {
2854 sysfs_notify(&mddev->kobj, NULL,
2855 "degraded");
2856 + set_bit(MD_CHANGE_DEVS, &mddev->flags);
2857 + }
2858 }
2859 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2860 mddev->pers->finish_reshape)
2861 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2862 index a954c95..1f7e8cd 100644
2863 --- a/drivers/md/raid10.c
2864 +++ b/drivers/md/raid10.c
2865 @@ -612,20 +612,24 @@ static int raid10_mergeable_bvec(struct request_queue *q,
2866 max = biovec->bv_len;
2867
2868 if (mddev->merge_check_needed) {
2869 - struct r10bio r10_bio;
2870 + struct {
2871 + struct r10bio r10_bio;
2872 + struct r10dev devs[conf->copies];
2873 + } on_stack;
2874 + struct r10bio *r10_bio = &on_stack.r10_bio;
2875 int s;
2876 - r10_bio.sector = sector;
2877 - raid10_find_phys(conf, &r10_bio);
2878 + r10_bio->sector = sector;
2879 + raid10_find_phys(conf, r10_bio);
2880 rcu_read_lock();
2881 for (s = 0; s < conf->copies; s++) {
2882 - int disk = r10_bio.devs[s].devnum;
2883 + int disk = r10_bio->devs[s].devnum;
2884 struct md_rdev *rdev = rcu_dereference(
2885 conf->mirrors[disk].rdev);
2886 if (rdev && !test_bit(Faulty, &rdev->flags)) {
2887 struct request_queue *q =
2888 bdev_get_queue(rdev->bdev);
2889 if (q->merge_bvec_fn) {
2890 - bvm->bi_sector = r10_bio.devs[s].addr
2891 + bvm->bi_sector = r10_bio->devs[s].addr
2892 + rdev->data_offset;
2893 bvm->bi_bdev = rdev->bdev;
2894 max = min(max, q->merge_bvec_fn(
2895 @@ -637,7 +641,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
2896 struct request_queue *q =
2897 bdev_get_queue(rdev->bdev);
2898 if (q->merge_bvec_fn) {
2899 - bvm->bi_sector = r10_bio.devs[s].addr
2900 + bvm->bi_sector = r10_bio->devs[s].addr
2901 + rdev->data_offset;
2902 bvm->bi_bdev = rdev->bdev;
2903 max = min(max, q->merge_bvec_fn(
2904 diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
2905 index 7c615613..24d45b8 100644
2906 --- a/drivers/md/raid10.h
2907 +++ b/drivers/md/raid10.h
2908 @@ -104,7 +104,7 @@ struct r10bio {
2909 * We choose the number when they are allocated.
2910 * We sometimes need an extra bio to write to the replacement.
2911 */
2912 - struct {
2913 + struct r10dev {
2914 struct bio *bio;
2915 union {
2916 struct bio *repl_bio; /* used for resync and
2917 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2918 index 73a5800..0240576 100644
2919 --- a/drivers/md/raid5.c
2920 +++ b/drivers/md/raid5.c
2921 @@ -380,6 +380,8 @@ static int calc_degraded(struct r5conf *conf)
2922 degraded = 0;
2923 for (i = 0; i < conf->previous_raid_disks; i++) {
2924 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
2925 + if (rdev && test_bit(Faulty, &rdev->flags))
2926 + rdev = rcu_dereference(conf->disks[i].replacement);
2927 if (!rdev || test_bit(Faulty, &rdev->flags))
2928 degraded++;
2929 else if (test_bit(In_sync, &rdev->flags))
2930 @@ -404,6 +406,8 @@ static int calc_degraded(struct r5conf *conf)
2931 degraded2 = 0;
2932 for (i = 0; i < conf->raid_disks; i++) {
2933 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
2934 + if (rdev && test_bit(Faulty, &rdev->flags))
2935 + rdev = rcu_dereference(conf->disks[i].replacement);
2936 if (!rdev || test_bit(Faulty, &rdev->flags))
2937 degraded2++;
2938 else if (test_bit(In_sync, &rdev->flags))
2939 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2940 index 6e16b09..cabc19c 100644
2941 --- a/drivers/media/rc/rc-main.c
2942 +++ b/drivers/media/rc/rc-main.c
2943 @@ -775,10 +775,11 @@ static ssize_t show_protocols(struct device *device,
2944 if (dev->driver_type == RC_DRIVER_SCANCODE) {
2945 enabled = dev->rc_map.rc_type;
2946 allowed = dev->allowed_protos;
2947 - } else {
2948 + } else if (dev->raw) {
2949 enabled = dev->raw->enabled_protocols;
2950 allowed = ir_raw_get_allowed_protocols();
2951 - }
2952 + } else
2953 + return -ENODEV;
2954
2955 IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
2956 (long long)allowed,
2957 diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
2958 index 7930ca5..235bf7d 100644
2959 --- a/drivers/media/video/cx25821/cx25821-core.c
2960 +++ b/drivers/media/video/cx25821/cx25821-core.c
2961 @@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
2962 list_add_tail(&dev->devlist, &cx25821_devlist);
2963 mutex_unlock(&cx25821_devlist_mutex);
2964
2965 - strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
2966 - strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
2967 -
2968 if (dev->pci->device != 0x8210) {
2969 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
2970 __func__, dev->pci->device);
2971 diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
2972 index b9aa801..029f293 100644
2973 --- a/drivers/media/video/cx25821/cx25821.h
2974 +++ b/drivers/media/video/cx25821/cx25821.h
2975 @@ -187,7 +187,7 @@ enum port {
2976 };
2977
2978 struct cx25821_board {
2979 - char *name;
2980 + const char *name;
2981 enum port porta;
2982 enum port portb;
2983 enum port portc;
2984 diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
2985 index 17bbacb..cc2ae7e 100644
2986 --- a/drivers/misc/sgi-xp/xpc_uv.c
2987 +++ b/drivers/misc/sgi-xp/xpc_uv.c
2988 @@ -18,6 +18,8 @@
2989 #include <linux/interrupt.h>
2990 #include <linux/delay.h>
2991 #include <linux/device.h>
2992 +#include <linux/cpu.h>
2993 +#include <linux/module.h>
2994 #include <linux/err.h>
2995 #include <linux/slab.h>
2996 #include <asm/uv/uv_hub.h>
2997 @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
2998 XPC_NOTIFY_MSG_SIZE_UV)
2999 #define XPC_NOTIFY_IRQ_NAME "xpc_notify"
3000
3001 +static int xpc_mq_node = -1;
3002 +
3003 static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
3004 static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
3005
3006 @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
3007 #if defined CONFIG_X86_64
3008 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
3009 UV_AFFINITY_CPU);
3010 - if (mq->irq < 0) {
3011 - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
3012 - -mq->irq);
3013 + if (mq->irq < 0)
3014 return mq->irq;
3015 - }
3016
3017 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
3018
3019 @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
3020 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
3021
3022 nid = cpu_to_node(cpu);
3023 - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
3024 - pg_order);
3025 + page = alloc_pages_exact_node(nid,
3026 + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
3027 + pg_order);
3028 if (page == NULL) {
3029 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
3030 "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
3031 @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
3032 .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
3033 };
3034
3035 +static int
3036 +xpc_init_mq_node(int nid)
3037 +{
3038 + int cpu;
3039 +
3040 + get_online_cpus();
3041 +
3042 + for_each_cpu(cpu, cpumask_of_node(nid)) {
3043 + xpc_activate_mq_uv =
3044 + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
3045 + XPC_ACTIVATE_IRQ_NAME,
3046 + xpc_handle_activate_IRQ_uv);
3047 + if (!IS_ERR(xpc_activate_mq_uv))
3048 + break;
3049 + }
3050 + if (IS_ERR(xpc_activate_mq_uv)) {
3051 + put_online_cpus();
3052 + return PTR_ERR(xpc_activate_mq_uv);
3053 + }
3054 +
3055 + for_each_cpu(cpu, cpumask_of_node(nid)) {
3056 + xpc_notify_mq_uv =
3057 + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
3058 + XPC_NOTIFY_IRQ_NAME,
3059 + xpc_handle_notify_IRQ_uv);
3060 + if (!IS_ERR(xpc_notify_mq_uv))
3061 + break;
3062 + }
3063 + if (IS_ERR(xpc_notify_mq_uv)) {
3064 + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
3065 + put_online_cpus();
3066 + return PTR_ERR(xpc_notify_mq_uv);
3067 + }
3068 +
3069 + put_online_cpus();
3070 + return 0;
3071 +}
3072 +
3073 int
3074 xpc_init_uv(void)
3075 {
3076 + int nid;
3077 + int ret = 0;
3078 +
3079 xpc_arch_ops = xpc_arch_ops_uv;
3080
3081 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
3082 @@ -1742,21 +1785,21 @@ xpc_init_uv(void)
3083 return -E2BIG;
3084 }
3085
3086 - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
3087 - XPC_ACTIVATE_IRQ_NAME,
3088 - xpc_handle_activate_IRQ_uv);
3089 - if (IS_ERR(xpc_activate_mq_uv))
3090 - return PTR_ERR(xpc_activate_mq_uv);
3091 + if (xpc_mq_node < 0)
3092 + for_each_online_node(nid) {
3093 + ret = xpc_init_mq_node(nid);
3094
3095 - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
3096 - XPC_NOTIFY_IRQ_NAME,
3097 - xpc_handle_notify_IRQ_uv);
3098 - if (IS_ERR(xpc_notify_mq_uv)) {
3099 - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
3100 - return PTR_ERR(xpc_notify_mq_uv);
3101 - }
3102 + if (!ret)
3103 + break;
3104 + }
3105 + else
3106 + ret = xpc_init_mq_node(xpc_mq_node);
3107
3108 - return 0;
3109 + if (ret < 0)
3110 + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
3111 + -ret);
3112 +
3113 + return ret;
3114 }
3115
3116 void
3117 @@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
3118 xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
3119 xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
3120 }
3121 +
3122 +module_param(xpc_mq_node, int, 0);
3123 +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
3124 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
3125 index dabec55..833ff16 100644
3126 --- a/drivers/mmc/card/block.c
3127 +++ b/drivers/mmc/card/block.c
3128 @@ -1430,7 +1430,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
3129 /* complete ongoing async transfer before issuing discard */
3130 if (card->host->areq)
3131 mmc_blk_issue_rw_rq(mq, NULL);
3132 - if (req->cmd_flags & REQ_SECURE)
3133 + if (req->cmd_flags & REQ_SECURE &&
3134 + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
3135 ret = mmc_blk_issue_secdiscard_rq(mq, req);
3136 else
3137 ret = mmc_blk_issue_discard_rq(mq, req);
3138 @@ -1730,6 +1731,7 @@ force_ro_fail:
3139 #define CID_MANFID_SANDISK 0x2
3140 #define CID_MANFID_TOSHIBA 0x11
3141 #define CID_MANFID_MICRON 0x13
3142 +#define CID_MANFID_SAMSUNG 0x15
3143
3144 static const struct mmc_fixup blk_fixups[] =
3145 {
3146 @@ -1766,6 +1768,28 @@ static const struct mmc_fixup blk_fixups[] =
3147 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
3148 MMC_QUIRK_LONG_READ_TIME),
3149
3150 + /*
3151 + * On these Samsung MoviNAND parts, performing secure erase or
3152 + * secure trim can result in unrecoverable corruption due to a
3153 + * firmware bug.
3154 + */
3155 + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3156 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3157 + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3158 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3159 + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3160 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3161 + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3162 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3163 + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3164 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3165 + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3166 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3167 + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3168 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3169 + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
3170 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
3171 +
3172 END_FIXUP
3173 };
3174
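
The mmc block.c change above makes the secure-discard path conditional on a per-card quirk bit, so the listed Samsung MoviNAND parts fall back to an ordinary discard rather than the secure erase/trim that their firmware corrupts. A quirk is just a bit in the card's flag word; a small sketch of the gating test (the flag values here are invented for illustration, not the kernel's):

#include <stdio.h>

#define REQ_SECURE                      (1u << 0)  /* illustrative values only */
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1u << 1)

static const char *discard_path(unsigned int cmd_flags, unsigned int quirks)
{
        if ((cmd_flags & REQ_SECURE) &&
            !(quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
                return "secure discard";
        return "plain discard";
}

int main(void)
{
        printf("healthy card: %s\n",
               discard_path(REQ_SECURE, 0));
        printf("quirky card:  %s\n",
               discard_path(REQ_SECURE, MMC_QUIRK_SEC_ERASE_TRIM_BROKEN));
        return 0;
}
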
3175 diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
3176 index e3f5af9..b6def20 100644
3177 --- a/drivers/mmc/host/mxs-mmc.c
3178 +++ b/drivers/mmc/host/mxs-mmc.c
3179 @@ -280,11 +280,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
3180 writel(stat & MXS_MMC_IRQ_BITS,
3181 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
3182
3183 + spin_unlock(&host->lock);
3184 +
3185 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
3186 mmc_signal_sdio_irq(host->mmc);
3187
3188 - spin_unlock(&host->lock);
3189 -
3190 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
3191 cmd->error = -ETIMEDOUT;
3192 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
3193 diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
3194 index b97b2f5..d25f9ab 100644
3195 --- a/drivers/mmc/host/sdhci-esdhc.h
3196 +++ b/drivers/mmc/host/sdhci-esdhc.h
3197 @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
3198 int div = 1;
3199 u32 temp;
3200
3201 + if (clock == 0)
3202 + goto out;
3203 +
3204 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
3205 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
3206 | ESDHC_CLOCK_MASK);
3207 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
3208
3209 - if (clock == 0)
3210 - goto out;
3211 -
3212 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
3213 pre_div *= 2;
3214
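
The sdhci-esdhc hunk moves the clock==0 bail-out ahead of the register writes, so a "switch the clock off" request no longer reprograms the divider logic on its way out. The prescaler search kept at the end of the hunk then doubles pre_div until max_clk / pre_div / 16 no longer exceeds the requested clock. A standalone version of that arithmetic (the starting pre_div and the clock figures are assumptions for illustration; the full routine also selects a secondary divisor not shown in this hunk):

#include <stdio.h>

static unsigned int pick_pre_div(unsigned int max_clk, unsigned int clock)
{
        unsigned int pre_div = 1;       /* starting value assumed for this sketch */

        while (max_clk / pre_div / 16 > clock && pre_div < 256)
                pre_div *= 2;
        return pre_div;
}

int main(void)
{
        unsigned int max_clk = 96000000;        /* e.g. a 96 MHz base clock */

        printf("50 MHz  target -> pre_div %u\n", pick_pre_div(max_clk, 50000000));
        printf("400 kHz target -> pre_div %u\n", pick_pre_div(max_clk, 400000));
        return 0;
}
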
3215 diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
3216 index 17cec0c..c015fc0 100644
3217 --- a/drivers/mtd/ubi/vtbl.c
3218 +++ b/drivers/mtd/ubi/vtbl.c
3219 @@ -346,7 +346,7 @@ retry:
3220 */
3221 err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
3222 vid_hdr, 0);
3223 - kfree(new_seb);
3224 + kmem_cache_free(si->scan_leb_slab, new_seb);
3225 ubi_free_vid_hdr(ubi, vid_hdr);
3226 return err;
3227
3228 @@ -359,7 +359,7 @@ write_error:
3229 list_add(&new_seb->u.list, &si->erase);
3230 goto retry;
3231 }
3232 - kfree(new_seb);
3233 + kmem_cache_free(si->scan_leb_slab, new_seb);
3234 out_free:
3235 ubi_free_vid_hdr(ubi, vid_hdr);
3236 return err;
3237 diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
3238 index 08c893c..e7823dd 100644
3239 --- a/drivers/net/can/janz-ican3.c
3240 +++ b/drivers/net/can/janz-ican3.c
3241 @@ -1250,7 +1250,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
3242 */
3243 static int ican3_reset_module(struct ican3_dev *mod)
3244 {
3245 - u8 val = 1 << mod->num;
3246 unsigned long start;
3247 u8 runold, runnew;
3248
3249 @@ -1264,8 +1263,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
3250 runold = ioread8(mod->dpm + TARGET_RUNNING);
3251
3252 /* reset the module */
3253 - iowrite8(val, &mod->ctrl->reset_assert);
3254 - iowrite8(val, &mod->ctrl->reset_deassert);
3255 + iowrite8(0x00, &mod->dpmctrl->hwreset);
3256
3257 /* wait until the module has finished resetting and is running */
3258 start = jiffies;
3259 diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
3260 index 346785c..9d60742 100644
3261 --- a/drivers/net/can/mcp251x.c
3262 +++ b/drivers/net/can/mcp251x.c
3263 @@ -83,6 +83,11 @@
3264 #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
3265 #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
3266 #define INSTRUCTION_RESET 0xC0
3267 +#define RTS_TXB0 0x01
3268 +#define RTS_TXB1 0x02
3269 +#define RTS_TXB2 0x04
3270 +#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
3271 +
3272
3273 /* MPC251x registers */
3274 #define CANSTAT 0x0e
3275 @@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
3276 static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
3277 int tx_buf_idx)
3278 {
3279 + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
3280 u32 sid, eid, exide, rtr;
3281 u8 buf[SPI_TRANSFER_BUF_LEN];
3282
3283 @@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
3284 buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
3285 memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
3286 mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
3287 - mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
3288 +
3289 + /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
3290 + priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
3291 + mcp251x_spi_trans(priv->spi, 1);
3292 }
3293
3294 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
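
Instead of writing TXBCTRL_TXREQ through a register write, the patched mcp251x_hw_tx() now kicks transmission with the controller's one-byte RTS (request-to-send) SPI instruction: opcode 0x80 ORed with one bit per transmit buffer, which avoids the "repeated frame problem" mentioned in the comment. The encoding is easy to check in isolation:

#include <stdio.h>

#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))   /* as defined in the hunk above */

int main(void)
{
        int idx;

        /* one RTS opcode per TX buffer: 0x81, 0x82, 0x84 */
        for (idx = 0; idx < 3; idx++)
                printf("TXB%d -> RTS opcode 0x%02x\n", idx,
                       INSTRUCTION_RTS(1 << idx));
        return 0;
}
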
3295 diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
3296 index 4accd7e..5ec2700 100644
3297 --- a/drivers/net/can/ti_hecc.c
3298 +++ b/drivers/net/can/ti_hecc.c
3299 @@ -984,12 +984,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
3300 struct net_device *ndev = platform_get_drvdata(pdev);
3301 struct ti_hecc_priv *priv = netdev_priv(ndev);
3302
3303 + unregister_candev(ndev);
3304 clk_disable(priv->clk);
3305 clk_put(priv->clk);
3306 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3307 iounmap(priv->base);
3308 release_mem_region(res->start, resource_size(res));
3309 - unregister_candev(ndev);
3310 free_candev(ndev);
3311 platform_set_drvdata(pdev, NULL);
3312
3313 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
3314 index 24381e1..0819a74 100644
3315 --- a/drivers/net/ethernet/freescale/gianfar.c
3316 +++ b/drivers/net/ethernet/freescale/gianfar.c
3317 @@ -1037,7 +1037,7 @@ static int gfar_probe(struct platform_device *ofdev)
3318
3319 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3320 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3321 - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3322 + dev->features |= NETIF_F_HW_VLAN_RX;
3323 }
3324
3325 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
3326 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
3327 index 9010cea..b68d28a 100644
3328 --- a/drivers/net/ethernet/ibm/ibmveth.c
3329 +++ b/drivers/net/ethernet/ibm/ibmveth.c
3330 @@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
3331 }
3332
3333 if (adapter->rx_queue.queue_addr != NULL) {
3334 - if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
3335 - dma_unmap_single(dev,
3336 - adapter->rx_queue.queue_dma,
3337 - adapter->rx_queue.queue_len,
3338 - DMA_BIDIRECTIONAL);
3339 - adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
3340 - }
3341 - kfree(adapter->rx_queue.queue_addr);
3342 + dma_free_coherent(dev, adapter->rx_queue.queue_len,
3343 + adapter->rx_queue.queue_addr,
3344 + adapter->rx_queue.queue_dma);
3345 adapter->rx_queue.queue_addr = NULL;
3346 }
3347
3348 @@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
3349 goto err_out;
3350 }
3351
3352 + dev = &adapter->vdev->dev;
3353 +
3354 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
3355 rxq_entries;
3356 - adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
3357 - GFP_KERNEL);
3358 + adapter->rx_queue.queue_addr =
3359 + dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
3360 + &adapter->rx_queue.queue_dma, GFP_KERNEL);
3361
3362 if (!adapter->rx_queue.queue_addr) {
3363 netdev_err(netdev, "unable to allocate rx queue pages\n");
3364 @@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
3365 goto err_out;
3366 }
3367
3368 - dev = &adapter->vdev->dev;
3369 -
3370 adapter->buffer_list_dma = dma_map_single(dev,
3371 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
3372 adapter->filter_list_dma = dma_map_single(dev,
3373 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
3374 - adapter->rx_queue.queue_dma = dma_map_single(dev,
3375 - adapter->rx_queue.queue_addr,
3376 - adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
3377
3378 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
3379 - (dma_mapping_error(dev, adapter->filter_list_dma)) ||
3380 - (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
3381 + (dma_mapping_error(dev, adapter->filter_list_dma))) {
3382 netdev_err(netdev, "unable to map filter or buffer list "
3383 "pages\n");
3384 rc = -ENOMEM;
3385 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
3386 index a73bbe7..5fb74c4 100644
3387 --- a/drivers/net/ethernet/realtek/r8169.c
3388 +++ b/drivers/net/ethernet/realtek/r8169.c
3389 @@ -3737,6 +3737,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
3390 case RTL_GIGA_MAC_VER_22:
3391 case RTL_GIGA_MAC_VER_23:
3392 case RTL_GIGA_MAC_VER_24:
3393 + case RTL_GIGA_MAC_VER_34:
3394 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
3395 break;
3396 default:
3397 diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
3398 index 4a00053..954b885 100644
3399 --- a/drivers/net/ethernet/sfc/efx.c
3400 +++ b/drivers/net/ethernet/sfc/efx.c
3401 @@ -1498,6 +1498,11 @@ static int efx_probe_all(struct efx_nic *efx)
3402 goto fail2;
3403 }
3404
3405 + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
3406 + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
3407 + rc = -EINVAL;
3408 + goto fail3;
3409 + }
3410 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
3411
3412 rc = efx_probe_filters(efx);
3413 @@ -2065,6 +2070,7 @@ static int efx_register_netdev(struct efx_nic *efx)
3414 net_dev->irq = efx->pci_dev->irq;
3415 net_dev->netdev_ops = &efx_netdev_ops;
3416 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
3417 + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
3418
3419 rtnl_lock();
3420
3421 diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
3422 index be8f915..70755c9 100644
3423 --- a/drivers/net/ethernet/sfc/efx.h
3424 +++ b/drivers/net/ethernet/sfc/efx.h
3425 @@ -30,6 +30,7 @@ extern netdev_tx_t
3426 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
3427 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
3428 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
3429 +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
3430
3431 /* RX */
3432 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
3433 @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
3434 #define EFX_MAX_EVQ_SIZE 16384UL
3435 #define EFX_MIN_EVQ_SIZE 512UL
3436
3437 -/* The smallest [rt]xq_entries that the driver supports. Callers of
3438 - * efx_wake_queue() assume that they can subsequently send at least one
3439 - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
3440 -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
3441 +/* Maximum number of TCP segments we support for soft-TSO */
3442 +#define EFX_TSO_MAX_SEGS 100
3443 +
3444 +/* The smallest [rt]xq_entries that the driver supports. RX minimum
3445 + * is a bit arbitrary. For TX, we must have space for at least 2
3446 + * TSO skbs.
3447 + */
3448 +#define EFX_RXQ_MIN_ENT 128U
3449 +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
3450
3451 /* Filters */
3452 extern int efx_probe_filters(struct efx_nic *efx);
3453 diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
3454 index f22f45f..ff64def 100644
3455 --- a/drivers/net/ethernet/sfc/ethtool.c
3456 +++ b/drivers/net/ethernet/sfc/ethtool.c
3457 @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
3458 struct ethtool_ringparam *ring)
3459 {
3460 struct efx_nic *efx = netdev_priv(net_dev);
3461 + u32 txq_entries;
3462
3463 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
3464 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
3465 ring->tx_pending > EFX_MAX_DMAQ_SIZE)
3466 return -EINVAL;
3467
3468 - if (ring->rx_pending < EFX_MIN_RING_SIZE ||
3469 - ring->tx_pending < EFX_MIN_RING_SIZE) {
3470 + if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
3471 netif_err(efx, drv, efx->net_dev,
3472 - "TX and RX queues cannot be smaller than %ld\n",
3473 - EFX_MIN_RING_SIZE);
3474 + "RX queues cannot be smaller than %u\n",
3475 + EFX_RXQ_MIN_ENT);
3476 return -EINVAL;
3477 }
3478
3479 - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
3480 + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
3481 + if (txq_entries != ring->tx_pending)
3482 + netif_warn(efx, drv, efx->net_dev,
3483 + "increasing TX queue size to minimum of %u\n",
3484 + txq_entries);
3485 +
3486 + return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
3487 }
3488
3489 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
3490 @@ -857,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
3491 &ip_entry->ip4dst, &ip_entry->pdst);
3492 if (rc != 0) {
3493 rc = efx_filter_get_ipv4_full(
3494 - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
3495 - &ip_entry->ip4dst, &ip_entry->pdst);
3496 + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
3497 + &ip_entry->ip4src, &ip_entry->psrc);
3498 EFX_WARN_ON_PARANOID(rc);
3499 ip_mask->ip4src = ~0;
3500 ip_mask->psrc = ~0;
3501 diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
3502 index 94d0365..305430d 100644
3503 --- a/drivers/net/ethernet/sfc/tx.c
3504 +++ b/drivers/net/ethernet/sfc/tx.c
3505 @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
3506 return len;
3507 }
3508
3509 +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
3510 +{
3511 + /* Header and payload descriptor for each output segment, plus
3512 + * one for every input fragment boundary within a segment
3513 + */
3514 + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
3515 +
3516 + /* Possibly one more per segment for the alignment workaround */
3517 + if (EFX_WORKAROUND_5391(efx))
3518 + max_descs += EFX_TSO_MAX_SEGS;
3519 +
3520 + /* Possibly more for PCIe page boundaries within input fragments */
3521 + if (PAGE_SIZE > EFX_PAGE_SIZE)
3522 + max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
3523 + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
3524 +
3525 + return max_descs;
3526 +}
3527 +
3528 /*
3529 * Add a socket buffer to a TX queue
3530 *
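
The new efx_tx_max_skb_descs() above bounds how many descriptors one soft-TSO skb can consume, and EFX_TXQ_MIN_ENT() in efx.h then requires room for two such skbs before a TX ring size is accepted. With the constants introduced by this patch (and MAX_SKB_FRAGS taken as 17, a typical value for this kernel, used purely for illustration) the arithmetic works out as follows:

#include <stdio.h>

#define EFX_TSO_MAX_SEGS  100
#define MAX_SKB_FRAGS      17   /* typical 4 KiB-page value; assumption for this example */

int main(void)
{
        /* header + payload descriptor per segment, plus one per input fragment boundary */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        printf("descriptors per TSO skb:     %u\n", max_descs);                   /* 217 */
        /* the 5391 alignment workaround adds at most one more per segment */
        printf("with alignment workaround:   %u\n", max_descs + EFX_TSO_MAX_SEGS);
        /* the TX ring must hold at least two such skbs */
        printf("minimum TX ring (2 * descs): %u\n", 2 * max_descs);               /* 434 */
        return 0;
}
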
3531 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
3532 index c1d602d..b99c418 100644
3533 --- a/drivers/net/macvtap.c
3534 +++ b/drivers/net/macvtap.c
3535 @@ -506,10 +506,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
3536 if (copy > size) {
3537 ++from;
3538 --count;
3539 - }
3540 + offset = 0;
3541 + } else
3542 + offset += size;
3543 copy -= size;
3544 offset1 += size;
3545 - offset = 0;
3546 }
3547
3548 if (len == offset1)
3549 @@ -519,25 +520,28 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
3550 struct page *page[MAX_SKB_FRAGS];
3551 int num_pages;
3552 unsigned long base;
3553 + unsigned long truesize;
3554
3555 - len = from->iov_len - offset1;
3556 + len = from->iov_len - offset;
3557 if (!len) {
3558 - offset1 = 0;
3559 + offset = 0;
3560 ++from;
3561 continue;
3562 }
3563 - base = (unsigned long)from->iov_base + offset1;
3564 + base = (unsigned long)from->iov_base + offset;
3565 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
3566 if (i + size > MAX_SKB_FRAGS)
3567 return -EMSGSIZE;
3568 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
3569 - if (num_pages != size)
3570 - /* put_page is in skb free */
3571 - return -EFAULT;
3572 + if (num_pages != size) {
3573 + for (i = 0; i < num_pages; i++)
3574 + put_page(page[i]);
3575 + }
3576 + truesize = size * PAGE_SIZE;
3577 skb->data_len += len;
3578 skb->len += len;
3579 - skb->truesize += len;
3580 - atomic_add(len, &skb->sk->sk_wmem_alloc);
3581 + skb->truesize += truesize;
3582 + atomic_add(truesize, &skb->sk->sk_wmem_alloc);
3583 while (len) {
3584 int off = base & ~PAGE_MASK;
3585 int size = min_t(int, len, PAGE_SIZE - off);
3586 @@ -548,7 +552,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
3587 len -= size;
3588 i++;
3589 }
3590 - offset1 = 0;
3591 + offset = 0;
3592 ++from;
3593 }
3594 return 0;
3595 @@ -712,10 +716,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
3596 if (!skb)
3597 goto err;
3598
3599 - if (zerocopy) {
3600 + if (zerocopy)
3601 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
3602 - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
3603 - } else
3604 + else
3605 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
3606 len);
3607 if (err)
3608 @@ -734,8 +737,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
3609 rcu_read_lock_bh();
3610 vlan = rcu_dereference_bh(q->vlan);
3611 /* copy skb_ubuf_info for callback when skb has no error */
3612 - if (zerocopy)
3613 + if (zerocopy) {
3614 skb_shinfo(skb)->destructor_arg = m->msg_control;
3615 + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
3616 + }
3617 if (vlan)
3618 macvlan_start_xmit(skb, vlan->dev);
3619 else
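
The macvtap zerocopy fix charges the socket for the memory actually pinned rather than for the payload length: the number of pages an iovec slice touches, times PAGE_SIZE, goes into skb->truesize and sk_wmem_alloc. The page count is the usual round-up on the in-page offset plus length; a standalone check of the expression used in the hunk (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
        /* same expression as the patch: in-page offset + len, rounded up to pages */
        return ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
}

int main(void)
{
        unsigned long base = 0x1000f00;         /* 0xf00 bytes into a page */
        unsigned long len  = 600;               /* crosses one page boundary */
        unsigned long n    = pages_spanned(base, len);

        printf("pages pinned: %lu, truesize charged: %lu bytes (payload only %lu)\n",
               n, n * PAGE_SIZE, len);          /* 2 pages, 8192 bytes vs. 600 */
        return 0;
}
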
3620 diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
3621 index f9347ea..63ffbdf 100644
3622 --- a/drivers/net/netconsole.c
3623 +++ b/drivers/net/netconsole.c
3624 @@ -648,7 +648,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
3625 flags);
3626 dev_put(nt->np.dev);
3627 nt->np.dev = NULL;
3628 - netconsole_target_put(nt);
3629 }
3630 nt->enabled = 0;
3631 stopped = true;
3632 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
3633 index 885dbdd..f617566 100644
3634 --- a/drivers/net/ppp/pptp.c
3635 +++ b/drivers/net/ppp/pptp.c
3636 @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
3637 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
3638 goto tx_error;
3639
3640 - rt = ip_route_output_ports(&init_net, &fl4, NULL,
3641 + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
3642 opt->dst_addr.sin_addr.s_addr,
3643 opt->src_addr.sin_addr.s_addr,
3644 0, 0, IPPROTO_GRE,
3645 @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
3646 po->chan.private = sk;
3647 po->chan.ops = &pptp_chan_ops;
3648
3649 - rt = ip_route_output_ports(&init_net, &fl4, sk,
3650 + rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
3651 opt->dst_addr.sin_addr.s_addr,
3652 opt->src_addr.sin_addr.s_addr,
3653 0, 0,
3654 diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
3655 index 609fcc3..ef84a58 100644
3656 --- a/drivers/net/usb/asix.c
3657 +++ b/drivers/net/usb/asix.c
3658 @@ -1604,6 +1604,10 @@ static const struct usb_device_id products [] = {
3659 USB_DEVICE (0x2001, 0x3c05),
3660 .driver_info = (unsigned long) &ax88772_info,
3661 }, {
3662 + // DLink DUB-E100 H/W Ver C1
3663 + USB_DEVICE (0x2001, 0x1a02),
3664 + .driver_info = (unsigned long) &ax88772_info,
3665 +}, {
3666 // Linksys USB1000
3667 USB_DEVICE (0x1737, 0x0039),
3668 .driver_info = (unsigned long) &ax88178_info,
3669 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3670 index c2ae426..8669c77 100644
3671 --- a/drivers/net/usb/qmi_wwan.c
3672 +++ b/drivers/net/usb/qmi_wwan.c
3673 @@ -346,6 +346,15 @@ static const struct driver_info qmi_wwan_force_int1 = {
3674 .data = BIT(1), /* interface whitelist bitmap */
3675 };
3676
3677 +static const struct driver_info qmi_wwan_force_int2 = {
3678 + .description = "Qualcomm WWAN/QMI device",
3679 + .flags = FLAG_WWAN,
3680 + .bind = qmi_wwan_bind_shared,
3681 + .unbind = qmi_wwan_unbind_shared,
3682 + .manage_power = qmi_wwan_manage_power,
3683 + .data = BIT(2), /* interface whitelist bitmap */
3684 +};
3685 +
3686 static const struct driver_info qmi_wwan_force_int3 = {
3687 .description = "Qualcomm WWAN/QMI device",
3688 .flags = FLAG_WWAN,
3689 @@ -407,6 +416,14 @@ static const struct usb_device_id products[] = {
3690 .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
3691 .driver_info = (unsigned long)&qmi_wwan_info,
3692 },
3693 + { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
3694 + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
3695 + .idVendor = HUAWEI_VENDOR_ID,
3696 + .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
3697 + .bInterfaceSubClass = 1,
3698 + .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
3699 + .driver_info = (unsigned long)&qmi_wwan_info,
3700 + },
3701 { /* Huawei E392, E398 and possibly others in "Windows mode"
3702 * using a combined control and data interface without any CDC
3703 * functional descriptors
3704 @@ -427,6 +444,15 @@ static const struct usb_device_id products[] = {
3705 .bInterfaceProtocol = 0xff,
3706 .driver_info = (unsigned long)&qmi_wwan_shared,
3707 },
3708 + { /* Pantech UML290 - newer firmware */
3709 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3710 + .idVendor = 0x106c,
3711 + .idProduct = 0x3718,
3712 + .bInterfaceClass = 0xff,
3713 + .bInterfaceSubClass = 0xf1,
3714 + .bInterfaceProtocol = 0xff,
3715 + .driver_info = (unsigned long)&qmi_wwan_shared,
3716 + },
3717 { /* ZTE MF820D */
3718 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3719 .idVendor = 0x19d2,
3720 @@ -436,6 +462,15 @@ static const struct usb_device_id products[] = {
3721 .bInterfaceProtocol = 0xff,
3722 .driver_info = (unsigned long)&qmi_wwan_force_int4,
3723 },
3724 + { /* ZTE MF821D */
3725 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3726 + .idVendor = 0x19d2,
3727 + .idProduct = 0x0326,
3728 + .bInterfaceClass = 0xff,
3729 + .bInterfaceSubClass = 0xff,
3730 + .bInterfaceProtocol = 0xff,
3731 + .driver_info = (unsigned long)&qmi_wwan_force_int4,
3732 + },
3733 { /* ZTE (Vodafone) K3520-Z */
3734 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3735 .idVendor = 0x19d2,
3736 @@ -472,6 +507,15 @@ static const struct usb_device_id products[] = {
3737 .bInterfaceProtocol = 0xff,
3738 .driver_info = (unsigned long)&qmi_wwan_force_int4,
3739 },
3740 + { /* ZTE (Vodafone) K3765-Z */
3741 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3742 + .idVendor = 0x19d2,
3743 + .idProduct = 0x2002,
3744 + .bInterfaceClass = 0xff,
3745 + .bInterfaceSubClass = 0xff,
3746 + .bInterfaceProtocol = 0xff,
3747 + .driver_info = (unsigned long)&qmi_wwan_force_int4,
3748 + },
3749 { /* ZTE (Vodafone) K4505-Z */
3750 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3751 .idVendor = 0x19d2,
3752 @@ -481,6 +525,24 @@ static const struct usb_device_id products[] = {
3753 .bInterfaceProtocol = 0xff,
3754 .driver_info = (unsigned long)&qmi_wwan_force_int4,
3755 },
3756 + { /* ZTE (Vodafone) K5006-Z */
3757 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3758 + .idVendor = 0x19d2,
3759 + .idProduct = 0x1018,
3760 + .bInterfaceClass = 0xff,
3761 + .bInterfaceSubClass = 0xff,
3762 + .bInterfaceProtocol = 0xff,
3763 + .driver_info = (unsigned long)&qmi_wwan_force_int3,
3764 + },
3765 + { /* ZTE MF60 */
3766 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3767 + .idVendor = 0x19d2,
3768 + .idProduct = 0x1402,
3769 + .bInterfaceClass = 0xff,
3770 + .bInterfaceSubClass = 0xff,
3771 + .bInterfaceProtocol = 0xff,
3772 + .driver_info = (unsigned long)&qmi_wwan_force_int2,
3773 + },
3774 { /* Sierra Wireless MC77xx in QMI mode */
3775 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3776 .idVendor = 0x1199,
3777 @@ -490,6 +552,33 @@ static const struct usb_device_id products[] = {
3778 .bInterfaceProtocol = 0xff,
3779 .driver_info = (unsigned long)&qmi_wwan_sierra,
3780 },
3781 + { /* Sierra Wireless MC7700 */
3782 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3783 + .idVendor = 0x0f3d,
3784 + .idProduct = 0x68a2,
3785 + .bInterfaceClass = 0xff,
3786 + .bInterfaceSubClass = 0xff,
3787 + .bInterfaceProtocol = 0xff,
3788 + .driver_info = (unsigned long)&qmi_wwan_sierra,
3789 + },
3790 + { /* Sierra Wireless MC7750 */
3791 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3792 + .idVendor = 0x114f,
3793 + .idProduct = 0x68a2,
3794 + .bInterfaceClass = 0xff,
3795 + .bInterfaceSubClass = 0xff,
3796 + .bInterfaceProtocol = 0xff,
3797 + .driver_info = (unsigned long)&qmi_wwan_sierra,
3798 + },
3799 + { /* Sierra Wireless EM7700 */
3800 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
3801 + .idVendor = 0x1199,
3802 + .idProduct = 0x901c,
3803 + .bInterfaceClass = 0xff,
3804 + .bInterfaceSubClass = 0xff,
3805 + .bInterfaceProtocol = 0xff,
3806 + .driver_info = (unsigned long)&qmi_wwan_sierra,
3807 + },
3808
3809 /* Gobi 1000 devices */
3810 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
3811 @@ -517,6 +606,8 @@ static const struct usb_device_id products[] = {
3812 {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
3813 {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
3814 {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
3815 + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
3816 + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
3817 {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
3818 {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
3819 {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
3820 @@ -531,6 +622,10 @@ static const struct usb_device_id products[] = {
3821 {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
3822 {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
3823 {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
3824 + {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
3825 + {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
3826 + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
3827 +
3828 { } /* END */
3829 };
3830 MODULE_DEVICE_TABLE(usb, products);
3831 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
3832 index 4187435..4db878d 100644
3833 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
3834 +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
3835 @@ -764,8 +764,11 @@ static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
3836 {
3837 char iovbuf[32];
3838 int retcode;
3839 + __le32 arp_mode_le;
3840
3841 - brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
3842 + arp_mode_le = cpu_to_le32(arp_mode);
3843 + brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
3844 + sizeof(iovbuf));
3845 retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
3846 iovbuf, sizeof(iovbuf));
3847 retcode = retcode >= 0 ? 0 : retcode;
3848 @@ -781,8 +784,11 @@ static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
3849 {
3850 char iovbuf[32];
3851 int retcode;
3852 + __le32 arp_enable_le;
3853
3854 - brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4,
3855 + arp_enable_le = cpu_to_le32(arp_enable);
3856 +
3857 + brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
3858 iovbuf, sizeof(iovbuf));
3859 retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
3860 iovbuf, sizeof(iovbuf));
3861 @@ -803,10 +809,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
3862 char buf[128], *ptr;
3863 u32 dongle_align = drvr->bus_if->align;
3864 u32 glom = 0;
3865 - u32 roaming = 1;
3866 - uint bcn_timeout = 3;
3867 - int scan_assoc_time = 40;
3868 - int scan_unassoc_time = 40;
3869 + __le32 roaming_le = cpu_to_le32(1);
3870 + __le32 bcn_timeout_le = cpu_to_le32(3);
3871 + __le32 scan_assoc_time_le = cpu_to_le32(40);
3872 + __le32 scan_unassoc_time_le = cpu_to_le32(40);
3873 int i;
3874
3875 mutex_lock(&drvr->proto_block);
3876 @@ -841,14 +847,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
3877
3878 /* Setup timeout if Beacons are lost and roam is off to report
3879 link down */
3880 - brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
3881 + brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
3882 sizeof(iovbuf));
3883 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
3884 sizeof(iovbuf));
3885
3886 /* Enable/Disable build-in roaming to allowed ext supplicant to take
3887 of romaing */
3888 - brcmf_c_mkiovar("roam_off", (char *)&roaming, 4,
3889 + brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
3890 iovbuf, sizeof(iovbuf));
3891 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
3892 sizeof(iovbuf));
3893 @@ -863,9 +869,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
3894 sizeof(iovbuf));
3895
3896 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
3897 - (char *)&scan_assoc_time, sizeof(scan_assoc_time));
3898 + (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
3899 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
3900 - (char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
3901 + (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
3902
3903 /* Set and enable ARP offload feature */
3904 brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
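
The brcmfmac changes all follow one rule: the dongle firmware reads iovar payloads as little-endian 32-bit values, so each parameter is converted with cpu_to_le32() before being packed into the buffer instead of handing over a host-order int. The userspace analogue of that conversion is htole32() from glibc's <endian.h>; a small sketch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
        uint32_t bcn_timeout = 3;
        uint32_t le = htole32(bcn_timeout);     /* no-op on LE hosts, byte swap on BE */
        unsigned char buf[4];

        memcpy(buf, &le, sizeof(le));
        printf("bytes sent to firmware: %02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]); /* 03 00 00 00 on any host */
        return 0;
}
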
3905 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
3906 index d13ae9c..e360939 100644
3907 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
3908 +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
3909 @@ -500,8 +500,10 @@ static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
3910 params_le->active_time = cpu_to_le32(-1);
3911 params_le->passive_time = cpu_to_le32(-1);
3912 params_le->home_time = cpu_to_le32(-1);
3913 - if (ssid && ssid->SSID_len)
3914 - memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid));
3915 + if (ssid && ssid->SSID_len) {
3916 + params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
3917 + memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
3918 + }
3919 }
3920
3921 static s32
3922 diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
3923 index 3a6b402..0ea85f4 100644
3924 --- a/drivers/net/wireless/rt2x00/rt2400pci.c
3925 +++ b/drivers/net/wireless/rt2x00/rt2400pci.c
3926 @@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3927 static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
3928 {
3929 int retval;
3930 + u32 reg;
3931
3932 /*
3933 * Allocate eeprom data.
3934 @@ -1624,6 +1625,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
3935 return retval;
3936
3937 /*
3938 + * Enable rfkill polling by setting GPIO direction of the
3939 + * rfkill switch GPIO pin correctly.
3940 + */
3941 + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
3942 + rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
3943 + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
3944 +
3945 + /*
3946 * Initialize hw specifications.
3947 */
3948 retval = rt2400pci_probe_hw_mode(rt2x00dev);
3949 diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
3950 index d3a4a68..7564ae9 100644
3951 --- a/drivers/net/wireless/rt2x00/rt2400pci.h
3952 +++ b/drivers/net/wireless/rt2x00/rt2400pci.h
3953 @@ -670,6 +670,7 @@
3954 #define GPIOCSR_BIT5 FIELD32(0x00000020)
3955 #define GPIOCSR_BIT6 FIELD32(0x00000040)
3956 #define GPIOCSR_BIT7 FIELD32(0x00000080)
3957 +#define GPIOCSR_BIT8 FIELD32(0x00000100)
3958
3959 /*
3960 * BBPPCSR: BBP Pin control register.
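
Several rt2x00 hunks in this patch repeat one pattern: read the GPIO control register, set (or, on some of the USB parts, clear) the direction bit of the rfkill pin, and write the value back so that rfkill polling works from probe time onward. Stripped of the FIELD32/FIELD16 helpers this is an ordinary read-modify-write; a generic sketch with invented names standing in for the register accessors:

#include <stdio.h>
#include <stdint.h>

#define GPIO_DIR_BIT8 (1u << 8)         /* illustrative stand-in for GPIOCSR_BIT8 */

static uint32_t gpiocsr = 0x000000a5;   /* fake backing store for the register */

int main(void)
{
        uint32_t reg = gpiocsr;          /* register_read(GPIOCSR, &reg)       */

        reg |= GPIO_DIR_BIT8;            /* set_field32(&reg, GPIOCSR_BIT8, 1) */
        gpiocsr = reg;                   /* register_write(GPIOCSR, reg)       */

        printf("GPIOCSR after probe: 0x%08x\n", gpiocsr);   /* 0x000001a5 */
        return 0;
}
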
3961 diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
3962 index dcc0e1f..aa10c48 100644
3963 --- a/drivers/net/wireless/rt2x00/rt2500pci.c
3964 +++ b/drivers/net/wireless/rt2x00/rt2500pci.c
3965 @@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3966 static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
3967 {
3968 int retval;
3969 + u32 reg;
3970
3971 /*
3972 * Allocate eeprom data.
3973 @@ -1942,6 +1943,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
3974 return retval;
3975
3976 /*
3977 + * Enable rfkill polling by setting GPIO direction of the
3978 + * rfkill switch GPIO pin correctly.
3979 + */
3980 + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
3981 + rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
3982 + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
3983 +
3984 + /*
3985 * Initialize hw specifications.
3986 */
3987 retval = rt2500pci_probe_hw_mode(rt2x00dev);
3988 diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
3989 index 1de9c75..e0a7efc 100644
3990 --- a/drivers/net/wireless/rt2x00/rt2500usb.c
3991 +++ b/drivers/net/wireless/rt2x00/rt2500usb.c
3992 @@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
3993 u16 reg;
3994
3995 rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
3996 - return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
3997 + return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
3998 }
3999
4000 #ifdef CONFIG_RT2X00_LIB_LEDS
4001 @@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4002 static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4003 {
4004 int retval;
4005 + u16 reg;
4006
4007 /*
4008 * Allocate eeprom data.
4009 @@ -1781,6 +1782,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4010 return retval;
4011
4012 /*
4013 + * Enable rfkill polling by setting GPIO direction of the
4014 + * rfkill switch GPIO pin correctly.
4015 + */
4016 + rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
4017 + rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
4018 + rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
4019 +
4020 + /*
4021 * Initialize hw specifications.
4022 */
4023 retval = rt2500usb_probe_hw_mode(rt2x00dev);
4024 diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
4025 index b493306..196bd51 100644
4026 --- a/drivers/net/wireless/rt2x00/rt2500usb.h
4027 +++ b/drivers/net/wireless/rt2x00/rt2500usb.h
4028 @@ -189,14 +189,15 @@
4029 * MAC_CSR19: GPIO control register.
4030 */
4031 #define MAC_CSR19 0x0426
4032 -#define MAC_CSR19_BIT0 FIELD32(0x0001)
4033 -#define MAC_CSR19_BIT1 FIELD32(0x0002)
4034 -#define MAC_CSR19_BIT2 FIELD32(0x0004)
4035 -#define MAC_CSR19_BIT3 FIELD32(0x0008)
4036 -#define MAC_CSR19_BIT4 FIELD32(0x0010)
4037 -#define MAC_CSR19_BIT5 FIELD32(0x0020)
4038 -#define MAC_CSR19_BIT6 FIELD32(0x0040)
4039 -#define MAC_CSR19_BIT7 FIELD32(0x0080)
4040 +#define MAC_CSR19_BIT0 FIELD16(0x0001)
4041 +#define MAC_CSR19_BIT1 FIELD16(0x0002)
4042 +#define MAC_CSR19_BIT2 FIELD16(0x0004)
4043 +#define MAC_CSR19_BIT3 FIELD16(0x0008)
4044 +#define MAC_CSR19_BIT4 FIELD16(0x0010)
4045 +#define MAC_CSR19_BIT5 FIELD16(0x0020)
4046 +#define MAC_CSR19_BIT6 FIELD16(0x0040)
4047 +#define MAC_CSR19_BIT7 FIELD16(0x0080)
4048 +#define MAC_CSR19_BIT8 FIELD16(0x0100)
4049
4050 /*
4051 * MAC_CSR20: LED control register.
4052 diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
4053 index 0397bbf..ff81e76 100644
4054 --- a/drivers/net/wireless/rt2x00/rt2800pci.c
4055 +++ b/drivers/net/wireless/rt2x00/rt2800pci.c
4056 @@ -973,6 +973,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4057 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
4058 {
4059 int retval;
4060 + u32 reg;
4061
4062 /*
4063 * Allocate eeprom data.
4064 @@ -986,6 +987,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
4065 return retval;
4066
4067 /*
4068 + * Enable rfkill polling by setting GPIO direction of the
4069 + * rfkill switch GPIO pin correctly.
4070 + */
4071 + rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
4072 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
4073 + rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
4074 +
4075 + /*
4076 * Initialize hw specifications.
4077 */
4078 retval = rt2800_probe_hw_mode(rt2x00dev);
4079 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
4080 index 7e1a492..65cb425 100644
4081 --- a/drivers/net/wireless/rt2x00/rt2800usb.c
4082 +++ b/drivers/net/wireless/rt2x00/rt2800usb.c
4083 @@ -667,8 +667,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
4084 skb_pull(entry->skb, RXINFO_DESC_SIZE);
4085
4086 /*
4087 - * FIXME: we need to check for rx_pkt_len validity
4088 + * Check for rx_pkt_len validity. Return if invalid, leaving
4089 + * rxdesc->size zeroed out by the upper level.
4090 */
4091 + if (unlikely(rx_pkt_len == 0 ||
4092 + rx_pkt_len > entry->queue->data_size)) {
4093 + ERROR(entry->queue->rt2x00dev,
4094 + "Bad frame size %d, forcing to 0\n", rx_pkt_len);
4095 + return;
4096 + }
4097 +
4098 rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
4099
4100 /*
4101 @@ -736,6 +744,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
4102 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4103 {
4104 int retval;
4105 + u32 reg;
4106
4107 /*
4108 * Allocate eeprom data.
4109 @@ -749,6 +758,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4110 return retval;
4111
4112 /*
4113 + * Enable rfkill polling by setting GPIO direction of the
4114 + * rfkill switch GPIO pin correctly.
4115 + */
4116 + rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
4117 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
4118 + rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
4119 +
4120 + /*
4121 * Initialize hw specifications.
4122 */
4123 retval = rt2800_probe_hw_mode(rt2x00dev);
4124 @@ -1157,6 +1174,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
4125 { USB_DEVICE(0x1690, 0x0744) },
4126 { USB_DEVICE(0x1690, 0x0761) },
4127 { USB_DEVICE(0x1690, 0x0764) },
4128 + /* ASUS */
4129 + { USB_DEVICE(0x0b05, 0x179d) },
4130 /* Cisco */
4131 { USB_DEVICE(0x167b, 0x4001) },
4132 /* EnGenius */
4133 @@ -1222,7 +1241,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
4134 { USB_DEVICE(0x0b05, 0x1760) },
4135 { USB_DEVICE(0x0b05, 0x1761) },
4136 { USB_DEVICE(0x0b05, 0x1790) },
4137 - { USB_DEVICE(0x0b05, 0x179d) },
4138 /* AzureWave */
4139 { USB_DEVICE(0x13d3, 0x3262) },
4140 { USB_DEVICE(0x13d3, 0x3284) },
4141 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
4142 index 90cc5e7..12b1ff5 100644
4143 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
4144 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
4145 @@ -628,7 +628,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
4146 */
4147 if (unlikely(rxdesc.size == 0 ||
4148 rxdesc.size > entry->queue->data_size)) {
4149 - WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
4150 + ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
4151 rxdesc.size, entry->queue->data_size);
4152 dev_kfree_skb(entry->skb);
4153 goto renew_skb;
4154 diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
4155 index 0f4bf8c..bdaba3f 100644
4156 --- a/drivers/net/wireless/rt2x00/rt61pci.c
4157 +++ b/drivers/net/wireless/rt2x00/rt61pci.c
4158 @@ -2832,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4159 static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
4160 {
4161 int retval;
4162 + u32 reg;
4163
4164 /*
4165 * Disable power saving.
4166 @@ -2850,6 +2851,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
4167 return retval;
4168
4169 /*
4170 + * Enable rfkill polling by setting GPIO direction of the
4171 + * rfkill switch GPIO pin correctly.
4172 + */
4173 + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
4174 + rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
4175 + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
4176 +
4177 + /*
4178 * Initialize hw specifications.
4179 */
4180 retval = rt61pci_probe_hw_mode(rt2x00dev);
4181 diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
4182 index e3cd6db..8f3da5a 100644
4183 --- a/drivers/net/wireless/rt2x00/rt61pci.h
4184 +++ b/drivers/net/wireless/rt2x00/rt61pci.h
4185 @@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
4186 #define MAC_CSR13_BIT10 FIELD32(0x00000400)
4187 #define MAC_CSR13_BIT11 FIELD32(0x00000800)
4188 #define MAC_CSR13_BIT12 FIELD32(0x00001000)
4189 +#define MAC_CSR13_BIT13 FIELD32(0x00002000)
4190
4191 /*
4192 * MAC_CSR14: LED control register.
4193 diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
4194 index e477a96..fda8671 100644
4195 --- a/drivers/net/wireless/rt2x00/rt73usb.c
4196 +++ b/drivers/net/wireless/rt2x00/rt73usb.c
4197 @@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4198 static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4199 {
4200 int retval;
4201 + u32 reg;
4202
4203 /*
4204 * Allocate eeprom data.
4205 @@ -2190,6 +2191,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
4206 return retval;
4207
4208 /*
4209 + * Enable rfkill polling by setting GPIO direction of the
4210 + * rfkill switch GPIO pin correctly.
4211 + */
4212 + rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
4213 + rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
4214 + rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
4215 +
4216 + /*
4217 * Initialize hw specifications.
4218 */
4219 retval = rt73usb_probe_hw_mode(rt2x00dev);
4220 diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
4221 index 9f6b470..df1cc11 100644
4222 --- a/drivers/net/wireless/rt2x00/rt73usb.h
4223 +++ b/drivers/net/wireless/rt2x00/rt73usb.h
4224 @@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
4225 #define MAC_CSR13_BIT10 FIELD32(0x00000400)
4226 #define MAC_CSR13_BIT11 FIELD32(0x00000800)
4227 #define MAC_CSR13_BIT12 FIELD32(0x00001000)
4228 +#define MAC_CSR13_BIT13 FIELD32(0x00002000)
4229 +#define MAC_CSR13_BIT14 FIELD32(0x00004000)
4230 +#define MAC_CSR13_BIT15 FIELD32(0x00008000)
4231
4232 /*
4233 * MAC_CSR14: LED control register.
4234 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
4235 index 04c3aef..2925094 100644
4236 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
4237 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
4238 @@ -117,6 +117,7 @@
4239
4240 #define CHIP_VER_B BIT(4)
4241 #define CHIP_92C_BITMASK BIT(0)
4242 +#define CHIP_UNKNOWN BIT(7)
4243 #define CHIP_92C_1T2R 0x03
4244 #define CHIP_92C 0x01
4245 #define CHIP_88C 0x00
4246 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4247 index 5c4d9bc..509f661 100644
4248 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4249 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
4250 @@ -995,8 +995,16 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
4251 version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
4252 VERSION_A_CHIP_88C;
4253 } else {
4254 - version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C :
4255 - VERSION_B_CHIP_88C;
4256 + version = (enum version_8192c) (CHIP_VER_B |
4257 + ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) |
4258 + ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
4259 + if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 &
4260 + CHIP_VER_RTL_MASK)) {
4261 + version = (enum version_8192c)(version |
4262 + ((((value32 & CHIP_VER_RTL_MASK) == BIT(12))
4263 + ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
4264 + CHIP_VENDOR_UMC));
4265 + }
4266 }
4267
4268 switch (version) {
4269 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
4270 index 2c3b733..4e2a458 100644
4271 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
4272 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
4273 @@ -162,10 +162,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
4274
4275 /* request fw */
4276 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
4277 - !IS_92C_SERIAL(rtlhal->version))
4278 + !IS_92C_SERIAL(rtlhal->version)) {
4279 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
4280 - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
4281 + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
4282 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
4283 + pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
4284 + }
4285
4286 rtlpriv->max_fw_size = 0x4000;
4287 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
4288 diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
4289 index e38f91b..110c777 100644
4290 --- a/drivers/platform/x86/asus-laptop.c
4291 +++ b/drivers/platform/x86/asus-laptop.c
4292 @@ -863,9 +863,9 @@ static ssize_t show_infos(struct device *dev,
4293 * The significance of others is yet to be found.
4294 * If we don't find the method, we assume the device are present.
4295 */
4296 - rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
4297 + rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
4298 if (!ACPI_FAILURE(rv))
4299 - len += sprintf(page + len, "HRWS value : %#x\n",
4300 + len += sprintf(page + len, "HWRS value : %#x\n",
4301 (uint) temp);
4302 /*
4303 * Another value for userspace: the ASYM method returns 0x02 for
4304 @@ -1751,9 +1751,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
4305 * The significance of others is yet to be found.
4306 */
4307 status =
4308 - acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
4309 + acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
4310 if (!ACPI_FAILURE(status))
4311 - pr_notice(" HRWS returned %x", (int)hwrs_result);
4312 + pr_notice(" HWRS returned %x", (int)hwrs_result);
4313
4314 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
4315 asus->have_rsts = true;
4316 diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
4317 index 99a30b5..6de14fd 100644
4318 --- a/drivers/platform/x86/asus-nb-wmi.c
4319 +++ b/drivers/platform/x86/asus-nb-wmi.c
4320 @@ -94,6 +94,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
4321 { KE_KEY, 0x8A, { KEY_PROG1 } },
4322 { KE_KEY, 0x95, { KEY_MEDIA } },
4323 { KE_KEY, 0x99, { KEY_PHONE } },
4324 + { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
4325 + { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
4326 + { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
4327 + { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
4328 { KE_KEY, 0xb5, { KEY_CALC } },
4329 { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
4330 { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
4331 diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
4332 index 77074cc..fd5c7af 100644
4333 --- a/drivers/rtc/rtc-rs5c348.c
4334 +++ b/drivers/rtc/rtc-rs5c348.c
4335 @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
4336 tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
4337 tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
4338 if (!pdata->rtc_24h) {
4339 - tm->tm_hour %= 12;
4340 - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
4341 + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
4342 + tm->tm_hour -= 20;
4343 + tm->tm_hour %= 12;
4344 tm->tm_hour += 12;
4345 + } else
4346 + tm->tm_hour %= 12;
4347 }
4348 tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
4349 tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
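
In 12-hour mode the RS5C348 keeps its PM flag inside the masked hours byte, so bcd2bin() of a PM reading comes back 20 too high; the fixed code therefore subtracts that offset before folding 12 o'clock to 0 and adding 12 back. A worked version of the arithmetic (the hours mask and the PM bit position are assumptions inferred from the -20 in the fix; register values invented to hit the edge cases):

#include <stdio.h>

#define BIT_PM 0x20     /* assumed PM flag inside the BCD hours byte */

static int bcd2bin(unsigned char v)
{
        return (v >> 4) * 10 + (v & 0x0f);
}

static int hours_24(unsigned char reg)
{
        int hour = bcd2bin(reg & 0x3f);         /* hours field, PM bit included */

        if (reg & BIT_PM) {
                hour -= 20;                     /* strip the PM bit's BCD weight */
                hour %= 12;                     /* 12 PM -> 0 */
                hour += 12;                     /* back into 12..23 */
        } else {
                hour %= 12;                     /* 12 AM -> 0 */
        }
        return hour;
}

int main(void)
{
        printf("0x12 (12 AM) -> %02d\n", hours_24(0x12));   /* 00 */
        printf("0x21 ( 1 PM) -> %02d\n", hours_24(0x21));   /* 13 */
        printf("0x32 (12 PM) -> %02d\n", hours_24(0x32));   /* 12 */
        printf("0x31 (11 PM) -> %02d\n", hours_24(0x31));   /* 23 */
        return 0;
}
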
4350 diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
4351 index 258abea..63ccc0f 100644
4352 --- a/drivers/rtc/rtc-twl.c
4353 +++ b/drivers/rtc/rtc-twl.c
4354 @@ -495,6 +495,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
4355 if (ret < 0)
4356 goto out1;
4357
4358 + /* ensure interrupts are disabled, bootloaders can be strange */
4359 + ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
4360 + if (ret < 0)
4361 + dev_warn(&pdev->dev, "unable to disable interrupt\n");
4362 +
4363 /* init cached IRQ enable bits */
4364 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
4365 if (ret < 0)
4366 diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
4367 index f9d6f41..72de3ba 100644
4368 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c
4369 +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
4370 @@ -1264,6 +1264,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
4371 int rc = 0;
4372 u64 mask64;
4373
4374 + memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
4375 + memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));
4376 +
4377 bnx2i_adjust_qp_size(hba);
4378
4379 iscsi_init.flags =
4380 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
4381 index 500e20d..7c49e0a 100644
4382 --- a/drivers/scsi/hpsa.c
4383 +++ b/drivers/scsi/hpsa.c
4384 @@ -1264,8 +1264,9 @@ static void complete_scsi_command(struct CommandList *cp)
4385 }
4386 break;
4387 case CMD_PROTOCOL_ERR:
4388 + cmd->result = DID_ERROR << 16;
4389 dev_warn(&h->pdev->dev, "cp %p has "
4390 - "protocol error \n", cp);
4391 + "protocol error\n", cp);
4392 break;
4393 case CMD_HARDWARE_ERR:
4394 cmd->result = DID_ERROR << 16;
4395 diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
4396 index fe5d396..e2516ba 100644
4397 --- a/drivers/scsi/lpfc/Makefile
4398 +++ b/drivers/scsi/lpfc/Makefile
4399 @@ -22,7 +22,9 @@
4400 ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
4401 ccflags-$(GCOV) += -O0
4402
4403 +ifdef WARNINGS_BECOME_ERRORS
4404 ccflags-y += -Werror
4405 +endif
4406
4407 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
4408
4409 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
4410 index c4cef56..db79362 100644
4411 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
4412 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
4413 @@ -1202,6 +1202,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
4414 u16 message_control;
4415
4416
4417 + /* Check whether controller SAS2008 B0 controller,
4418 + if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
4419 + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
4420 + ioc->pdev->revision == 0x01) {
4421 + return -EINVAL;
4422 + }
4423 +
4424 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
4425 if (!base) {
4426 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
4427 diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
4428 index 1b38431..6661610 100644
4429 --- a/drivers/scsi/virtio_scsi.c
4430 +++ b/drivers/scsi/virtio_scsi.c
4431 @@ -198,7 +198,7 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
4432 int i;
4433
4434 for_each_sg(table->sgl, sg_elem, table->nents, i)
4435 - sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
4436 + sg[idx++] = *sg_elem;
4437
4438 *p_idx = idx;
4439 }
4440 diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
4441 index 6eecbde..66b6e3d 100644
4442 --- a/drivers/staging/android/android_alarm.h
4443 +++ b/drivers/staging/android/android_alarm.h
4444 @@ -110,10 +110,12 @@ enum android_alarm_return_flags {
4445 #define ANDROID_ALARM_WAIT _IO('a', 1)
4446
4447 #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
4448 +#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size)
4449 +
4450 /* Set alarm */
4451 #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
4452 #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
4453 -#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
4454 +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec)
4455 #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
4456 #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
4457 #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
4458 diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
4459 index c2dd0ed..d2dd75e 100644
4460 --- a/drivers/staging/comedi/drivers/das08.c
4461 +++ b/drivers/staging/comedi/drivers/das08.c
4462 @@ -653,7 +653,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev,
4463 int chan;
4464
4465 lsb = data[0] & 0xff;
4466 - msb = (data[0] >> 8) & 0xf;
4467 + msb = (data[0] >> 8) & 0xff;
4468
4469 chan = CR_CHAN(insn->chanspec);
4470
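
The das08 one-liner widens the high-byte mask from 0xf to 0xff: the analog-output sample is split into a low byte and a high byte before being written to the board, and masking the high byte to four bits silently threw away the top bits on boards with more than 12 bits of resolution that share this routine. The split is plain shift-and-mask:

#include <stdio.h>

int main(void)
{
        unsigned int data    = 0xABCD;               /* example 16-bit DAC sample */
        unsigned int lsb     = data & 0xff;
        unsigned int msb_new = (data >> 8) & 0xff;   /* 0xAB: full high byte      */
        unsigned int msb_old = (data >> 8) & 0x0f;   /* 0x0B: top nibble lost     */

        printf("lsb=0x%02x msb=0x%02x (old mask would have written 0x%02x)\n",
               lsb, msb_new, msb_old);
        return 0;
}
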
4471 diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
4472 index 945d962..4afc3b4 100644
4473 --- a/drivers/staging/media/lirc/lirc_sir.c
4474 +++ b/drivers/staging/media/lirc/lirc_sir.c
4475 @@ -52,6 +52,7 @@
4476 #include <linux/io.h>
4477 #include <asm/irq.h>
4478 #include <linux/fcntl.h>
4479 +#include <linux/platform_device.h>
4480 #ifdef LIRC_ON_SA1100
4481 #include <asm/hardware.h>
4482 #ifdef CONFIG_SA1100_COLLIE
4483 @@ -487,9 +488,11 @@ static struct lirc_driver driver = {
4484 .owner = THIS_MODULE,
4485 };
4486
4487 +static struct platform_device *lirc_sir_dev;
4488
4489 static int init_chrdev(void)
4490 {
4491 + driver.dev = &lirc_sir_dev->dev;
4492 driver.minor = lirc_register_driver(&driver);
4493 if (driver.minor < 0) {
4494 printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
4495 @@ -1215,20 +1218,71 @@ static int init_lirc_sir(void)
4496 return 0;
4497 }
4498
4499 +static int __devinit lirc_sir_probe(struct platform_device *dev)
4500 +{
4501 + return 0;
4502 +}
4503 +
4504 +static int __devexit lirc_sir_remove(struct platform_device *dev)
4505 +{
4506 + return 0;
4507 +}
4508 +
4509 +static struct platform_driver lirc_sir_driver = {
4510 + .probe = lirc_sir_probe,
4511 + .remove = __devexit_p(lirc_sir_remove),
4512 + .driver = {
4513 + .name = "lirc_sir",
4514 + .owner = THIS_MODULE,
4515 + },
4516 +};
4517
4518 static int __init lirc_sir_init(void)
4519 {
4520 int retval;
4521
4522 + retval = platform_driver_register(&lirc_sir_driver);
4523 + if (retval) {
4524 + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
4525 + "failed!\n");
4526 + return -ENODEV;
4527 + }
4528 +
4529 + lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
4530 + if (!lirc_sir_dev) {
4531 + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
4532 + "failed!\n");
4533 + retval = -ENOMEM;
4534 + goto pdev_alloc_fail;
4535 + }
4536 +
4537 + retval = platform_device_add(lirc_sir_dev);
4538 + if (retval) {
4539 + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
4540 + "failed!\n");
4541 + retval = -ENODEV;
4542 + goto pdev_add_fail;
4543 + }
4544 +
4545 retval = init_chrdev();
4546 if (retval < 0)
4547 - return retval;
4548 + goto fail;
4549 +
4550 retval = init_lirc_sir();
4551 if (retval) {
4552 drop_chrdev();
4553 - return retval;
4554 + goto fail;
4555 }
4556 +
4557 return 0;
4558 +
4559 +fail:
4560 + platform_device_del(lirc_sir_dev);
4561 +pdev_add_fail:
4562 + platform_device_put(lirc_sir_dev);
4563 +pdev_alloc_fail:
4564 + platform_driver_unregister(&lirc_sir_driver);
4565 + return retval;
4566 }
4567
4568 static void __exit lirc_sir_exit(void)
4569 @@ -1236,6 +1290,8 @@ static void __exit lirc_sir_exit(void)
4570 drop_hardware();
4571 drop_chrdev();
4572 drop_port();
4573 + platform_device_unregister(lirc_sir_dev);
4574 + platform_driver_unregister(&lirc_sir_driver);
4575 printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
4576 }
4577
4578 diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
4579 index 0e26d5f..495ee12 100644
4580 --- a/drivers/staging/rtl8712/recv_linux.c
4581 +++ b/drivers/staging/rtl8712/recv_linux.c
4582 @@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
4583 if (skb == NULL)
4584 goto _recv_indicatepkt_drop;
4585 skb->data = precv_frame->u.hdr.rx_data;
4586 -#ifdef NET_SKBUFF_DATA_USES_OFFSET
4587 - skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
4588 - precv_frame->u.hdr.rx_head);
4589 -#else
4590 - skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
4591 -#endif
4592 skb->len = precv_frame->u.hdr.len;
4593 + skb_set_tail_pointer(skb, skb->len);
4594 if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
4595 skb->ip_summed = CHECKSUM_UNNECESSARY;
4596 else
4597 diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
4598 index 92b34e2..40e2488 100644
4599 --- a/drivers/staging/speakup/main.c
4600 +++ b/drivers/staging/speakup/main.c
4601 @@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc)
4602
4603 static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
4604 {
4605 - static u_char *goto_buf = "\0\0\0\0\0\0";
4606 + static u_char goto_buf[8];
4607 static int num;
4608 int maxlen, go_pos;
4609 char *cp;
4610 diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
4611 index c0edf97..08021f4 100644
4612 --- a/drivers/staging/vt6656/dpc.c
4613 +++ b/drivers/staging/vt6656/dpc.c
4614 @@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
4615 } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
4616 cbHeaderSize += 6;
4617 pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
4618 - if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
4619 + if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
4620 (*pwType == cpu_to_le16(0xF380))) {
4621 cbHeaderSize -= 8;
4622 pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
4623 diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
4624 index 9b64b10..fe21868 100644
4625 --- a/drivers/staging/vt6656/rxtx.c
4626 +++ b/drivers/staging/vt6656/rxtx.c
4627 @@ -1701,7 +1701,7 @@ s_bPacketToWirelessUsb(
4628 // 802.1H
4629 if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
4630 if (pDevice->dwDiagRefCount == 0) {
4631 - if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
4632 + if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
4633 (psEthHeader->wType == cpu_to_le16(0xF380))) {
4634 memcpy((PBYTE) (pbyPayloadHead),
4635 abySNAP_Bridgetunnel, 6);
4636 @@ -2840,10 +2840,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
4637 Packet_Type = skb->data[ETH_HLEN+1];
4638 Descriptor_type = skb->data[ETH_HLEN+1+1+2];
4639 Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
4640 - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
4641 - /* 802.1x OR eapol-key challenge frame transfer */
4642 - if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
4643 - (Packet_Type == 3)) {
4644 + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
4645 + /* 802.1x OR eapol-key challenge frame transfer */
4646 + if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
4647 + (Packet_Type == 3)) {
4648 bTxeapol_key = TRUE;
4649 if(!(Key_info & BIT3) && //WPA or RSN group-key challenge
4650 (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
4651 @@ -2989,19 +2989,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
4652 }
4653 }
4654
4655 - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
4656 - if (pDevice->byBBType != BB_TYPE_11A) {
4657 - pDevice->wCurrentRate = RATE_1M;
4658 - pDevice->byACKRate = RATE_1M;
4659 - pDevice->byTopCCKBasicRate = RATE_1M;
4660 - pDevice->byTopOFDMBasicRate = RATE_6M;
4661 - } else {
4662 - pDevice->wCurrentRate = RATE_6M;
4663 - pDevice->byACKRate = RATE_6M;
4664 - pDevice->byTopCCKBasicRate = RATE_1M;
4665 - pDevice->byTopOFDMBasicRate = RATE_6M;
4666 - }
4667 - }
4668 + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
4669 + if (pDevice->byBBType != BB_TYPE_11A) {
4670 + pDevice->wCurrentRate = RATE_1M;
4671 + pDevice->byACKRate = RATE_1M;
4672 + pDevice->byTopCCKBasicRate = RATE_1M;
4673 + pDevice->byTopOFDMBasicRate = RATE_6M;
4674 + } else {
4675 + pDevice->wCurrentRate = RATE_6M;
4676 + pDevice->byACKRate = RATE_6M;
4677 + pDevice->byTopCCKBasicRate = RATE_1M;
4678 + pDevice->byTopOFDMBasicRate = RATE_6M;
4679 + }
4680 + }
4681
4682 DBG_PRT(MSG_LEVEL_DEBUG,
4683 KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
4684 @@ -3017,7 +3017,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
4685
4686 if (bNeedEncryption == TRUE) {
4687 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
4688 - if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
4689 + if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
4690 bNeedEncryption = FALSE;
4691 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
4692 if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
4693 diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
4694 index 2734dac..1812bed 100644
4695 --- a/drivers/staging/zcache/zcache-main.c
4696 +++ b/drivers/staging/zcache/zcache-main.c
4697 @@ -1259,13 +1259,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
4698 void *pampd, struct tmem_pool *pool,
4699 struct tmem_oid *oid, uint32_t index)
4700 {
4701 - int ret = 0;
4702 -
4703 BUG_ON(!is_ephemeral(pool));
4704 - zbud_decompress((struct page *)(data), pampd);
4705 + if (zbud_decompress((struct page *)(data), pampd) < 0)
4706 + return -EINVAL;
4707 zbud_free_and_delist((struct zbud_hdr *)pampd);
4708 atomic_dec(&zcache_curr_eph_pampd_count);
4709 - return ret;
4710 + return 0;
4711 }
4712
4713 /*
4714 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
4715 index 222f1c5..d594460 100644
4716 --- a/drivers/target/target_core_transport.c
4717 +++ b/drivers/target/target_core_transport.c
4718 @@ -3167,15 +3167,20 @@ static int transport_generic_cmd_sequencer(
4719 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
4720 goto out_invalid_cdb_field;
4721 }
4722 -
4723 + /*
4724 + * For the overflow case keep the existing fabric provided
4725 + * ->data_length. Otherwise for the underflow case, reset
4726 + * ->data_length to the smaller SCSI expected data transfer
4727 + * length.
4728 + */
4729 if (size > cmd->data_length) {
4730 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
4731 cmd->residual_count = (size - cmd->data_length);
4732 } else {
4733 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
4734 cmd->residual_count = (cmd->data_length - size);
4735 + cmd->data_length = size;
4736 }
4737 - cmd->data_length = size;
4738 }
4739
4740 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
4741 diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
4742 index e7fecee..0de7ed7 100644
4743 --- a/drivers/tty/serial/imx.c
4744 +++ b/drivers/tty/serial/imx.c
4745 @@ -756,6 +756,7 @@ static int imx_startup(struct uart_port *port)
4746 }
4747 }
4748
4749 + spin_lock_irqsave(&sport->port.lock, flags);
4750 /*
4751 * Finally, clear and enable interrupts
4752 */
4753 @@ -809,7 +810,6 @@ static int imx_startup(struct uart_port *port)
4754 /*
4755 * Enable modem status interrupts
4756 */
4757 - spin_lock_irqsave(&sport->port.lock,flags);
4758 imx_enable_ms(&sport->port);
4759 spin_unlock_irqrestore(&sport->port.lock,flags);
4760
4761 @@ -839,10 +839,13 @@ static void imx_shutdown(struct uart_port *port)
4762 {
4763 struct imx_port *sport = (struct imx_port *)port;
4764 unsigned long temp;
4765 + unsigned long flags;
4766
4767 + spin_lock_irqsave(&sport->port.lock, flags);
4768 temp = readl(sport->port.membase + UCR2);
4769 temp &= ~(UCR2_TXEN);
4770 writel(temp, sport->port.membase + UCR2);
4771 + spin_unlock_irqrestore(&sport->port.lock, flags);
4772
4773 if (USE_IRDA(sport)) {
4774 struct imxuart_platform_data *pdata;
4775 @@ -871,12 +874,14 @@ static void imx_shutdown(struct uart_port *port)
4776 * Disable all interrupts, port and break condition.
4777 */
4778
4779 + spin_lock_irqsave(&sport->port.lock, flags);
4780 temp = readl(sport->port.membase + UCR1);
4781 temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
4782 if (USE_IRDA(sport))
4783 temp &= ~(UCR1_IREN);
4784
4785 writel(temp, sport->port.membase + UCR1);
4786 + spin_unlock_irqrestore(&sport->port.lock, flags);
4787 }
4788
4789 static void
4790 @@ -1219,6 +1224,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
4791 struct imx_port *sport = imx_ports[co->index];
4792 struct imx_port_ucrs old_ucr;
4793 unsigned int ucr1;
4794 + unsigned long flags;
4795 +
4796 + spin_lock_irqsave(&sport->port.lock, flags);
4797
4798 /*
4799 * First, save UCR1/2/3 and then disable interrupts
4800 @@ -1244,6 +1252,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
4801 while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
4802
4803 imx_port_ucrs_restore(&sport->port, &old_ucr);
4804 +
4805 + spin_unlock_irqrestore(&sport->port.lock, flags);
4806 }
4807
4808 /*
4809 diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
4810 index c2816f4..7d47514 100644
4811 --- a/drivers/tty/serial/pch_uart.c
4812 +++ b/drivers/tty/serial/pch_uart.c
4813 @@ -252,6 +252,9 @@ struct eg20t_port {
4814 dma_addr_t rx_buf_dma;
4815
4816 struct dentry *debugfs;
4817 +
4818 + /* protect the eg20t_port private structure and io access to membase */
4819 + spinlock_t lock;
4820 };
4821
4822 /**
4823 @@ -754,7 +757,8 @@ static void pch_dma_rx_complete(void *arg)
4824 tty_flip_buffer_push(tty);
4825 tty_kref_put(tty);
4826 async_tx_ack(priv->desc_rx);
4827 - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
4828 + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
4829 + PCH_UART_HAL_RX_ERR_INT);
4830 }
4831
4832 static void pch_dma_tx_complete(void *arg)
4833 @@ -809,7 +813,8 @@ static int handle_rx_to(struct eg20t_port *priv)
4834 int rx_size;
4835 int ret;
4836 if (!priv->start_rx) {
4837 - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
4838 + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
4839 + PCH_UART_HAL_RX_ERR_INT);
4840 return 0;
4841 }
4842 buf = &priv->rxbuf;
4843 @@ -1056,7 +1061,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
4844 unsigned int iid;
4845 unsigned long flags;
4846
4847 - spin_lock_irqsave(&priv->port.lock, flags);
4848 + spin_lock_irqsave(&priv->lock, flags);
4849 handled = 0;
4850 while ((iid = pch_uart_hal_get_iid(priv)) > 1) {
4851 switch (iid) {
4852 @@ -1071,11 +1076,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
4853 case PCH_UART_IID_RDR: /* Received Data Ready */
4854 if (priv->use_dma) {
4855 pch_uart_hal_disable_interrupt(priv,
4856 - PCH_UART_HAL_RX_INT);
4857 + PCH_UART_HAL_RX_INT |
4858 + PCH_UART_HAL_RX_ERR_INT);
4859 ret = dma_handle_rx(priv);
4860 if (!ret)
4861 pch_uart_hal_enable_interrupt(priv,
4862 - PCH_UART_HAL_RX_INT);
4863 + PCH_UART_HAL_RX_INT |
4864 + PCH_UART_HAL_RX_ERR_INT);
4865 } else {
4866 ret = handle_rx(priv);
4867 }
4868 @@ -1107,7 +1114,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
4869 priv->int_dis_flag = 0;
4870 }
4871
4872 - spin_unlock_irqrestore(&priv->port.lock, flags);
4873 + spin_unlock_irqrestore(&priv->lock, flags);
4874 return IRQ_RETVAL(handled);
4875 }
4876
4877 @@ -1199,7 +1206,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
4878 struct eg20t_port *priv;
4879 priv = container_of(port, struct eg20t_port, port);
4880 priv->start_rx = 0;
4881 - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
4882 + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
4883 + PCH_UART_HAL_RX_ERR_INT);
4884 priv->int_dis_flag = 1;
4885 }
4886
4887 @@ -1218,9 +1226,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
4888 unsigned long flags;
4889
4890 priv = container_of(port, struct eg20t_port, port);
4891 - spin_lock_irqsave(&port->lock, flags);
4892 + spin_lock_irqsave(&priv->lock, flags);
4893 pch_uart_hal_set_break(priv, ctl);
4894 - spin_unlock_irqrestore(&port->lock, flags);
4895 + spin_unlock_irqrestore(&priv->lock, flags);
4896 }
4897
4898 /* Grab any interrupt resources and initialise any low level driver state. */
4899 @@ -1255,6 +1263,7 @@ static int pch_uart_startup(struct uart_port *port)
4900 break;
4901 case 16:
4902 fifo_size = PCH_UART_HAL_FIFO16;
4903 + break;
4904 case 1:
4905 default:
4906 fifo_size = PCH_UART_HAL_FIFO_DIS;
4907 @@ -1292,7 +1301,8 @@ static int pch_uart_startup(struct uart_port *port)
4908 pch_request_dma(port);
4909
4910 priv->start_rx = 1;
4911 - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
4912 + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
4913 + PCH_UART_HAL_RX_ERR_INT);
4914 uart_update_timeout(port, CS8, default_baud);
4915
4916 return 0;
4917 @@ -1350,7 +1360,7 @@ static void pch_uart_set_termios(struct uart_port *port,
4918 stb = PCH_UART_HAL_STB1;
4919
4920 if (termios->c_cflag & PARENB) {
4921 - if (!(termios->c_cflag & PARODD))
4922 + if (termios->c_cflag & PARODD)
4923 parity = PCH_UART_HAL_PARITY_ODD;
4924 else
4925 parity = PCH_UART_HAL_PARITY_EVEN;
4926 @@ -1368,7 +1378,8 @@ static void pch_uart_set_termios(struct uart_port *port,
4927
4928 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
4929
4930 - spin_lock_irqsave(&port->lock, flags);
4931 + spin_lock_irqsave(&priv->lock, flags);
4932 + spin_lock(&port->lock);
4933
4934 uart_update_timeout(port, termios->c_cflag, baud);
4935 rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
4936 @@ -1381,7 +1392,8 @@ static void pch_uart_set_termios(struct uart_port *port,
4937 tty_termios_encode_baud_rate(termios, baud, baud);
4938
4939 out:
4940 - spin_unlock_irqrestore(&port->lock, flags);
4941 + spin_unlock(&port->lock);
4942 + spin_unlock_irqrestore(&priv->lock, flags);
4943 }
4944
4945 static const char *pch_uart_type(struct uart_port *port)
4946 @@ -1531,8 +1543,9 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
4947 {
4948 struct eg20t_port *priv;
4949 unsigned long flags;
4950 + int priv_locked = 1;
4951 + int port_locked = 1;
4952 u8 ier;
4953 - int locked = 1;
4954
4955 priv = pch_uart_ports[co->index];
4956
4957 @@ -1540,12 +1553,16 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
4958
4959 local_irq_save(flags);
4960 if (priv->port.sysrq) {
4961 - /* serial8250_handle_port() already took the lock */
4962 - locked = 0;
4963 + spin_lock(&priv->lock);
4964 + /* serial8250_handle_port() already took the port lock */
4965 + port_locked = 0;
4966 } else if (oops_in_progress) {
4967 - locked = spin_trylock(&priv->port.lock);
4968 - } else
4969 + priv_locked = spin_trylock(&priv->lock);
4970 + port_locked = spin_trylock(&priv->port.lock);
4971 + } else {
4972 + spin_lock(&priv->lock);
4973 spin_lock(&priv->port.lock);
4974 + }
4975
4976 /*
4977 * First save the IER then disable the interrupts
4978 @@ -1563,8 +1580,10 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
4979 wait_for_xmitr(priv, BOTH_EMPTY);
4980 iowrite8(ier, priv->membase + UART_IER);
4981
4982 - if (locked)
4983 + if (port_locked)
4984 spin_unlock(&priv->port.lock);
4985 + if (priv_locked)
4986 + spin_unlock(&priv->lock);
4987 local_irq_restore(flags);
4988 }
4989
4990 @@ -1662,6 +1681,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
4991 pci_enable_msi(pdev);
4992 pci_set_master(pdev);
4993
4994 + spin_lock_init(&priv->lock);
4995 +
4996 iobase = pci_resource_start(pdev, 0);
4997 mapbase = pci_resource_start(pdev, 1);
4998 priv->mapbase = mapbase;
4999 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
5000 index 01d247e..524fe24 100644
5001 --- a/drivers/usb/class/cdc-wdm.c
5002 +++ b/drivers/usb/class/cdc-wdm.c
5003 @@ -134,12 +134,14 @@ static struct usb_driver wdm_driver;
5004 /* return intfdata if we own the interface, else look up intf in the list */
5005 static struct wdm_device *wdm_find_device(struct usb_interface *intf)
5006 {
5007 - struct wdm_device *desc = NULL;
5008 + struct wdm_device *desc;
5009
5010 spin_lock(&wdm_device_list_lock);
5011 list_for_each_entry(desc, &wdm_device_list, device_list)
5012 if (desc->intf == intf)
5013 - break;
5014 + goto found;
5015 + desc = NULL;
5016 +found:
5017 spin_unlock(&wdm_device_list_lock);
5018
5019 return desc;
5020 @@ -147,12 +149,14 @@ static struct wdm_device *wdm_find_device(struct usb_interface *intf)
5021
5022 static struct wdm_device *wdm_find_device_by_minor(int minor)
5023 {
5024 - struct wdm_device *desc = NULL;
5025 + struct wdm_device *desc;
5026
5027 spin_lock(&wdm_device_list_lock);
5028 list_for_each_entry(desc, &wdm_device_list, device_list)
5029 if (desc->intf->minor == minor)
5030 - break;
5031 + goto found;
5032 + desc = NULL;
5033 +found:
5034 spin_unlock(&wdm_device_list_lock);
5035
5036 return desc;
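
The two cdc-wdm hunks above replace a bare break inside list_for_each_entry() with a goto found plus an explicit desc = NULL fall-through: when nothing matches (or the list is empty), the iteration cursor is left pointing at an address computed from the list head, not at NULL. A minimal userspace sketch of that pitfall, assuming stripped-down stand-ins for the kernel's list macros (compile with gcc; it relies on the GNU typeof extension):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_for_each_entry(pos, head, member)                          \
        for (pos = list_entry((head)->next, typeof(*pos), member);          \
             &pos->member != (head);                                        \
             pos = list_entry(pos->member.next, typeof(*pos), member))

    struct item { int id; struct list_head node; };

    int main(void)
    {
        struct list_head empty = LIST_HEAD_INIT(empty);
        struct item *it = NULL;

        /* Old pattern: rely on the cursor after the loop. */
        list_for_each_entry(it, &empty, node)
            if (it->id == 42)
                break;
        /* No match: 'it' is NOT NULL, it aliases the list head. */
        printf("after plain loop:      it = %p (head = %p)\n",
               (void *)it, (void *)&empty);

        /* Fixed pattern: only a real hit keeps the cursor. */
        list_for_each_entry(it, &empty, node)
            if (it->id == 42)
                goto found;
        it = NULL;
    found:
        printf("after goto-found loop: it = %p\n", (void *)it);
        return 0;
    }

Checking the cursor for NULL after the first loop would wrongly report a hit; the explicit NULL assignment on the not-found path is what the fix depends on.
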
5037 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
5038 index d956965..3440812 100644
5039 --- a/drivers/usb/core/devices.c
5040 +++ b/drivers/usb/core/devices.c
5041 @@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
5042 /* print devices for all busses */
5043 list_for_each_entry(bus, &usb_bus_list, bus_list) {
5044 /* recurse through all children of the root hub */
5045 - if (!bus->root_hub)
5046 + if (!bus_to_hcd(bus)->rh_registered)
5047 continue;
5048 usb_lock_device(bus->root_hub);
5049 ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
5050 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
5051 index 140d3e1..e2cc8df 100644
5052 --- a/drivers/usb/core/hcd.c
5053 +++ b/drivers/usb/core/hcd.c
5054 @@ -1002,10 +1002,7 @@ static int register_root_hub(struct usb_hcd *hcd)
5055 if (retval) {
5056 dev_err (parent_dev, "can't register root hub for %s, %d\n",
5057 dev_name(&usb_dev->dev), retval);
5058 - }
5059 - mutex_unlock(&usb_bus_list_lock);
5060 -
5061 - if (retval == 0) {
5062 + } else {
5063 spin_lock_irq (&hcd_root_hub_lock);
5064 hcd->rh_registered = 1;
5065 spin_unlock_irq (&hcd_root_hub_lock);
5066 @@ -1014,6 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
5067 if (HCD_DEAD(hcd))
5068 usb_hc_died (hcd); /* This time clean up */
5069 }
5070 + mutex_unlock(&usb_bus_list_lock);
5071
5072 return retval;
5073 }
5074 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5075 index 32d3adc..8b2a9d8 100644
5076 --- a/drivers/usb/core/quirks.c
5077 +++ b/drivers/usb/core/quirks.c
5078 @@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = {
5079 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
5080 USB_QUIRK_CONFIG_INTF_STRINGS },
5081
5082 + /* Microchip Joss Optical infrared touchboard device */
5083 + { USB_DEVICE(0x04d8, 0x000c), .driver_info =
5084 + USB_QUIRK_CONFIG_INTF_STRINGS },
5085 +
5086 /* Samsung Android phone modem - ID conflict with SPH-I500 */
5087 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
5088 USB_QUIRK_CONFIG_INTF_STRINGS },
5089 diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
5090 index 3584a16..e4d87d7 100644
5091 --- a/drivers/usb/dwc3/ep0.c
5092 +++ b/drivers/usb/dwc3/ep0.c
5093 @@ -569,7 +569,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
5094 transferred = min_t(u32, ur->length,
5095 transfer_size - length);
5096 memcpy(ur->buf, dwc->ep0_bounce, transferred);
5097 - dwc->ep0_bounced = false;
5098 } else {
5099 transferred = ur->length - length;
5100 }
5101 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5102 index 5255fe9..b8d4697 100644
5103 --- a/drivers/usb/dwc3/gadget.c
5104 +++ b/drivers/usb/dwc3/gadget.c
5105 @@ -238,8 +238,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
5106 if (req->request.status == -EINPROGRESS)
5107 req->request.status = status;
5108
5109 - usb_gadget_unmap_request(&dwc->gadget, &req->request,
5110 - req->direction);
5111 + if (dwc->ep0_bounced && dep->number == 0)
5112 + dwc->ep0_bounced = false;
5113 + else
5114 + usb_gadget_unmap_request(&dwc->gadget, &req->request,
5115 + req->direction);
5116
5117 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
5118 req, dep->name, req->request.actual,
5119 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
5120 index 36ca507..7261e8f 100644
5121 --- a/drivers/usb/host/ehci-q.c
5122 +++ b/drivers/usb/host/ehci-q.c
5123 @@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
5124 else {
5125 qtd = list_entry (qh->qtd_list.next,
5126 struct ehci_qtd, qtd_list);
5127 - /* first qtd may already be partially processed */
5128 - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
5129 + /*
5130 + * first qtd may already be partially processed.
5131 + * If we come here during unlink, the QH overlay region
5132 + * might have reference to the just unlinked qtd. The
5133 + * qtd is updated in qh_completions(). Update the QH
5134 + * overlay here.
5135 + */
5136 + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
5137 + qh->hw->hw_qtd_next = qtd->hw_next;
5138 qtd = NULL;
5139 + }
5140 }
5141
5142 if (qtd)
5143 diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
5144 index 55d3d64..7841b0a 100644
5145 --- a/drivers/usb/host/ohci-at91.c
5146 +++ b/drivers/usb/host/ohci-at91.c
5147 @@ -466,7 +466,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
5148 /* From the GPIO notifying the over-current situation, find
5149 * out the corresponding port */
5150 at91_for_each_port(port) {
5151 - if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
5152 + if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
5153 + gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
5154 gpio = pdata->overcurrent_pin[port];
5155 break;
5156 }
5157 @@ -569,6 +570,16 @@ static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev)
5158
5159 if (pdata) {
5160 at91_for_each_port(i) {
5161 + /*
5162 + * do not configure the PIO if it does not correspond
5163 + * to a real USB port on the board
5164 + */
5165 + if (i >= pdata->ports) {
5166 + pdata->vbus_pin[i] = -EINVAL;
5167 + pdata->overcurrent_pin[i] = -EINVAL;
5168 + break;
5169 + }
5170 +
5171 if (!gpio_is_valid(pdata->vbus_pin[i]))
5172 continue;
5173 gpio = pdata->vbus_pin[i];
5174 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
5175 index c5e9e4a..966d148 100644
5176 --- a/drivers/usb/host/pci-quirks.c
5177 +++ b/drivers/usb/host/pci-quirks.c
5178 @@ -75,7 +75,9 @@
5179 #define NB_PIF0_PWRDOWN_1 0x01100013
5180
5181 #define USB_INTEL_XUSB2PR 0xD0
5182 +#define USB_INTEL_USB2PRM 0xD4
5183 #define USB_INTEL_USB3_PSSEN 0xD8
5184 +#define USB_INTEL_USB3PRM 0xDC
5185
5186 static struct amd_chipset_info {
5187 struct pci_dev *nb_dev;
5188 @@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
5189 return;
5190 }
5191
5192 - ports_available = 0xffffffff;
5193 + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register
5194 + * Indicate the ports that can be changed from OS.
5195 + */
5196 + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
5197 + &ports_available);
5198 +
5199 + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
5200 + ports_available);
5201 +
5202 /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
5203 - * Register, to turn on SuperSpeed terminations for all
5204 - * available ports.
5205 + * Register, to turn on SuperSpeed terminations for the
5206 + * switchable ports.
5207 */
5208 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
5209 cpu_to_le32(ports_available));
5210 @@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
5211 dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
5212 "under xHCI: 0x%x\n", ports_available);
5213
5214 - ports_available = 0xffffffff;
5215 + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
5216 + * Indicate the USB 2.0 ports to be controlled by the xHCI host.
5217 + */
5218 +
5219 + pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
5220 + &ports_available);
5221 +
5222 +	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
5223 + ports_available);
5224 +
5225 /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
5226 * switch the USB 2.0 power and data lines over to the xHCI
5227 * host.
5228 @@ -822,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
5229 void __iomem *op_reg_base;
5230 u32 val;
5231 int timeout;
5232 + int len = pci_resource_len(pdev, 0);
5233
5234 if (!mmio_resource_enabled(pdev, 0))
5235 return;
5236
5237 - base = ioremap_nocache(pci_resource_start(pdev, 0),
5238 - pci_resource_len(pdev, 0));
5239 + base = ioremap_nocache(pci_resource_start(pdev, 0), len);
5240 if (base == NULL)
5241 return;
5242
5243 @@ -837,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
5244 */
5245 ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
5246 do {
5247 + if ((ext_cap_offset + sizeof(val)) > len) {
5248 + /* We're reading garbage from the controller */
5249 + dev_warn(&pdev->dev,
5250 + "xHCI controller failing to respond");
5251 + return;
5252 + }
5253 +
5254 if (!ext_cap_offset)
5255 /* We've reached the end of the extended capabilities */
5256 goto hc_init;
5257 +
5258 val = readl(base + ext_cap_offset);
5259 if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
5260 break;
5261 @@ -870,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
5262 /* Disable any BIOS SMIs and clear all SMI events*/
5263 writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
5264
5265 +hc_init:
5266 if (usb_is_intel_switchable_xhci(pdev))
5267 usb_enable_xhci_ports(pdev);
5268 -hc_init:
5269 +
5270 op_reg_base = base + XHCI_HC_LENGTH(readl(base));
5271
5272 /* Wait for the host controller to be ready before writing any
5273 diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
5274 index ef004a5..7f69a39 100644
5275 --- a/drivers/usb/host/pci-quirks.h
5276 +++ b/drivers/usb/host/pci-quirks.h
5277 @@ -15,6 +15,7 @@ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
5278 static inline void usb_amd_quirk_pll_disable(void) {}
5279 static inline void usb_amd_quirk_pll_enable(void) {}
5280 static inline void usb_amd_dev_put(void) {}
5281 +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
5282 #endif /* CONFIG_PCI */
5283
5284 #endif /* __LINUX_USB_PCI_QUIRKS_H */
5285 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
5286 index bbf3c0c..de07b75 100644
5287 --- a/drivers/usb/host/xhci-hub.c
5288 +++ b/drivers/usb/host/xhci-hub.c
5289 @@ -493,11 +493,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
5290 * when this bit is set.
5291 */
5292 pls |= USB_PORT_STAT_CONNECTION;
5293 + } else {
5294 + /*
5295 + * If CAS bit isn't set but the Port is already at
5296 + * Compliance Mode, fake a connection so the USB core
5297 + * notices the Compliance state and resets the port.
5298 + * This resolves an issue generated by the SN65LVPE502CP
5299 + * in which sometimes the port enters compliance mode
5300 + * caused by a delay on the host-device negotiation.
5301 + */
5302 + if (pls == USB_SS_PORT_LS_COMP_MOD)
5303 + pls |= USB_PORT_STAT_CONNECTION;
5304 }
5305 +
5306 /* update status field */
5307 *status |= pls;
5308 }
5309
5310 +/*
5311 + * Function for Compliance Mode Quirk.
5312 + *
5313 + * This Function verifies if all xhc USB3 ports have entered U0, if so,
5314 + * the compliance mode timer is deleted. A port won't enter
5315 + * compliance mode if it has previously entered U0.
5316 + */
5317 +void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
5318 +{
5319 + u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
5320 + bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
5321 +
5322 + if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
5323 + return;
5324 +
5325 + if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
5326 + xhci->port_status_u0 |= 1 << wIndex;
5327 + if (xhci->port_status_u0 == all_ports_seen_u0) {
5328 + del_timer_sync(&xhci->comp_mode_recovery_timer);
5329 + xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
5330 + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
5331 + }
5332 + }
5333 +}
5334 +
5335 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
5336 u16 wIndex, char *buf, u16 wLength)
5337 {
5338 @@ -644,6 +681,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
5339 /* Update Port Link State for super speed ports*/
5340 if (hcd->speed == HCD_USB3) {
5341 xhci_hub_report_link_state(&status, temp);
5342 + /*
5343 + * Verify if all USB3 Ports Have entered U0 already.
5344 + * Delete Compliance Mode Timer if so.
5345 + */
5346 + xhci_del_comp_mod_timer(xhci, temp, wIndex);
5347 }
5348 if (bus_state->port_c_suspend & (1 << wIndex))
5349 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
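
xhci_del_comp_mod_timer() above tracks which USB3 ports have reached U0 in the port_status_u0 bitmask and deletes the recovery timer once the mask covers every port. A small standalone sketch of just that bookkeeping (num_ports and the reporting order are made-up example values):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the port_status_u0 bookkeeping from xhci_del_comp_mod_timer():
     * each port that reaches U0 sets its bit; once the mask equals
     * (1 << num_ports) - 1 the recovery timer would be deleted. */
    int main(void)
    {
        unsigned int num_ports = 4;                /* example value */
        uint32_t all_seen = (1u << num_ports) - 1; /* 0xF for 4 ports */
        uint32_t seen = 0;
        bool timer_running = true;

        /* Pretend ports 0, 2, 1, 3 report U0 in that order. */
        unsigned int order[] = { 0, 2, 1, 3 };
        for (unsigned int i = 0; i < num_ports; i++) {
            seen |= 1u << order[i];
            printf("port %u in U0, mask now 0x%x\n", order[i], seen);
            if (timer_running && seen == all_seen) {
                timer_running = false;
                printf("all ports seen U0 -> delete recovery timer\n");
            }
        }
        return 0;
    }
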
5350 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
5351 index 689bc18..df90fe5 100644
5352 --- a/drivers/usb/host/xhci-plat.c
5353 +++ b/drivers/usb/host/xhci-plat.c
5354 @@ -118,7 +118,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
5355 goto put_hcd;
5356 }
5357
5358 - hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
5359 + hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
5360 if (!hcd->regs) {
5361 dev_dbg(&pdev->dev, "error mapping memory\n");
5362 ret = -EFAULT;
5363 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5364 index 7beed53..6467d79 100644
5365 --- a/drivers/usb/host/xhci.c
5366 +++ b/drivers/usb/host/xhci.c
5367 @@ -26,6 +26,7 @@
5368 #include <linux/module.h>
5369 #include <linux/moduleparam.h>
5370 #include <linux/slab.h>
5371 +#include <linux/dmi.h>
5372
5373 #include "xhci.h"
5374
5375 @@ -398,6 +399,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
5376
5377 #endif
5378
5379 +static void compliance_mode_recovery(unsigned long arg)
5380 +{
5381 + struct xhci_hcd *xhci;
5382 + struct usb_hcd *hcd;
5383 + u32 temp;
5384 + int i;
5385 +
5386 + xhci = (struct xhci_hcd *)arg;
5387 +
5388 + for (i = 0; i < xhci->num_usb3_ports; i++) {
5389 + temp = xhci_readl(xhci, xhci->usb3_ports[i]);
5390 + if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
5391 + /*
5392 + * Compliance Mode Detected. Letting USB Core
5393 + * handle the Warm Reset
5394 + */
5395 + xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
5396 + i + 1);
5397 + xhci_dbg(xhci, "Attempting Recovery routine!\n");
5398 + hcd = xhci->shared_hcd;
5399 +
5400 + if (hcd->state == HC_STATE_SUSPENDED)
5401 + usb_hcd_resume_root_hub(hcd);
5402 +
5403 + usb_hcd_poll_rh_status(hcd);
5404 + }
5405 + }
5406 +
5407 + if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
5408 + mod_timer(&xhci->comp_mode_recovery_timer,
5409 + jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
5410 +}
5411 +
5412 +/*
5413 + * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
5414 + * that causes ports behind that hardware to enter compliance mode sometimes.
5415 + * The quirk creates a timer that polls every 2 seconds the link state of
5416 + * each host controller's port and recovers it by issuing a Warm reset
5417 + * if Compliance mode is detected, otherwise the port will become "dead" (no
5418 + * device connections or disconnections will be detected anymore). Because no
5419 + * status event is generated when entering compliance mode (per xhci spec),
5420 + * this quirk is needed on systems that have the failing hardware installed.
5421 + */
5422 +static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
5423 +{
5424 + xhci->port_status_u0 = 0;
5425 + init_timer(&xhci->comp_mode_recovery_timer);
5426 +
5427 + xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
5428 + xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
5429 + xhci->comp_mode_recovery_timer.expires = jiffies +
5430 + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
5431 +
5432 + set_timer_slack(&xhci->comp_mode_recovery_timer,
5433 + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
5434 + add_timer(&xhci->comp_mode_recovery_timer);
5435 + xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
5436 +}
5437 +
5438 +/*
5439 + * This function identifies the systems that have installed the SN65LVPE502CP
5440 + * USB3.0 re-driver and that need the Compliance Mode Quirk.
5441 + * Systems:
5442 + * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
5443 + */
5444 +static bool compliance_mode_recovery_timer_quirk_check(void)
5445 +{
5446 + const char *dmi_product_name, *dmi_sys_vendor;
5447 +
5448 + dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
5449 + dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
5450 +
5451 + if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
5452 + return false;
5453 +
5454 + if (strstr(dmi_product_name, "Z420") ||
5455 + strstr(dmi_product_name, "Z620") ||
5456 + strstr(dmi_product_name, "Z820"))
5457 + return true;
5458 +
5459 + return false;
5460 +}
5461 +
5462 +static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
5463 +{
5464 + return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
5465 +}
5466 +
5467 +
5468 /*
5469 * Initialize memory for HCD and xHC (one-time init).
5470 *
5471 @@ -421,6 +511,12 @@ int xhci_init(struct usb_hcd *hcd)
5472 retval = xhci_mem_init(xhci, GFP_KERNEL);
5473 xhci_dbg(xhci, "Finished xhci_init\n");
5474
5475 + /* Initializing Compliance Mode Recovery Data If Needed */
5476 + if (compliance_mode_recovery_timer_quirk_check()) {
5477 + xhci->quirks |= XHCI_COMP_MODE_QUIRK;
5478 + compliance_mode_recovery_timer_init(xhci);
5479 + }
5480 +
5481 return retval;
5482 }
5483
5484 @@ -629,6 +725,11 @@ void xhci_stop(struct usb_hcd *hcd)
5485 del_timer_sync(&xhci->event_ring_timer);
5486 #endif
5487
5488 + /* Deleting Compliance Mode Recovery Timer */
5489 + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
5490 + (!(xhci_all_ports_seen_u0(xhci))))
5491 + del_timer_sync(&xhci->comp_mode_recovery_timer);
5492 +
5493 if (xhci->quirks & XHCI_AMD_PLL_FIX)
5494 usb_amd_dev_put();
5495
5496 @@ -659,7 +760,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
5497 {
5498 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5499
5500 - if (xhci->quirks && XHCI_SPURIOUS_REBOOT)
5501 + if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
5502 usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
5503
5504 spin_lock_irq(&xhci->lock);
5505 @@ -806,6 +907,16 @@ int xhci_suspend(struct xhci_hcd *xhci)
5506 }
5507 spin_unlock_irq(&xhci->lock);
5508
5509 + /*
5510 + * Deleting Compliance Mode Recovery Timer because the xHCI Host
5511 + * is about to be suspended.
5512 + */
5513 + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
5514 + (!(xhci_all_ports_seen_u0(xhci)))) {
5515 + del_timer_sync(&xhci->comp_mode_recovery_timer);
5516 + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
5517 + }
5518 +
5519 /* step 5: remove core well power */
5520 /* synchronize irq when using MSI-X */
5521 xhci_msix_sync_irqs(xhci);
5522 @@ -938,6 +1049,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
5523 usb_hcd_resume_root_hub(hcd);
5524 usb_hcd_resume_root_hub(xhci->shared_hcd);
5525 }
5526 +
5527 + /*
5528 + * If the system is subject to the quirk, the Compliance Mode Timer
5529 + * always needs to be re-initialized after a system resume, since the
5530 + * ports may hit the Compliance Mode issue again regardless of whether
5531 + * they had already entered U0 before the suspend.
5532 + */
5533 + if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
5534 + compliance_mode_recovery_timer_init(xhci);
5535 +
5536 return retval;
5537 }
5538 #endif /* CONFIG_PM */
5539 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
5540 index 19ae30f..6e77f3b 100644
5541 --- a/drivers/usb/host/xhci.h
5542 +++ b/drivers/usb/host/xhci.h
5543 @@ -1487,6 +1487,7 @@ struct xhci_hcd {
5544 #define XHCI_AMD_0x96_HOST (1 << 9)
5545 #define XHCI_TRUST_TX_LENGTH (1 << 10)
5546 #define XHCI_SPURIOUS_REBOOT (1 << 13)
5547 +#define XHCI_COMP_MODE_QUIRK (1 << 14)
5548 unsigned int num_active_eps;
5549 unsigned int limit_active_eps;
5550 /* There are two roothubs to keep track of bus suspend info for */
5551 @@ -1503,6 +1504,11 @@ struct xhci_hcd {
5552 unsigned sw_lpm_support:1;
5553 /* support xHCI 1.0 spec USB2 hardware LPM */
5554 unsigned hw_lpm_support:1;
5555 + /* Compliance Mode Recovery Data */
5556 + struct timer_list comp_mode_recovery_timer;
5557 + u32 port_status_u0;
5558 +/* Compliance Mode Timer Triggered every 2 seconds */
5559 +#define COMP_MODE_RCVRY_MSECS 2000
5560 };
5561
5562 /* convert between an HCD pointer and the corresponding EHCI_HCD */
5563 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
5564 index f5819cb..4d2b7d3 100644
5565 --- a/drivers/usb/serial/ftdi_sio.c
5566 +++ b/drivers/usb/serial/ftdi_sio.c
5567 @@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = {
5568 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
5569 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
5570 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
5571 + { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
5572 { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
5573 { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
5574 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
5575 @@ -804,13 +805,32 @@ static struct usb_device_id id_table_combined [] = {
5576 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
5577 { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
5578 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
5579 - { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
5580 + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
5581 + USB_CLASS_VENDOR_SPEC,
5582 + USB_SUBCLASS_VENDOR_SPEC, 0x00) },
5583 { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
5584 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
5585 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
5586 { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
5587 { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
5588 + { USB_DEVICE(FTDI_VID, PI_C865_PID) },
5589 + { USB_DEVICE(FTDI_VID, PI_C857_PID) },
5590 + { USB_DEVICE(PI_VID, PI_C866_PID) },
5591 + { USB_DEVICE(PI_VID, PI_C663_PID) },
5592 + { USB_DEVICE(PI_VID, PI_C725_PID) },
5593 + { USB_DEVICE(PI_VID, PI_E517_PID) },
5594 + { USB_DEVICE(PI_VID, PI_C863_PID) },
5595 { USB_DEVICE(PI_VID, PI_E861_PID) },
5596 + { USB_DEVICE(PI_VID, PI_C867_PID) },
5597 + { USB_DEVICE(PI_VID, PI_E609_PID) },
5598 + { USB_DEVICE(PI_VID, PI_E709_PID) },
5599 + { USB_DEVICE(PI_VID, PI_100F_PID) },
5600 + { USB_DEVICE(PI_VID, PI_1011_PID) },
5601 + { USB_DEVICE(PI_VID, PI_1012_PID) },
5602 + { USB_DEVICE(PI_VID, PI_1013_PID) },
5603 + { USB_DEVICE(PI_VID, PI_1014_PID) },
5604 + { USB_DEVICE(PI_VID, PI_1015_PID) },
5605 + { USB_DEVICE(PI_VID, PI_1016_PID) },
5606 { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
5607 { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
5608 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
5609 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
5610 index 5dd96ca..41fe582 100644
5611 --- a/drivers/usb/serial/ftdi_sio_ids.h
5612 +++ b/drivers/usb/serial/ftdi_sio_ids.h
5613 @@ -75,6 +75,9 @@
5614 #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
5615 #define FTDI_OPENDCC_GBM_PID 0xBFDC
5616
5617 +/* NZR SEM 16+ USB (http://www.nzr.de) */
5618 +#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
5619 +
5620 /*
5621 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
5622 */
5623 @@ -539,7 +542,10 @@
5624 /*
5625 * Microchip Technology, Inc.
5626 *
5627 - * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
5628 + * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
5629 + * used by single function CDC ACM class based firmware demo
5630 + * applications. The VID/PID has also been used in firmware
5631 + * emulating FTDI serial chips by:
5632 * Hornby Elite - Digital Command Control Console
5633 * http://www.hornby.com/hornby-dcc/controllers/
5634 */
5635 @@ -791,8 +797,27 @@
5636 * Physik Instrumente
5637 * http://www.physikinstrumente.com/en/products/
5638 */
5639 +/* These two devices use the VID of FTDI */
5640 +#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */
5641 +#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */
5642 +
5643 #define PI_VID 0x1a72 /* Vendor ID */
5644 -#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
5645 +#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */
5646 +#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */
5647 +#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */
5648 +#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */
5649 +#define PI_C863_PID 0x1007 /* PI C-863 */
5650 +#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */
5651 +#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */
5652 +#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */
5653 +#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */
5654 +#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */
5655 +#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */
5656 +#define PI_1012_PID 0x1012 /* PI Motion Controller */
5657 +#define PI_1013_PID 0x1013 /* PI Motion Controller */
5658 +#define PI_1014_PID 0x1014 /* PI Device */
5659 +#define PI_1015_PID 0x1015 /* PI Device */
5660 +#define PI_1016_PID 0x1016 /* PI Digital Servo Module */
5661
5662 /*
5663 * Kondo Kagaku Co.Ltd.
5664 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5665 index ee693cc..a49099d 100644
5666 --- a/drivers/usb/serial/option.c
5667 +++ b/drivers/usb/serial/option.c
5668 @@ -886,8 +886,6 @@ static const struct usb_device_id option_ids[] = {
5669 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
5670 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
5671 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
5672 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
5673 - .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
5674 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
5675 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
5676 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
5677 @@ -1092,6 +1090,10 @@ static const struct usb_device_id option_ids[] = {
5678 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
5679 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
5680 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
5681 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
5682 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
5683 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
5684 +
5685 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
5686 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
5687 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
5688 diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
5689 index 2e471c2..88e9204 100644
5690 --- a/drivers/video/console/fbcon.c
5691 +++ b/drivers/video/console/fbcon.c
5692 @@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work)
5693 struct vc_data *vc = NULL;
5694 int c;
5695 int mode;
5696 + int ret;
5697 +
5698 + /* FIXME: we should sort out the unbind locking instead */
5699 + /* instead we just fail to flash the cursor if we can't get
5700 + * the lock instead of blocking fbcon deinit */
5701 + ret = console_trylock();
5702 + if (ret == 0)
5703 + return;
5704
5705 - console_lock();
5706 if (ops && ops->currcon != -1)
5707 vc = vc_cons[ops->currcon].d;
5708
5709 diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
5710 index 9f13b89..6019929 100644
5711 --- a/drivers/watchdog/hpwdt.c
5712 +++ b/drivers/watchdog/hpwdt.c
5713 @@ -806,6 +806,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
5714 hpwdt_timer_reg = pci_mem_addr + 0x70;
5715 hpwdt_timer_con = pci_mem_addr + 0x72;
5716
5717 + /* Make sure that timer is disabled until /dev/watchdog is opened */
5718 + hpwdt_stop();
5719 +
5720 /* Make sure that we have a valid soft_margin */
5721 if (hpwdt_change_timer(soft_margin))
5722 hpwdt_change_timer(DEFAULT_MARGIN);
5723 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
5724 index 1ffd03b..7f12416 100644
5725 --- a/drivers/xen/gntdev.c
5726 +++ b/drivers/xen/gntdev.c
5727 @@ -314,8 +314,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
5728 }
5729 }
5730
5731 - err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
5732 - pages, true);
5733 + err = gnttab_unmap_refs(map->unmap_ops + offset,
5734 + use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
5735 + pages);
5736 if (err)
5737 return err;
5738
5739 diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
5740 index f100ce2..fda491c 100644
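
The target_core_transport hunk above keeps the fabric-supplied ->data_length for the overflow case and shrinks it to the CDB-implied size only for underflow. A hedged sketch of that residual arithmetic — struct cmd here is a simplified stand-in, not struct se_cmd:

    #include <stdio.h>
    #include <stdint.h>

    struct cmd { uint32_t data_length; uint32_t residual_count; int overflow, underflow; };

    /* 'size' is the transfer length implied by the CDB; cmd->data_length is
     * what the fabric allocated. Mirrors the fixed hunk: only the underflow
     * case rewrites ->data_length. */
    static void fixup_length(struct cmd *cmd, uint32_t size)
    {
        if (size == cmd->data_length)
            return;
        if (size > cmd->data_length) {
            cmd->overflow = 1;
            cmd->residual_count = size - cmd->data_length;
            /* keep the fabric-provided data_length */
        } else {
            cmd->underflow = 1;
            cmd->residual_count = cmd->data_length - size;
            cmd->data_length = size;
        }
    }

    int main(void)
    {
        struct cmd over  = { .data_length = 512 };
        struct cmd under = { .data_length = 4096 };

        fixup_length(&over, 1024);   /* CDB asks for more than allocated */
        fixup_length(&under, 1024);  /* CDB asks for less than allocated */

        printf("overflow:  len=%u resid=%u\n", over.data_length, over.residual_count);
        printf("underflow: len=%u resid=%u\n", under.data_length, under.residual_count);
        return 0;
    }
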
5741 --- a/drivers/xen/grant-table.c
5742 +++ b/drivers/xen/grant-table.c
5743 @@ -774,7 +774,8 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
5744 EXPORT_SYMBOL_GPL(gnttab_map_refs);
5745
5746 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
5747 - struct page **pages, unsigned int count, bool clear_pte)
5748 + struct gnttab_map_grant_ref *kmap_ops,
5749 + struct page **pages, unsigned int count)
5750 {
5751 int i, ret;
5752
5753 @@ -786,7 +787,8 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
5754 return ret;
5755
5756 for (i = 0; i < count; i++) {
5757 - ret = m2p_remove_override(pages[i], clear_pte);
5758 + ret = m2p_remove_override(pages[i], kmap_ops ?
5759 + &kmap_ops[i] : NULL);
5760 if (ret)
5761 return ret;
5762 }
5763 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
5764 index fbb9da9..33ef60d 100644
5765 --- a/fs/cifs/cifs_unicode.c
5766 +++ b/fs/cifs/cifs_unicode.c
5767 @@ -328,6 +328,6 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
5768 }
5769
5770 ctoUTF16_out:
5771 - return i;
5772 + return j;
5773 }
5774
5775 diff --git a/fs/dcache.c b/fs/dcache.c
5776 index b80531c..10fab26 100644
5777 --- a/fs/dcache.c
5778 +++ b/fs/dcache.c
5779 @@ -373,7 +373,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
5780 * Inform try_to_ascend() that we are no longer attached to the
5781 * dentry tree
5782 */
5783 - dentry->d_flags |= DCACHE_DISCONNECTED;
5784 + dentry->d_flags |= DCACHE_DENTRY_KILLED;
5785 if (parent)
5786 spin_unlock(&parent->d_lock);
5787 dentry_iput(dentry);
5788 @@ -1030,7 +1030,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
5789 * or deletion
5790 */
5791 if (new != old->d_parent ||
5792 - (old->d_flags & DCACHE_DISCONNECTED) ||
5793 + (old->d_flags & DCACHE_DENTRY_KILLED) ||
5794 (!locked && read_seqretry(&rename_lock, seq))) {
5795 spin_unlock(&new->d_lock);
5796 new = NULL;
5797 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
5798 index ab35b11..6f5fb1a 100644
5799 --- a/fs/ecryptfs/inode.c
5800 +++ b/fs/ecryptfs/inode.c
5801 @@ -621,6 +621,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5802 struct dentry *lower_old_dir_dentry;
5803 struct dentry *lower_new_dir_dentry;
5804 struct dentry *trap = NULL;
5805 + struct inode *target_inode;
5806
5807 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
5808 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
5809 @@ -628,6 +629,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5810 dget(lower_new_dentry);
5811 lower_old_dir_dentry = dget_parent(lower_old_dentry);
5812 lower_new_dir_dentry = dget_parent(lower_new_dentry);
5813 + target_inode = new_dentry->d_inode;
5814 trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
5815 /* source should not be ancestor of target */
5816 if (trap == lower_old_dentry) {
5817 @@ -643,6 +645,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5818 lower_new_dir_dentry->d_inode, lower_new_dentry);
5819 if (rc)
5820 goto out_lock;
5821 + if (target_inode)
5822 + fsstack_copy_attr_all(target_inode,
5823 + ecryptfs_inode_to_lower(target_inode));
5824 fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
5825 if (new_dir != old_dir)
5826 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
5827 diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
5828 index 3250f28..58ddc38 100644
5829 --- a/fs/lockd/svc.c
5830 +++ b/fs/lockd/svc.c
5831 @@ -251,10 +251,9 @@ out_err:
5832 return err;
5833 }
5834
5835 -static int lockd_up_net(struct net *net)
5836 +static int lockd_up_net(struct svc_serv *serv, struct net *net)
5837 {
5838 struct lockd_net *ln = net_generic(net, lockd_net_id);
5839 - struct svc_serv *serv = nlmsvc_rqst->rq_server;
5840 int error;
5841
5842 if (ln->nlmsvc_users++)
5843 @@ -276,10 +275,9 @@ err_rpcb:
5844 return error;
5845 }
5846
5847 -static void lockd_down_net(struct net *net)
5848 +static void lockd_down_net(struct svc_serv *serv, struct net *net)
5849 {
5850 struct lockd_net *ln = net_generic(net, lockd_net_id);
5851 - struct svc_serv *serv = nlmsvc_rqst->rq_server;
5852
5853 if (ln->nlmsvc_users) {
5854 if (--ln->nlmsvc_users == 0) {
5855 @@ -307,7 +305,7 @@ int lockd_up(struct net *net)
5856 * Check whether we're already up and running.
5857 */
5858 if (nlmsvc_rqst) {
5859 - error = lockd_up_net(net);
5860 + error = lockd_up_net(nlmsvc_rqst->rq_server, net);
5861 goto out;
5862 }
5863
5864 @@ -378,7 +376,7 @@ out:
5865 return error;
5866
5867 err_start:
5868 - lockd_down_net(net);
5869 + lockd_down_net(serv, net);
5870 goto destroy_and_out;
5871 }
5872 EXPORT_SYMBOL_GPL(lockd_up);
5873 @@ -390,7 +388,7 @@ void
5874 lockd_down(struct net *net)
5875 {
5876 mutex_lock(&nlmsvc_mutex);
5877 - lockd_down_net(net);
5878 + lockd_down_net(nlmsvc_rqst->rq_server, net);
5879 if (nlmsvc_users) {
5880 if (--nlmsvc_users)
5881 goto out;
5882 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
5883 index e8bbfa5..edf4119 100644
5884 --- a/fs/nfs/inode.c
5885 +++ b/fs/nfs/inode.c
5886 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
5887 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
5888 nfsi->attrtimeo_timestamp = jiffies;
5889
5890 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
5891 + memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
5892 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
5893 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
5894 else
5895 diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
5896 index a1e416b..a7a043d 100644
5897 --- a/fs/nfs/nfs3proc.c
5898 +++ b/fs/nfs/nfs3proc.c
5899 @@ -644,7 +644,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
5900 u64 cookie, struct page **pages, unsigned int count, int plus)
5901 {
5902 struct inode *dir = dentry->d_inode;
5903 - __be32 *verf = NFS_COOKIEVERF(dir);
5904 + __be32 *verf = NFS_I(dir)->cookieverf;
5905 struct nfs3_readdirargs arg = {
5906 .fh = NFS_FH(dir),
5907 .cookie = cookie,
5908 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5909 index dc57324..b106b97 100644
5910 --- a/fs/nfs/nfs4proc.c
5911 +++ b/fs/nfs/nfs4proc.c
5912 @@ -3150,11 +3150,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
5913 dentry->d_parent->d_name.name,
5914 dentry->d_name.name,
5915 (unsigned long long)cookie);
5916 - nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
5917 + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
5918 res.pgbase = args.pgbase;
5919 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5920 if (status >= 0) {
5921 - memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
5922 + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
5923 status += args.pgbase;
5924 }
5925
5926 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
5927 index c54aae3..c8ac9a1 100644
5928 --- a/fs/nfs/nfs4xdr.c
5929 +++ b/fs/nfs/nfs4xdr.c
5930 @@ -6081,7 +6081,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5931 status = decode_open(xdr, res);
5932 if (status)
5933 goto out;
5934 - if (decode_getfh(xdr, &res->fh) != 0)
5935 + status = decode_getfh(xdr, &res->fh);
5936 + if (status)
5937 goto out;
5938 if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
5939 goto out;
5940 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
5941 index 7b55f51..5976e24 100644
5942 --- a/fs/nfs/super.c
5943 +++ b/fs/nfs/super.c
5944 @@ -1886,6 +1886,7 @@ static int nfs_validate_mount_data(void *options,
5945
5946 memcpy(sap, &data->addr, sizeof(data->addr));
5947 args->nfs_server.addrlen = sizeof(data->addr);
5948 + args->nfs_server.port = ntohs(data->addr.sin_port);
5949 if (!nfs_verify_server_address(sap))
5950 goto out_no_address;
5951
5952 @@ -2598,6 +2599,7 @@ static int nfs4_validate_mount_data(void *options,
5953 return -EFAULT;
5954 if (!nfs_verify_server_address(sap))
5955 goto out_no_address;
5956 + args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
5957
5958 if (data->auth_flavourlen) {
5959 if (data->auth_flavourlen > 1)
5960 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
5961 index 3ab12eb..d014727 100644
5962 --- a/fs/nfsd/nfsctl.c
5963 +++ b/fs/nfsd/nfsctl.c
5964 @@ -663,9 +663,7 @@ static ssize_t __write_ports_addfd(char *buf)
5965
5966 err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
5967 if (err < 0) {
5968 - if (nfsd_serv->sv_nrthreads == 1)
5969 - svc_shutdown_net(nfsd_serv, net);
5970 - svc_destroy(nfsd_serv);
5971 + nfsd_destroy(net);
5972 return err;
5973 }
5974
5975 @@ -734,9 +732,7 @@ out_close:
5976 svc_xprt_put(xprt);
5977 }
5978 out_err:
5979 - if (nfsd_serv->sv_nrthreads == 1)
5980 - svc_shutdown_net(nfsd_serv, net);
5981 - svc_destroy(nfsd_serv);
5982 + nfsd_destroy(net);
5983 return err;
5984 }
5985
5986 diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
5987 index 1671429..1336a65 100644
5988 --- a/fs/nfsd/nfsd.h
5989 +++ b/fs/nfsd/nfsd.h
5990 @@ -73,6 +73,17 @@ int nfsd_nrpools(void);
5991 int nfsd_get_nrthreads(int n, int *);
5992 int nfsd_set_nrthreads(int n, int *);
5993
5994 +static inline void nfsd_destroy(struct net *net)
5995 +{
5996 + int destroy = (nfsd_serv->sv_nrthreads == 1);
5997 +
5998 + if (destroy)
5999 + svc_shutdown_net(nfsd_serv, net);
6000 + svc_destroy(nfsd_serv);
6001 + if (destroy)
6002 + nfsd_serv = NULL;
6003 +}
6004 +
6005 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
6006 #ifdef CONFIG_NFSD_V2_ACL
6007 extern struct svc_version nfsd_acl_version2;
6008 diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
6009 index bcda12a..b6f8e65 100644
6010 --- a/fs/nfsd/nfssvc.c
6011 +++ b/fs/nfsd/nfssvc.c
6012 @@ -254,8 +254,6 @@ static void nfsd_shutdown(void)
6013
6014 static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
6015 {
6016 - /* When last nfsd thread exits we need to do some clean-up */
6017 - nfsd_serv = NULL;
6018 nfsd_shutdown();
6019
6020 svc_rpcb_cleanup(serv, net);
6021 @@ -332,6 +330,7 @@ static int nfsd_get_default_max_blksize(void)
6022 int nfsd_create_serv(void)
6023 {
6024 int error;
6025 + struct net *net = current->nsproxy->net_ns;
6026
6027 WARN_ON(!mutex_is_locked(&nfsd_mutex));
6028 if (nfsd_serv) {
6029 @@ -346,7 +345,7 @@ int nfsd_create_serv(void)
6030 if (nfsd_serv == NULL)
6031 return -ENOMEM;
6032
6033 - error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
6034 + error = svc_bind(nfsd_serv, net);
6035 if (error < 0) {
6036 svc_destroy(nfsd_serv);
6037 return error;
6038 @@ -427,11 +426,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
6039 if (err)
6040 break;
6041 }
6042 -
6043 - if (nfsd_serv->sv_nrthreads == 1)
6044 - svc_shutdown_net(nfsd_serv, net);
6045 - svc_destroy(nfsd_serv);
6046 -
6047 + nfsd_destroy(net);
6048 return err;
6049 }
6050
6051 @@ -478,9 +473,7 @@ out_shutdown:
6052 if (error < 0 && !nfsd_up_before)
6053 nfsd_shutdown();
6054 out_destroy:
6055 - if (nfsd_serv->sv_nrthreads == 1)
6056 - svc_shutdown_net(nfsd_serv, net);
6057 - svc_destroy(nfsd_serv); /* Release server */
6058 + nfsd_destroy(net); /* Release server */
6059 out:
6060 mutex_unlock(&nfsd_mutex);
6061 return error;
6062 @@ -563,12 +556,13 @@ nfsd(void *vrqstp)
6063 nfsdstats.th_cnt --;
6064
6065 out:
6066 - if (rqstp->rq_server->sv_nrthreads == 1)
6067 - svc_shutdown_net(rqstp->rq_server, &init_net);
6068 + rqstp->rq_server = NULL;
6069
6070 /* Release the thread */
6071 svc_exit_thread(rqstp);
6072
6073 + nfsd_destroy(&init_net);
6074 +
6075 /* Release module */
6076 mutex_unlock(&nfsd_mutex);
6077 module_put_and_exit(0);
6078 @@ -682,9 +676,7 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
6079
6080 mutex_lock(&nfsd_mutex);
6081 /* this function really, really should have been called svc_put() */
6082 - if (nfsd_serv->sv_nrthreads == 1)
6083 - svc_shutdown_net(nfsd_serv, net);
6084 - svc_destroy(nfsd_serv);
6085 + nfsd_destroy(net);
6086 mutex_unlock(&nfsd_mutex);
6087 return ret;
6088 }
6089 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
6090 index 21d836f..ab53521 100644
6091 --- a/fs/proc/proc_sysctl.c
6092 +++ b/fs/proc/proc_sysctl.c
6093 @@ -462,9 +462,6 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
6094
6095 err = ERR_PTR(-ENOMEM);
6096 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
6097 - if (h)
6098 - sysctl_head_finish(h);
6099 -
6100 if (!inode)
6101 goto out;
6102
6103 @@ -473,6 +470,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
6104 d_add(dentry, inode);
6105
6106 out:
6107 + if (h)
6108 + sysctl_head_finish(h);
6109 sysctl_head_finish(head);
6110 return err;
6111 }
6112 diff --git a/fs/stat.c b/fs/stat.c
6113 index c733dc5..dc6d0be 100644
6114 --- a/fs/stat.c
6115 +++ b/fs/stat.c
6116 @@ -57,7 +57,7 @@ EXPORT_SYMBOL(vfs_getattr);
6117
6118 int vfs_fstat(unsigned int fd, struct kstat *stat)
6119 {
6120 - struct file *f = fget(fd);
6121 + struct file *f = fget_raw(fd);
6122 int error = -EBADF;
6123
6124 if (f) {
6125 diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
6126 index 580a6d3..c04e0db 100644
6127 --- a/include/asm-generic/mutex-xchg.h
6128 +++ b/include/asm-generic/mutex-xchg.h
6129 @@ -26,7 +26,13 @@ static inline void
6130 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
6131 {
6132 if (unlikely(atomic_xchg(count, 0) != 1))
6133 - fail_fn(count);
6134 + /*
6135 + * We failed to acquire the lock, so mark it contended
6136 + * to ensure that any waiting tasks are woken up by the
6137 + * unlock slow path.
6138 + */
6139 + if (likely(atomic_xchg(count, -1) != 1))
6140 + fail_fn(count);
6141 }
6142
6143 /**
6144 @@ -43,7 +49,8 @@ static inline int
6145 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
6146 {
6147 if (unlikely(atomic_xchg(count, 0) != 1))
6148 - return fail_fn(count);
6149 + if (likely(atomic_xchg(count, -1) != 1))
6150 + return fail_fn(count);
6151 return 0;
6152 }
6153
6154 diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
6155 index 4a0aae3..9242310 100644
6156 --- a/include/drm/drm_mode.h
6157 +++ b/include/drm/drm_mode.h
6158 @@ -343,8 +343,9 @@ struct drm_mode_mode_cmd {
6159 struct drm_mode_modeinfo mode;
6160 };
6161
6162 -#define DRM_MODE_CURSOR_BO (1<<0)
6163 -#define DRM_MODE_CURSOR_MOVE (1<<1)
6164 +#define DRM_MODE_CURSOR_BO 0x01
6165 +#define DRM_MODE_CURSOR_MOVE 0x02
6166 +#define DRM_MODE_CURSOR_FLAGS 0x03
6167
6168 /*
6169 * depending on the value in flags different members are used.
6170 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
6171 index 7e11f14..1332df0 100644
6172 --- a/include/linux/dcache.h
6173 +++ b/include/linux/dcache.h
6174 @@ -191,6 +191,8 @@ struct dentry_operations {
6175 #define DCACHE_MANAGED_DENTRY \
6176 (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
6177
6178 +#define DCACHE_DENTRY_KILLED 0x100000
6179 +
6180 extern seqlock_t rename_lock;
6181
6182 static inline int dname_external(struct dentry *dentry)
6183 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
6184 index e4baff5..e7bafa4 100644
6185 --- a/include/linux/init_task.h
6186 +++ b/include/linux/init_task.h
6187 @@ -123,8 +123,17 @@ extern struct group_info init_groups;
6188
6189 extern struct cred init_cred;
6190
6191 +extern struct task_group root_task_group;
6192 +
6193 +#ifdef CONFIG_CGROUP_SCHED
6194 +# define INIT_CGROUP_SCHED(tsk) \
6195 + .sched_task_group = &root_task_group,
6196 +#else
6197 +# define INIT_CGROUP_SCHED(tsk)
6198 +#endif
6199 +
6200 #ifdef CONFIG_PERF_EVENTS
6201 -# define INIT_PERF_EVENTS(tsk) \
6202 +# define INIT_PERF_EVENTS(tsk) \
6203 .perf_event_mutex = \
6204 __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
6205 .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
6206 @@ -161,6 +170,7 @@ extern struct cred init_cred;
6207 }, \
6208 .tasks = LIST_HEAD_INIT(tsk.tasks), \
6209 INIT_PUSHABLE_TASKS(tsk) \
6210 + INIT_CGROUP_SCHED(tsk) \
6211 .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
6212 .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
6213 .real_parent = &tsk, \
6214 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
6215 index fc615a9..1e57449 100644
6216 --- a/include/linux/kobject.h
6217 +++ b/include/linux/kobject.h
6218 @@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
6219
6220 static inline __printf(2, 3)
6221 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
6222 -{ return 0; }
6223 +{ return -ENOMEM; }
6224
6225 static inline int kobject_action_type(const char *buf, size_t count,
6226 enum kobject_action *type)
6227 diff --git a/include/linux/kthread.h b/include/linux/kthread.h
6228 index 0714b24..22ccf9d 100644
6229 --- a/include/linux/kthread.h
6230 +++ b/include/linux/kthread.h
6231 @@ -49,8 +49,6 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
6232 * can be queued and flushed using queue/flush_kthread_work()
6233 * respectively. Queued kthread_works are processed by a kthread
6234 * running kthread_worker_fn().
6235 - *
6236 - * A kthread_work can't be freed while it is executing.
6237 */
6238 struct kthread_work;
6239 typedef void (*kthread_work_func_t)(struct kthread_work *work);
6240 @@ -59,15 +57,14 @@ struct kthread_worker {
6241 spinlock_t lock;
6242 struct list_head work_list;
6243 struct task_struct *task;
6244 + struct kthread_work *current_work;
6245 };
6246
6247 struct kthread_work {
6248 struct list_head node;
6249 kthread_work_func_t func;
6250 wait_queue_head_t done;
6251 - atomic_t flushing;
6252 - int queue_seq;
6253 - int done_seq;
6254 + struct kthread_worker *worker;
6255 };
6256
6257 #define KTHREAD_WORKER_INIT(worker) { \
6258 @@ -79,7 +76,6 @@ struct kthread_work {
6259 .node = LIST_HEAD_INIT((work).node), \
6260 .func = (fn), \
6261 .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \
6262 - .flushing = ATOMIC_INIT(0), \
6263 }
6264
6265 #define DEFINE_KTHREAD_WORKER(worker) \
6266 diff --git a/include/linux/ktime.h b/include/linux/ktime.h
6267 index 603bec2..06177ba10 100644
6268 --- a/include/linux/ktime.h
6269 +++ b/include/linux/ktime.h
6270 @@ -58,13 +58,6 @@ union ktime {
6271
6272 typedef union ktime ktime_t; /* Kill this */
6273
6274 -#define KTIME_MAX ((s64)~((u64)1 << 63))
6275 -#if (BITS_PER_LONG == 64)
6276 -# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
6277 -#else
6278 -# define KTIME_SEC_MAX LONG_MAX
6279 -#endif
6280 -
6281 /*
6282 * ktime_t definitions when using the 64-bit scalar representation:
6283 */
6284 diff --git a/include/linux/memory.h b/include/linux/memory.h
6285 index 1ac7f6e..ff9a9f8 100644
6286 --- a/include/linux/memory.h
6287 +++ b/include/linux/memory.h
6288 @@ -19,7 +19,7 @@
6289 #include <linux/compiler.h>
6290 #include <linux/mutex.h>
6291
6292 -#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
6293 +#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
6294
6295 struct memory_block {
6296 unsigned long start_section_nr;
6297 diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
6298 index 629b823..b5292f3 100644
6299 --- a/include/linux/mmc/card.h
6300 +++ b/include/linux/mmc/card.h
6301 @@ -234,6 +234,7 @@ struct mmc_card {
6302 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
6303 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
6304 #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
6305 +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
6306 /* byte mode */
6307 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
6308 #define MMC_NO_POWER_NOTIFICATION 0
6309 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
6310 index 33900a5..e517695 100644
6311 --- a/include/linux/netdevice.h
6312 +++ b/include/linux/netdevice.h
6313 @@ -1279,6 +1279,8 @@ struct net_device {
6314 /* for setting kernel sock attribute on TCP connection setup */
6315 #define GSO_MAX_SIZE 65536
6316 unsigned int gso_max_size;
6317 +#define GSO_MAX_SEGS 65535
6318 + u16 gso_max_segs;
6319
6320 #ifdef CONFIG_DCB
6321 /* Data Center Bridging netlink ops */
6322 @@ -1494,6 +1496,8 @@ struct packet_type {
6323 struct sk_buff **(*gro_receive)(struct sk_buff **head,
6324 struct sk_buff *skb);
6325 int (*gro_complete)(struct sk_buff *skb);
6326 + bool (*id_match)(struct packet_type *ptype,
6327 + struct sock *sk);
6328 void *af_packet_priv;
6329 struct list_head list;
6330 };
6331 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
6332 index 52a1bdb..941d688 100644
6333 --- a/include/linux/nfs_fs.h
6334 +++ b/include/linux/nfs_fs.h
6335 @@ -264,11 +264,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
6336 return NFS_SERVER(inode)->nfs_client->rpc_ops;
6337 }
6338
6339 -static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
6340 -{
6341 - return NFS_I(inode)->cookieverf;
6342 -}
6343 -
6344 static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
6345 {
6346 struct nfs_server *nfss = NFS_SERVER(inode);
6347 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
6348 index 3329965..19ca550 100644
6349 --- a/include/linux/pci_ids.h
6350 +++ b/include/linux/pci_ids.h
6351 @@ -2148,7 +2148,7 @@
6352 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
6353 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
6354 #define PCI_DEVICE_ID_NX2_5706S 0x16aa
6355 -#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab
6356 +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
6357 #define PCI_DEVICE_ID_NX2_5708S 0x16ac
6358 #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
6359 #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
6360 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
6361 index ddbb6a9..f18d537 100644
6362 --- a/include/linux/perf_event.h
6363 +++ b/include/linux/perf_event.h
6364 @@ -925,7 +925,7 @@ struct perf_event {
6365 struct hw_perf_event hw;
6366
6367 struct perf_event_context *ctx;
6368 - struct file *filp;
6369 + atomic_long_t refcount;
6370
6371 /*
6372 * These accumulate total time (in nanoseconds) that children
6373 diff --git a/include/linux/sched.h b/include/linux/sched.h
6374 index 7b06169..48241aa 100644
6375 --- a/include/linux/sched.h
6376 +++ b/include/linux/sched.h
6377 @@ -1279,6 +1279,9 @@ struct task_struct {
6378 const struct sched_class *sched_class;
6379 struct sched_entity se;
6380 struct sched_rt_entity rt;
6381 +#ifdef CONFIG_CGROUP_SCHED
6382 + struct task_group *sched_task_group;
6383 +#endif
6384
6385 #ifdef CONFIG_PREEMPT_NOTIFIERS
6386 /* list of struct preempt_notifier: */
6387 @@ -2744,7 +2747,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
6388 extern long sched_group_rt_period(struct task_group *tg);
6389 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
6390 #endif
6391 -#endif
6392 +#endif /* CONFIG_CGROUP_SCHED */
6393
6394 extern int task_can_switch_user(struct user_struct *up,
6395 struct task_struct *tsk);
6396 diff --git a/include/linux/time.h b/include/linux/time.h
6397 index 8da5129..03dce74 100644
6398 --- a/include/linux/time.h
6399 +++ b/include/linux/time.h
6400 @@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
6401 return ts_delta;
6402 }
6403
6404 +#define KTIME_MAX ((s64)~((u64)1 << 63))
6405 +#if (BITS_PER_LONG == 64)
6406 +# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
6407 +#else
6408 +# define KTIME_SEC_MAX LONG_MAX
6409 +#endif
6410 +
6411 /*
6412 * Returns true if the timespec is norm, false if denorm:
6413 */
6414 -#define timespec_valid(ts) \
6415 - (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
6416 +static inline bool timespec_valid(const struct timespec *ts)
6417 +{
6418 + /* Dates before 1970 are bogus */
6419 + if (ts->tv_sec < 0)
6420 + return false;
6421 + /* Can't have more nanoseconds then a second */
6422 + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
6423 + return false;
6424 + return true;
6425 +}
6426 +
6427 +static inline bool timespec_valid_strict(const struct timespec *ts)
6428 +{
6429 + if (!timespec_valid(ts))
6430 + return false;
6431 + /* Disallow values that could overflow ktime_t */
6432 + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
6433 + return false;
6434 + return true;
6435 +}
6436
6437 extern void read_persistent_clock(struct timespec *ts);
6438 extern void read_boot_clock(struct timespec *ts);
6439 diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
6440 index 7b3acdd..1bf9bec 100644
6441 --- a/include/net/bluetooth/smp.h
6442 +++ b/include/net/bluetooth/smp.h
6443 @@ -136,7 +136,7 @@ struct smp_chan {
6444 };
6445
6446 /* SMP Commands */
6447 -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
6448 +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
6449 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
6450 int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
6451 int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
6452 diff --git a/include/net/scm.h b/include/net/scm.h
6453 index d456f4c..0c0017c 100644
6454 --- a/include/net/scm.h
6455 +++ b/include/net/scm.h
6456 @@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
6457 }
6458
6459 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
6460 - struct scm_cookie *scm)
6461 + struct scm_cookie *scm, bool forcecreds)
6462 {
6463 memset(scm, 0, sizeof(*scm));
6464 + if (forcecreds)
6465 + scm_set_cred(scm, task_tgid(current), current_cred());
6466 unix_get_peersec_dgram(sock, scm);
6467 if (msg->msg_controllen <= 0)
6468 return 0;
6469 diff --git a/include/net/sock.h b/include/net/sock.h
6470 index 5a0a58a..5878118 100644
6471 --- a/include/net/sock.h
6472 +++ b/include/net/sock.h
6473 @@ -216,6 +216,7 @@ struct cg_proto;
6474 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
6475 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
6476 * @sk_gso_max_size: Maximum GSO segment size to build
6477 + * @sk_gso_max_segs: Maximum number of GSO segments
6478 * @sk_lingertime: %SO_LINGER l_linger setting
6479 * @sk_backlog: always used with the per-socket spinlock held
6480 * @sk_callback_lock: used with the callbacks in the end of this struct
6481 @@ -335,6 +336,7 @@ struct sock {
6482 netdev_features_t sk_route_nocaps;
6483 int sk_gso_type;
6484 unsigned int sk_gso_max_size;
6485 + u16 sk_gso_max_segs;
6486 int sk_rcvlowat;
6487 unsigned long sk_lingertime;
6488 struct sk_buff_head sk_error_queue;
6489 diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
6490 index 5f889f1..08fa272 100644
6491 --- a/include/trace/events/kmem.h
6492 +++ b/include/trace/events/kmem.h
6493 @@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,
6494
6495 TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
6496 __entry->page,
6497 - page_to_pfn(__entry->page),
6498 + __entry->page ? page_to_pfn(__entry->page) : 0,
6499 __entry->order,
6500 __entry->migratetype,
6501 show_gfp_flags(__entry->gfp_flags))
6502 @@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,
6503
6504 TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
6505 __entry->page,
6506 - page_to_pfn(__entry->page),
6507 + __entry->page ? page_to_pfn(__entry->page) : 0,
6508 __entry->order,
6509 __entry->migratetype,
6510 __entry->order == 0)
6511 diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
6512 index 15f8a00..f0037a8 100644
6513 --- a/include/xen/grant_table.h
6514 +++ b/include/xen/grant_table.h
6515 @@ -185,6 +185,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
6516 struct gnttab_map_grant_ref *kmap_ops,
6517 struct page **pages, unsigned int count);
6518 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
6519 - struct page **pages, unsigned int count, bool clear_pte);
6520 + struct gnttab_map_grant_ref *kunmap_ops,
6521 + struct page **pages, unsigned int count);
6522
6523 #endif /* __ASM_GNTTAB_H__ */
6524 diff --git a/kernel/async.c b/kernel/async.c
6525 index bd0c168..32d8dc9 100644
6526 --- a/kernel/async.c
6527 +++ b/kernel/async.c
6528 @@ -86,6 +86,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
6529 {
6530 struct async_entry *entry;
6531
6532 + if (!running) { /* just check the entry count */
6533 + if (atomic_read(&entry_count))
6534 + return 0; /* smaller than any cookie */
6535 + else
6536 + return next_cookie;
6537 + }
6538 +
6539 if (!list_empty(running)) {
6540 entry = list_first_entry(running,
6541 struct async_entry, list);
6542 @@ -236,9 +243,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
6543 */
6544 void async_synchronize_full(void)
6545 {
6546 - do {
6547 - async_synchronize_cookie(next_cookie);
6548 - } while (!list_empty(&async_running) || !list_empty(&async_pending));
6549 + async_synchronize_cookie_domain(next_cookie, NULL);
6550 }
6551 EXPORT_SYMBOL_GPL(async_synchronize_full);
6552
6553 @@ -258,7 +263,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
6554 /**
6555 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
6556 * @cookie: async_cookie_t to use as checkpoint
6557 - * @running: running list to synchronize on
6558 + * @running: running list to synchronize on, NULL indicates all lists
6559 *
6560 * This function waits until all asynchronous function calls for the
6561 * synchronization domain specified by the running list @list submitted
6562 diff --git a/kernel/events/core.c b/kernel/events/core.c
6563 index fd126f8..228fdb0 100644
6564 --- a/kernel/events/core.c
6565 +++ b/kernel/events/core.c
6566 @@ -2929,12 +2929,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
6567 /*
6568 * Called when the last reference to the file is gone.
6569 */
6570 -static int perf_release(struct inode *inode, struct file *file)
6571 +static void put_event(struct perf_event *event)
6572 {
6573 - struct perf_event *event = file->private_data;
6574 struct task_struct *owner;
6575
6576 - file->private_data = NULL;
6577 + if (!atomic_long_dec_and_test(&event->refcount))
6578 + return;
6579
6580 rcu_read_lock();
6581 owner = ACCESS_ONCE(event->owner);
6582 @@ -2969,7 +2969,13 @@ static int perf_release(struct inode *inode, struct file *file)
6583 put_task_struct(owner);
6584 }
6585
6586 - return perf_event_release_kernel(event);
6587 + perf_event_release_kernel(event);
6588 +}
6589 +
6590 +static int perf_release(struct inode *inode, struct file *file)
6591 +{
6592 + put_event(file->private_data);
6593 + return 0;
6594 }
6595
6596 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
6597 @@ -3222,7 +3228,7 @@ unlock:
6598
6599 static const struct file_operations perf_fops;
6600
6601 -static struct perf_event *perf_fget_light(int fd, int *fput_needed)
6602 +static struct file *perf_fget_light(int fd, int *fput_needed)
6603 {
6604 struct file *file;
6605
6606 @@ -3236,7 +3242,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
6607 return ERR_PTR(-EBADF);
6608 }
6609
6610 - return file->private_data;
6611 + return file;
6612 }
6613
6614 static int perf_event_set_output(struct perf_event *event,
6615 @@ -3268,19 +3274,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6616
6617 case PERF_EVENT_IOC_SET_OUTPUT:
6618 {
6619 + struct file *output_file = NULL;
6620 struct perf_event *output_event = NULL;
6621 int fput_needed = 0;
6622 int ret;
6623
6624 if (arg != -1) {
6625 - output_event = perf_fget_light(arg, &fput_needed);
6626 - if (IS_ERR(output_event))
6627 - return PTR_ERR(output_event);
6628 + output_file = perf_fget_light(arg, &fput_needed);
6629 + if (IS_ERR(output_file))
6630 + return PTR_ERR(output_file);
6631 + output_event = output_file->private_data;
6632 }
6633
6634 ret = perf_event_set_output(event, output_event);
6635 if (output_event)
6636 - fput_light(output_event->filp, fput_needed);
6637 + fput_light(output_file, fput_needed);
6638
6639 return ret;
6640 }
6641 @@ -5920,6 +5928,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
6642
6643 mutex_init(&event->mmap_mutex);
6644
6645 + atomic_long_set(&event->refcount, 1);
6646 event->cpu = cpu;
6647 event->attr = *attr;
6648 event->group_leader = group_leader;
6649 @@ -6230,12 +6239,12 @@ SYSCALL_DEFINE5(perf_event_open,
6650 return event_fd;
6651
6652 if (group_fd != -1) {
6653 - group_leader = perf_fget_light(group_fd, &fput_needed);
6654 - if (IS_ERR(group_leader)) {
6655 - err = PTR_ERR(group_leader);
6656 + group_file = perf_fget_light(group_fd, &fput_needed);
6657 + if (IS_ERR(group_file)) {
6658 + err = PTR_ERR(group_file);
6659 goto err_fd;
6660 }
6661 - group_file = group_leader->filp;
6662 + group_leader = group_file->private_data;
6663 if (flags & PERF_FLAG_FD_OUTPUT)
6664 output_event = group_leader;
6665 if (flags & PERF_FLAG_FD_NO_GROUP)
6666 @@ -6370,7 +6379,6 @@ SYSCALL_DEFINE5(perf_event_open,
6667 put_ctx(gctx);
6668 }
6669
6670 - event->filp = event_file;
6671 WARN_ON_ONCE(ctx->parent_ctx);
6672 mutex_lock(&ctx->mutex);
6673
6674 @@ -6460,7 +6468,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6675 goto err_free;
6676 }
6677
6678 - event->filp = NULL;
6679 WARN_ON_ONCE(ctx->parent_ctx);
6680 mutex_lock(&ctx->mutex);
6681 perf_install_in_context(ctx, event, cpu);
6682 @@ -6509,7 +6516,7 @@ static void sync_child_event(struct perf_event *child_event,
6683 * Release the parent event, if this was the last
6684 * reference to it.
6685 */
6686 - fput(parent_event->filp);
6687 + put_event(parent_event);
6688 }
6689
6690 static void
6691 @@ -6585,9 +6592,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6692 *
6693 * __perf_event_exit_task()
6694 * sync_child_event()
6695 - * fput(parent_event->filp)
6696 - * perf_release()
6697 - * mutex_lock(&ctx->mutex)
6698 + * put_event()
6699 + * mutex_lock(&ctx->mutex)
6700 *
6701 * But since its the parent context it won't be the same instance.
6702 */
6703 @@ -6655,7 +6661,7 @@ static void perf_free_event(struct perf_event *event,
6704 list_del_init(&event->child_list);
6705 mutex_unlock(&parent->child_mutex);
6706
6707 - fput(parent->filp);
6708 + put_event(parent);
6709
6710 perf_group_detach(event);
6711 list_del_event(event, ctx);
6712 @@ -6735,6 +6741,12 @@ inherit_event(struct perf_event *parent_event,
6713 NULL, NULL);
6714 if (IS_ERR(child_event))
6715 return child_event;
6716 +
6717 + if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
6718 + free_event(child_event);
6719 + return NULL;
6720 + }
6721 +
6722 get_ctx(child_ctx);
6723
6724 /*
6725 @@ -6776,14 +6788,6 @@ inherit_event(struct perf_event *parent_event,
6726 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6727
6728 /*
6729 - * Get a reference to the parent filp - we will fput it
6730 - * when the child event exits. This is safe to do because
6731 - * we are in the parent and we know that the filp still
6732 - * exists and has a nonzero count:
6733 - */
6734 - atomic_long_inc(&parent_event->filp->f_count);
6735 -
6736 - /*
6737 * Link this into the parent event's child list
6738 */
6739 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6740 diff --git a/kernel/kthread.c b/kernel/kthread.c
6741 index 3d3de63..b579af5 100644
6742 --- a/kernel/kthread.c
6743 +++ b/kernel/kthread.c
6744 @@ -360,16 +360,12 @@ repeat:
6745 struct kthread_work, node);
6746 list_del_init(&work->node);
6747 }
6748 + worker->current_work = work;
6749 spin_unlock_irq(&worker->lock);
6750
6751 if (work) {
6752 __set_current_state(TASK_RUNNING);
6753 work->func(work);
6754 - smp_wmb(); /* wmb worker-b0 paired with flush-b1 */
6755 - work->done_seq = work->queue_seq;
6756 - smp_mb(); /* mb worker-b1 paired with flush-b0 */
6757 - if (atomic_read(&work->flushing))
6758 - wake_up_all(&work->done);
6759 } else if (!freezing(current))
6760 schedule();
6761
6762 @@ -378,6 +374,19 @@ repeat:
6763 }
6764 EXPORT_SYMBOL_GPL(kthread_worker_fn);
6765
6766 +/* insert @work before @pos in @worker */
6767 +static void insert_kthread_work(struct kthread_worker *worker,
6768 + struct kthread_work *work,
6769 + struct list_head *pos)
6770 +{
6771 + lockdep_assert_held(&worker->lock);
6772 +
6773 + list_add_tail(&work->node, pos);
6774 + work->worker = worker;
6775 + if (likely(worker->task))
6776 + wake_up_process(worker->task);
6777 +}
6778 +
6779 /**
6780 * queue_kthread_work - queue a kthread_work
6781 * @worker: target kthread_worker
6782 @@ -395,10 +404,7 @@ bool queue_kthread_work(struct kthread_worker *worker,
6783
6784 spin_lock_irqsave(&worker->lock, flags);
6785 if (list_empty(&work->node)) {
6786 - list_add_tail(&work->node, &worker->work_list);
6787 - work->queue_seq++;
6788 - if (likely(worker->task))
6789 - wake_up_process(worker->task);
6790 + insert_kthread_work(worker, work, &worker->work_list);
6791 ret = true;
6792 }
6793 spin_unlock_irqrestore(&worker->lock, flags);
6794 @@ -406,6 +412,18 @@ bool queue_kthread_work(struct kthread_worker *worker,
6795 }
6796 EXPORT_SYMBOL_GPL(queue_kthread_work);
6797
6798 +struct kthread_flush_work {
6799 + struct kthread_work work;
6800 + struct completion done;
6801 +};
6802 +
6803 +static void kthread_flush_work_fn(struct kthread_work *work)
6804 +{
6805 + struct kthread_flush_work *fwork =
6806 + container_of(work, struct kthread_flush_work, work);
6807 + complete(&fwork->done);
6808 +}
6809 +
6810 /**
6811 * flush_kthread_work - flush a kthread_work
6812 * @work: work to flush
6813 @@ -414,39 +432,37 @@ EXPORT_SYMBOL_GPL(queue_kthread_work);
6814 */
6815 void flush_kthread_work(struct kthread_work *work)
6816 {
6817 - int seq = work->queue_seq;
6818 -
6819 - atomic_inc(&work->flushing);
6820 + struct kthread_flush_work fwork = {
6821 + KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
6822 + COMPLETION_INITIALIZER_ONSTACK(fwork.done),
6823 + };
6824 + struct kthread_worker *worker;
6825 + bool noop = false;
6826
6827 - /*
6828 - * mb flush-b0 paired with worker-b1, to make sure either
6829 - * worker sees the above increment or we see done_seq update.
6830 - */
6831 - smp_mb__after_atomic_inc();
6832 +retry:
6833 + worker = work->worker;
6834 + if (!worker)
6835 + return;
6836
6837 - /* A - B <= 0 tests whether B is in front of A regardless of overflow */
6838 - wait_event(work->done, seq - work->done_seq <= 0);
6839 - atomic_dec(&work->flushing);
6840 + spin_lock_irq(&worker->lock);
6841 + if (work->worker != worker) {
6842 + spin_unlock_irq(&worker->lock);
6843 + goto retry;
6844 + }
6845
6846 - /*
6847 - * rmb flush-b1 paired with worker-b0, to make sure our caller
6848 - * sees every change made by work->func().
6849 - */
6850 - smp_mb__after_atomic_dec();
6851 -}
6852 -EXPORT_SYMBOL_GPL(flush_kthread_work);
6853 + if (!list_empty(&work->node))
6854 + insert_kthread_work(worker, &fwork.work, work->node.next);
6855 + else if (worker->current_work == work)
6856 + insert_kthread_work(worker, &fwork.work, worker->work_list.next);
6857 + else
6858 + noop = true;
6859
6860 -struct kthread_flush_work {
6861 - struct kthread_work work;
6862 - struct completion done;
6863 -};
6864 + spin_unlock_irq(&worker->lock);
6865
6866 -static void kthread_flush_work_fn(struct kthread_work *work)
6867 -{
6868 - struct kthread_flush_work *fwork =
6869 - container_of(work, struct kthread_flush_work, work);
6870 - complete(&fwork->done);
6871 + if (!noop)
6872 + wait_for_completion(&fwork.done);
6873 }
6874 +EXPORT_SYMBOL_GPL(flush_kthread_work);
6875
6876 /**
6877 * flush_kthread_worker - flush all current works on a kthread_worker
6878 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6879 index ef6a8f2..593087b 100644
6880 --- a/kernel/sched/core.c
6881 +++ b/kernel/sched/core.c
6882 @@ -1098,7 +1098,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
6883 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
6884 *
6885 * sched_move_task() holds both and thus holding either pins the cgroup,
6886 - * see set_task_rq().
6887 + * see task_group().
6888 *
6889 * Furthermore, all task_rq users should acquire both locks, see
6890 * task_rq_lock().
6891 @@ -7427,6 +7427,7 @@ void sched_destroy_group(struct task_group *tg)
6892 */
6893 void sched_move_task(struct task_struct *tsk)
6894 {
6895 + struct task_group *tg;
6896 int on_rq, running;
6897 unsigned long flags;
6898 struct rq *rq;
6899 @@ -7441,6 +7442,12 @@ void sched_move_task(struct task_struct *tsk)
6900 if (unlikely(running))
6901 tsk->sched_class->put_prev_task(rq, tsk);
6902
6903 + tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
6904 + lockdep_is_held(&tsk->sighand->siglock)),
6905 + struct task_group, css);
6906 + tg = autogroup_task_group(tsk, tg);
6907 + tsk->sched_task_group = tg;
6908 +
6909 #ifdef CONFIG_FAIR_GROUP_SCHED
6910 if (tsk->sched_class->task_move_group)
6911 tsk->sched_class->task_move_group(tsk, on_rq);
6912 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
6913 index d9637f4..acfa701 100644
6914 --- a/kernel/sched/sched.h
6915 +++ b/kernel/sched/sched.h
6916 @@ -536,22 +536,19 @@ DECLARE_PER_CPU(int, sd_llc_id);
6917 /*
6918 * Return the group to which this tasks belongs.
6919 *
6920 - * We use task_subsys_state_check() and extend the RCU verification with
6921 - * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
6922 - * task it moves into the cgroup. Therefore by holding either of those locks,
6923 - * we pin the task to the current cgroup.
6924 + * We cannot use task_subsys_state() and friends because the cgroup
6925 + * subsystem changes that value before the cgroup_subsys::attach() method
6926 + * is called, therefore we cannot pin it and might observe the wrong value.
6927 + *
6928 + * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
6929 + * core changes this before calling sched_move_task().
6930 + *
6931 + * Instead we use a 'copy' which is updated from sched_move_task() while
6932 + * holding both task_struct::pi_lock and rq::lock.
6933 */
6934 static inline struct task_group *task_group(struct task_struct *p)
6935 {
6936 - struct task_group *tg;
6937 - struct cgroup_subsys_state *css;
6938 -
6939 - css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
6940 - lockdep_is_held(&p->pi_lock) ||
6941 - lockdep_is_held(&task_rq(p)->lock));
6942 - tg = container_of(css, struct task_group, css);
6943 -
6944 - return autogroup_task_group(p, tg);
6945 + return p->sched_task_group;
6946 }
6947
6948 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
6949 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
6950 index fd4e160..e603477 100644
6951 --- a/kernel/time/tick-sched.c
6952 +++ b/kernel/time/tick-sched.c
6953 @@ -145,6 +145,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
6954 tick_do_update_jiffies64(now);
6955 local_irq_restore(flags);
6956
6957 + calc_load_exit_idle();
6958 touch_softlockup_watchdog();
6959 }
6960
6961 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
6962 index 7c50de8..12843e9 100644
6963 --- a/kernel/time/timekeeping.c
6964 +++ b/kernel/time/timekeeping.c
6965 @@ -385,7 +385,7 @@ int do_settimeofday(const struct timespec *tv)
6966 struct timespec ts_delta;
6967 unsigned long flags;
6968
6969 - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
6970 + if (!timespec_valid_strict(tv))
6971 return -EINVAL;
6972
6973 write_seqlock_irqsave(&timekeeper.lock, flags);
6974 @@ -420,6 +420,8 @@ EXPORT_SYMBOL(do_settimeofday);
6975 int timekeeping_inject_offset(struct timespec *ts)
6976 {
6977 unsigned long flags;
6978 + struct timespec tmp;
6979 + int ret = 0;
6980
6981 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
6982 return -EINVAL;
6983 @@ -428,10 +430,17 @@ int timekeeping_inject_offset(struct timespec *ts)
6984
6985 timekeeping_forward_now();
6986
6987 + tmp = timespec_add(timekeeper.xtime, *ts);
6988 + if (!timespec_valid_strict(&tmp)) {
6989 + ret = -EINVAL;
6990 + goto error;
6991 + }
6992 +
6993 timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
6994 timekeeper.wall_to_monotonic =
6995 timespec_sub(timekeeper.wall_to_monotonic, *ts);
6996
6997 +error: /* even if we error out, we forwarded the time, so call update */
6998 timekeeping_update(true);
6999
7000 write_sequnlock_irqrestore(&timekeeper.lock, flags);
7001 @@ -439,7 +448,7 @@ int timekeeping_inject_offset(struct timespec *ts)
7002 /* signal hrtimers about time change */
7003 clock_was_set();
7004
7005 - return 0;
7006 + return ret;
7007 }
7008 EXPORT_SYMBOL(timekeeping_inject_offset);
7009
7010 @@ -599,7 +608,20 @@ void __init timekeeping_init(void)
7011 struct timespec now, boot;
7012
7013 read_persistent_clock(&now);
7014 + if (!timespec_valid_strict(&now)) {
7015 + pr_warn("WARNING: Persistent clock returned invalid value!\n"
7016 + " Check your CMOS/BIOS settings.\n");
7017 + now.tv_sec = 0;
7018 + now.tv_nsec = 0;
7019 + }
7020 +
7021 read_boot_clock(&boot);
7022 + if (!timespec_valid_strict(&boot)) {
7023 + pr_warn("WARNING: Boot clock returned invalid value!\n"
7024 + " Check your CMOS/BIOS settings.\n");
7025 + boot.tv_sec = 0;
7026 + boot.tv_nsec = 0;
7027 + }
7028
7029 seqlock_init(&timekeeper.lock);
7030
7031 @@ -645,7 +667,7 @@ static void update_sleep_time(struct timespec t)
7032 */
7033 static void __timekeeping_inject_sleeptime(struct timespec *delta)
7034 {
7035 - if (!timespec_valid(delta)) {
7036 + if (!timespec_valid_strict(delta)) {
7037 printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
7038 "sleep delta value!\n");
7039 return;
7040 @@ -1035,9 +1057,12 @@ static void update_wall_time(void)
7041 #else
7042 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
7043 #endif
7044 + /* Check if there's really nothing to do */
7045 + if (offset < timekeeper.cycle_interval)
7046 + goto out;
7047 +
7048 timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
7049 timekeeper.shift;
7050 -
7051 /*
7052 * With NO_HZ we may have to accumulate many cycle_intervals
7053 * (think "ticks") worth of time at once. To do this efficiently,
7054 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
7055 index bfe3f8a..7584322 100644
7056 --- a/kernel/workqueue.c
7057 +++ b/kernel/workqueue.c
7058 @@ -3433,14 +3433,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
7059
7060 for_each_busy_worker(worker, i, pos, gcwq) {
7061 struct work_struct *rebind_work = &worker->rebind_work;
7062 + unsigned long worker_flags = worker->flags;
7063
7064 /*
7065 * Rebind_work may race with future cpu hotplug
7066 * operations. Use a separate flag to mark that
7067 - * rebinding is scheduled.
7068 + * rebinding is scheduled. The morphing should
7069 + * be atomic.
7070 */
7071 - worker->flags |= WORKER_REBIND;
7072 - worker->flags &= ~WORKER_ROGUE;
7073 + worker_flags |= WORKER_REBIND;
7074 + worker_flags &= ~WORKER_ROGUE;
7075 + ACCESS_ONCE(worker->flags) = worker_flags;
7076
7077 /* queue rebind_work, wq doesn't matter, use the default one */
7078 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
7079 @@ -3620,18 +3623,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
7080 #ifdef CONFIG_SMP
7081
7082 struct work_for_cpu {
7083 - struct completion completion;
7084 + struct work_struct work;
7085 long (*fn)(void *);
7086 void *arg;
7087 long ret;
7088 };
7089
7090 -static int do_work_for_cpu(void *_wfc)
7091 +static void work_for_cpu_fn(struct work_struct *work)
7092 {
7093 - struct work_for_cpu *wfc = _wfc;
7094 + struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
7095 +
7096 wfc->ret = wfc->fn(wfc->arg);
7097 - complete(&wfc->completion);
7098 - return 0;
7099 }
7100
7101 /**
7102 @@ -3646,19 +3648,11 @@ static int do_work_for_cpu(void *_wfc)
7103 */
7104 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
7105 {
7106 - struct task_struct *sub_thread;
7107 - struct work_for_cpu wfc = {
7108 - .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
7109 - .fn = fn,
7110 - .arg = arg,
7111 - };
7112 + struct work_for_cpu wfc = { .fn = fn, .arg = arg };
7113
7114 - sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
7115 - if (IS_ERR(sub_thread))
7116 - return PTR_ERR(sub_thread);
7117 - kthread_bind(sub_thread, cpu);
7118 - wake_up_process(sub_thread);
7119 - wait_for_completion(&wfc.completion);
7120 + INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
7121 + schedule_work_on(cpu, &wfc.work);
7122 + flush_work(&wfc.work);
7123 return wfc.ret;
7124 }
7125 EXPORT_SYMBOL_GPL(work_on_cpu);
7126 diff --git a/lib/digsig.c b/lib/digsig.c
7127 index 286d558..8c0e629 100644
7128 --- a/lib/digsig.c
7129 +++ b/lib/digsig.c
7130 @@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
7131 memcpy(out1 + head, p, l);
7132
7133 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
7134 + if (err)
7135 + goto err;
7136
7137 - if (!err && len == hlen)
7138 - err = memcmp(out2, h, hlen);
7139 + if (len != hlen || memcmp(out2, h, hlen))
7140 + err = -EINVAL;
7141
7142 err:
7143 mpi_free(in);
7144 diff --git a/mm/bootmem.c b/mm/bootmem.c
7145 index 0131170..53cf62b 100644
7146 --- a/mm/bootmem.c
7147 +++ b/mm/bootmem.c
7148 @@ -766,13 +766,17 @@ void * __init alloc_bootmem_section(unsigned long size,
7149 unsigned long section_nr)
7150 {
7151 bootmem_data_t *bdata;
7152 - unsigned long pfn, goal;
7153 + unsigned long pfn, goal, limit;
7154
7155 pfn = section_nr_to_pfn(section_nr);
7156 goal = pfn << PAGE_SHIFT;
7157 + limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
7158 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
7159
7160 - return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
7161 + if (goal + size > limit)
7162 + limit = 0;
7163 +
7164 + return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
7165 }
7166 #endif
7167
7168 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
7169 index 6629faf..9ad7d1e 100644
7170 --- a/mm/memory_hotplug.c
7171 +++ b/mm/memory_hotplug.c
7172 @@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
7173 struct mem_section *ms;
7174 struct page *page, *memmap;
7175
7176 - if (!pfn_valid(start_pfn))
7177 - return;
7178 -
7179 section_nr = pfn_to_section_nr(start_pfn);
7180 ms = __nr_to_section(section_nr);
7181
7182 @@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
7183 end_pfn = pfn + pgdat->node_spanned_pages;
7184
7185 /* register_section info */
7186 - for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
7187 - register_page_bootmem_info_section(pfn);
7188 -
7189 + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
7190 + /*
7191 + * Some platforms can assign the same pfn to multiple nodes - on
7192 + * node0 as well as nodeN. To avoid registering a pfn against
7193 + * multiple nodes we check that this pfn does not already
7194 + * reside in some other node.
7195 + */
7196 + if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
7197 + register_page_bootmem_info_section(pfn);
7198 + }
7199 }
7200 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
7201
7202 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7203 index 918330f..88a6d87 100644
7204 --- a/mm/page_alloc.c
7205 +++ b/mm/page_alloc.c
7206 @@ -579,7 +579,7 @@ static inline void __free_one_page(struct page *page,
7207 combined_idx = buddy_idx & page_idx;
7208 higher_page = page + (combined_idx - page_idx);
7209 buddy_idx = __find_buddy_index(combined_idx, order + 1);
7210 - higher_buddy = page + (buddy_idx - combined_idx);
7211 + higher_buddy = higher_page + (buddy_idx - combined_idx);
7212 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
7213 list_add_tail(&page->lru,
7214 &zone->free_area[order].free_list[migratetype]);
7215 diff --git a/mm/vmscan.c b/mm/vmscan.c
7216 index be5bc0a..e989ee2 100644
7217 --- a/mm/vmscan.c
7218 +++ b/mm/vmscan.c
7219 @@ -1983,10 +1983,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
7220 * proportional to the fraction of recently scanned pages on
7221 * each list that were recently referenced and in active use.
7222 */
7223 - ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
7224 + ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
7225 ap /= reclaim_stat->recent_rotated[0] + 1;
7226
7227 - fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
7228 + fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
7229 fp /= reclaim_stat->recent_rotated[1] + 1;
7230 spin_unlock_irq(&mz->zone->lru_lock);
7231
7232 @@ -1999,7 +1999,7 @@ out:
7233 unsigned long scan;
7234
7235 scan = zone_nr_lru_pages(mz, lru);
7236 - if (priority || noswap) {
7237 + if (priority || noswap || !vmscan_swappiness(mz, sc)) {
7238 scan >>= priority;
7239 if (!scan && force_scan)
7240 scan = SWAP_CLUSTER_MAX;
7241 diff --git a/net/atm/common.c b/net/atm/common.c
7242 index b4b44db..0c0ad93 100644
7243 --- a/net/atm/common.c
7244 +++ b/net/atm/common.c
7245 @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
7246
7247 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
7248 return -ENOTCONN;
7249 + memset(&pvc, 0, sizeof(pvc));
7250 pvc.sap_family = AF_ATMPVC;
7251 pvc.sap_addr.itf = vcc->dev->number;
7252 pvc.sap_addr.vpi = vcc->vpi;
7253 diff --git a/net/atm/pvc.c b/net/atm/pvc.c
7254 index 3a73491..ae03240 100644
7255 --- a/net/atm/pvc.c
7256 +++ b/net/atm/pvc.c
7257 @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
7258 return -ENOTCONN;
7259 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
7260 addr = (struct sockaddr_atmpvc *)sockaddr;
7261 + memset(addr, 0, sizeof(*addr));
7262 addr->sap_family = AF_ATMPVC;
7263 addr->sap_addr.itf = vcc->dev->number;
7264 addr->sap_addr.vpi = vcc->vpi;
7265 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
7266 index 5238b6b..39b2baf 100644
7267 --- a/net/bluetooth/hci_conn.c
7268 +++ b/net/bluetooth/hci_conn.c
7269 @@ -42,6 +42,7 @@
7270
7271 #include <net/bluetooth/bluetooth.h>
7272 #include <net/bluetooth/hci_core.h>
7273 +#include <net/bluetooth/smp.h>
7274
7275 static void hci_le_connect(struct hci_conn *conn)
7276 {
7277 @@ -661,6 +662,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
7278 {
7279 BT_DBG("conn %p", conn);
7280
7281 + if (conn->type == LE_LINK)
7282 + return smp_conn_security(conn, sec_level);
7283 +
7284 /* For sdp we don't need the link key. */
7285 if (sec_level == BT_SECURITY_SDP)
7286 return 1;
7287 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
7288 index d6dc44c..0a30ec1 100644
7289 --- a/net/bluetooth/hci_core.c
7290 +++ b/net/bluetooth/hci_core.c
7291 @@ -750,6 +750,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
7292
7293 cancel_work_sync(&hdev->le_scan);
7294
7295 + cancel_delayed_work(&hdev->power_off);
7296 +
7297 hci_req_cancel(hdev, ENODEV);
7298 hci_req_lock(hdev);
7299
7300 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
7301 index 5914623..bedc768 100644
7302 --- a/net/bluetooth/hci_sock.c
7303 +++ b/net/bluetooth/hci_sock.c
7304 @@ -706,6 +706,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
7305 *addr_len = sizeof(*haddr);
7306 haddr->hci_family = AF_BLUETOOTH;
7307 haddr->hci_dev = hdev->id;
7308 + haddr->hci_channel= 0;
7309
7310 release_sock(sk);
7311 return 0;
7312 @@ -1016,6 +1017,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
7313 {
7314 struct hci_filter *f = &hci_pi(sk)->filter;
7315
7316 + memset(&uf, 0, sizeof(uf));
7317 uf.type_mask = f->type_mask;
7318 uf.opcode = f->opcode;
7319 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
7320 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
7321 index 9a86759..627c354 100644
7322 --- a/net/bluetooth/l2cap_core.c
7323 +++ b/net/bluetooth/l2cap_core.c
7324 @@ -937,14 +937,15 @@ static void l2cap_chan_ready(struct l2cap_chan *chan)
7325 static void l2cap_conn_ready(struct l2cap_conn *conn)
7326 {
7327 struct l2cap_chan *chan;
7328 + struct hci_conn *hcon = conn->hcon;
7329
7330 BT_DBG("conn %p", conn);
7331
7332 - if (!conn->hcon->out && conn->hcon->type == LE_LINK)
7333 + if (!hcon->out && hcon->type == LE_LINK)
7334 l2cap_le_conn_ready(conn);
7335
7336 - if (conn->hcon->out && conn->hcon->type == LE_LINK)
7337 - smp_conn_security(conn, conn->hcon->pending_sec_level);
7338 + if (hcon->out && hcon->type == LE_LINK)
7339 + smp_conn_security(hcon, hcon->pending_sec_level);
7340
7341 mutex_lock(&conn->chan_lock);
7342
7343 @@ -952,8 +953,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
7344
7345 l2cap_chan_lock(chan);
7346
7347 - if (conn->hcon->type == LE_LINK) {
7348 - if (smp_conn_security(conn, chan->sec_level))
7349 + if (hcon->type == LE_LINK) {
7350 + if (smp_conn_security(hcon, chan->sec_level))
7351 l2cap_chan_ready(chan);
7352
7353 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7354 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
7355 index 04e7c17..4a26348 100644
7356 --- a/net/bluetooth/l2cap_sock.c
7357 +++ b/net/bluetooth/l2cap_sock.c
7358 @@ -242,6 +242,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
7359
7360 BT_DBG("sock %p, sk %p", sock, sk);
7361
7362 + memset(la, 0, sizeof(struct sockaddr_l2));
7363 addr->sa_family = AF_BLUETOOTH;
7364 *len = sizeof(struct sockaddr_l2);
7365
7366 @@ -587,7 +588,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
7367 break;
7368 }
7369
7370 - if (smp_conn_security(conn, sec.level))
7371 + if (smp_conn_security(conn->hcon, sec.level))
7372 break;
7373 sk->sk_state = BT_CONFIG;
7374 chan->state = BT_CONFIG;
7375 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
7376 index 4bb03b1..8f3d9dc 100644
7377 --- a/net/bluetooth/mgmt.c
7378 +++ b/net/bluetooth/mgmt.c
7379 @@ -2801,6 +2801,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
7380 if (scan)
7381 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
7382
7383 + if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
7384 + u8 ssp = 1;
7385 +
7386 + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
7387 + }
7388 +
7389 + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
7390 + struct hci_cp_write_le_host_supported cp;
7391 +
7392 + cp.le = 1;
7393 + cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
7394 +
7395 + hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7396 + sizeof(cp), &cp);
7397 + }
7398 +
7399 update_class(hdev);
7400 update_name(hdev, hdev->dev_name);
7401 update_eir(hdev);
7402 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
7403 index a55a43e..717c43a 100644
7404 --- a/net/bluetooth/rfcomm/sock.c
7405 +++ b/net/bluetooth/rfcomm/sock.c
7406 @@ -546,6 +546,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
7407
7408 BT_DBG("sock %p, sk %p", sock, sk);
7409
7410 + memset(sa, 0, sizeof(*sa));
7411 sa->rc_family = AF_BLUETOOTH;
7412 sa->rc_channel = rfcomm_pi(sk)->channel;
7413 if (peer)
7414 @@ -836,6 +837,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
7415 }
7416
7417 sec.level = rfcomm_pi(sk)->sec_level;
7418 + sec.key_size = 0;
7419
7420 len = min_t(unsigned int, len, sizeof(sec));
7421 if (copy_to_user(optval, (char *) &sec, len))
7422 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
7423 index 4bf54b3..95a0f60 100644
7424 --- a/net/bluetooth/rfcomm/tty.c
7425 +++ b/net/bluetooth/rfcomm/tty.c
7426 @@ -467,7 +467,7 @@ static int rfcomm_get_dev_list(void __user *arg)
7427
7428 size = sizeof(*dl) + dev_num * sizeof(*di);
7429
7430 - dl = kmalloc(size, GFP_KERNEL);
7431 + dl = kzalloc(size, GFP_KERNEL);
7432 if (!dl)
7433 return -ENOMEM;
7434
7435 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
7436 index deb1198..1f6cb1f 100644
7437 --- a/net/bluetooth/smp.c
7438 +++ b/net/bluetooth/smp.c
7439 @@ -266,10 +266,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
7440 mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
7441 hcon->dst_type, reason);
7442
7443 - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
7444 - cancel_delayed_work_sync(&conn->security_timer);
7445 + cancel_delayed_work_sync(&conn->security_timer);
7446 +
7447 + if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
7448 smp_chan_destroy(conn);
7449 - }
7450 }
7451
7452 #define JUST_WORKS 0x00
7453 @@ -753,9 +753,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
7454 return 0;
7455 }
7456
7457 -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
7458 +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
7459 {
7460 - struct hci_conn *hcon = conn->hcon;
7461 + struct l2cap_conn *conn = hcon->l2cap_data;
7462 struct smp_chan *smp = conn->smp_chan;
7463 __u8 authreq;
7464
7465 diff --git a/net/core/dev.c b/net/core/dev.c
7466 index c299416..3fd9cae 100644
7467 --- a/net/core/dev.c
7468 +++ b/net/core/dev.c
7469 @@ -1056,6 +1056,8 @@ rollback:
7470 */
7471 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
7472 {
7473 + char *new_ifalias;
7474 +
7475 ASSERT_RTNL();
7476
7477 if (len >= IFALIASZ)
7478 @@ -1069,9 +1071,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
7479 return 0;
7480 }
7481
7482 - dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
7483 - if (!dev->ifalias)
7484 + new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
7485 + if (!new_ifalias)
7486 return -ENOMEM;
7487 + dev->ifalias = new_ifalias;
7488
7489 strlcpy(dev->ifalias, alias, len+1);
7490 return len;
7491 @@ -1638,6 +1641,19 @@ static inline int deliver_skb(struct sk_buff *skb,
7492 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
7493 }
7494
7495 +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
7496 +{
7497 + if (ptype->af_packet_priv == NULL)
7498 + return false;
7499 +
7500 + if (ptype->id_match)
7501 + return ptype->id_match(ptype, skb->sk);
7502 + else if ((struct sock *)ptype->af_packet_priv == skb->sk)
7503 + return true;
7504 +
7505 + return false;
7506 +}
7507 +
7508 /*
7509 * Support routine. Sends outgoing frames to any network
7510 * taps currently in use.
7511 @@ -1655,8 +1671,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
7512 * they originated from - MvS (miquels@drinkel.ow.org)
7513 */
7514 if ((ptype->dev == dev || !ptype->dev) &&
7515 - (ptype->af_packet_priv == NULL ||
7516 - (struct sock *)ptype->af_packet_priv != skb->sk)) {
7517 + (!skb_loop_sk(ptype, skb))) {
7518 if (pt_prev) {
7519 deliver_skb(skb2, pt_prev, skb->dev);
7520 pt_prev = ptype;
7521 @@ -2121,6 +2136,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
7522 __be16 protocol = skb->protocol;
7523 netdev_features_t features = skb->dev->features;
7524
7525 + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
7526 + features &= ~NETIF_F_GSO_MASK;
7527 +
7528 if (protocol == htons(ETH_P_8021Q)) {
7529 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
7530 protocol = veh->h_vlan_encapsulated_proto;
7531 @@ -5909,6 +5927,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7532 dev_net_set(dev, &init_net);
7533
7534 dev->gso_max_size = GSO_MAX_SIZE;
7535 + dev->gso_max_segs = GSO_MAX_SEGS;
7536
7537 INIT_LIST_HEAD(&dev->napi_list);
7538 INIT_LIST_HEAD(&dev->unreg_list);
7539 @@ -6284,7 +6303,8 @@ static struct hlist_head *netdev_create_hash(void)
7540 /* Initialize per network namespace state */
7541 static int __net_init netdev_init(struct net *net)
7542 {
7543 - INIT_LIST_HEAD(&net->dev_base_head);
7544 + if (net != &init_net)
7545 + INIT_LIST_HEAD(&net->dev_base_head);
7546
7547 net->dev_name_head = netdev_create_hash();
7548 if (net->dev_name_head == NULL)
7549 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
7550 index 31a5ae5..dd00b71 100644
7551 --- a/net/core/net_namespace.c
7552 +++ b/net/core/net_namespace.c
7553 @@ -25,7 +25,9 @@ static DEFINE_MUTEX(net_mutex);
7554 LIST_HEAD(net_namespace_list);
7555 EXPORT_SYMBOL_GPL(net_namespace_list);
7556
7557 -struct net init_net;
7558 +struct net init_net = {
7559 + .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
7560 +};
7561 EXPORT_SYMBOL(init_net);
7562
7563 #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
7564 diff --git a/net/core/sock.c b/net/core/sock.c
7565 index 0f8402e..d3e0a52 100644
7566 --- a/net/core/sock.c
7567 +++ b/net/core/sock.c
7568 @@ -1411,6 +1411,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
7569 } else {
7570 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
7571 sk->sk_gso_max_size = dst->dev->gso_max_size;
7572 + sk->sk_gso_max_segs = dst->dev->gso_max_segs;
7573 }
7574 }
7575 }
7576 diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
7577 index 70bfaf2..b658f3b 100644
7578 --- a/net/dccp/ccids/ccid3.c
7579 +++ b/net/dccp/ccids/ccid3.c
7580 @@ -531,6 +531,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
7581 case DCCP_SOCKOPT_CCID_TX_INFO:
7582 if (len < sizeof(tfrc))
7583 return -EINVAL;
7584 + memset(&tfrc, 0, sizeof(tfrc));
7585 tfrc.tfrctx_x = hc->tx_x;
7586 tfrc.tfrctx_x_recv = hc->tx_x_recv;
7587 tfrc.tfrctx_x_calc = hc->tx_x_calc;
7588 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
7589 index 960fbfc..8626b64 100644
7590 --- a/net/ipv4/ipmr.c
7591 +++ b/net/ipv4/ipmr.c
7592 @@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
7593 static struct kmem_cache *mrt_cachep __read_mostly;
7594
7595 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
7596 +static void ipmr_free_table(struct mr_table *mrt);
7597 +
7598 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
7599 struct sk_buff *skb, struct mfc_cache *cache,
7600 int local);
7601 @@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
7602 struct sk_buff *pkt, vifi_t vifi, int assert);
7603 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
7604 struct mfc_cache *c, struct rtmsg *rtm);
7605 +static void mroute_clean_tables(struct mr_table *mrt);
7606 static void ipmr_expire_process(unsigned long arg);
7607
7608 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
7609 @@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
7610
7611 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
7612 list_del(&mrt->list);
7613 - kfree(mrt);
7614 + ipmr_free_table(mrt);
7615 }
7616 fib_rules_unregister(net->ipv4.mr_rules_ops);
7617 }
7618 @@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
7619
7620 static void __net_exit ipmr_rules_exit(struct net *net)
7621 {
7622 - kfree(net->ipv4.mrt);
7623 + ipmr_free_table(net->ipv4.mrt);
7624 }
7625 #endif
7626
7627 @@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
7628 return mrt;
7629 }
7630
7631 +static void ipmr_free_table(struct mr_table *mrt)
7632 +{
7633 + del_timer_sync(&mrt->ipmr_expire_timer);
7634 + mroute_clean_tables(mrt);
7635 + kfree(mrt);
7636 +}
7637 +
7638 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
7639
7640 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
7641 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
7642 index d6feb1e..367bdaf 100644
7643 --- a/net/ipv4/tcp.c
7644 +++ b/net/ipv4/tcp.c
7645 @@ -740,7 +740,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
7646 old_size_goal + mss_now > xmit_size_goal)) {
7647 xmit_size_goal = old_size_goal;
7648 } else {
7649 - tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
7650 + tp->xmit_size_goal_segs =
7651 + min_t(u16, xmit_size_goal / mss_now,
7652 + sk->sk_gso_max_segs);
7653 xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
7654 }
7655 }
7656 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
7657 index 272a845..69251dd 100644
7658 --- a/net/ipv4/tcp_cong.c
7659 +++ b/net/ipv4/tcp_cong.c
7660 @@ -291,7 +291,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
7661 left = tp->snd_cwnd - in_flight;
7662 if (sk_can_gso(sk) &&
7663 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
7664 - left * tp->mss_cache < sk->sk_gso_max_size)
7665 + left * tp->mss_cache < sk->sk_gso_max_size &&
7666 + left < sk->sk_gso_max_segs)
7667 return 1;
7668 return left <= tcp_max_tso_deferred_mss(tp);
7669 }
7670 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
7671 index 56a9c8d..3acebbd 100644
7672 --- a/net/ipv4/tcp_input.c
7673 +++ b/net/ipv4/tcp_input.c
7674 @@ -3037,13 +3037,14 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
7675 * tcp_xmit_retransmit_queue().
7676 */
7677 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
7678 - int newly_acked_sacked, bool is_dupack,
7679 + int prior_sacked, bool is_dupack,
7680 int flag)
7681 {
7682 struct inet_connection_sock *icsk = inet_csk(sk);
7683 struct tcp_sock *tp = tcp_sk(sk);
7684 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
7685 (tcp_fackets_out(tp) > tp->reordering));
7686 + int newly_acked_sacked = 0;
7687 int fast_rexmit = 0, mib_idx;
7688
7689 if (WARN_ON(!tp->packets_out && tp->sacked_out))
7690 @@ -3103,6 +3104,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
7691 tcp_add_reno_sack(sk);
7692 } else
7693 do_lost = tcp_try_undo_partial(sk, pkts_acked);
7694 + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
7695 break;
7696 case TCP_CA_Loss:
7697 if (flag & FLAG_DATA_ACKED)
7698 @@ -3124,6 +3126,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
7699 if (is_dupack)
7700 tcp_add_reno_sack(sk);
7701 }
7702 + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
7703
7704 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
7705 tcp_try_undo_dsack(sk);
7706 @@ -3695,7 +3698,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
7707 int prior_packets;
7708 int prior_sacked = tp->sacked_out;
7709 int pkts_acked = 0;
7710 - int newly_acked_sacked = 0;
7711 int frto_cwnd = 0;
7712
7713 /* If the ack is older than previous acks
7714 @@ -3768,8 +3770,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
7715 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
7716
7717 pkts_acked = prior_packets - tp->packets_out;
7718 - newly_acked_sacked = (prior_packets - prior_sacked) -
7719 - (tp->packets_out - tp->sacked_out);
7720
7721 if (tp->frto_counter)
7722 frto_cwnd = tcp_process_frto(sk, flag);
7723 @@ -3783,7 +3783,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
7724 tcp_may_raise_cwnd(sk, flag))
7725 tcp_cong_avoid(sk, ack, prior_in_flight);
7726 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
7727 - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
7728 + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
7729 is_dupack, flag);
7730 } else {
7731 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
7732 @@ -3798,7 +3798,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
7733 no_queue:
7734 /* If data was DSACKed, see if we can undo a cwnd reduction. */
7735 if (flag & FLAG_DSACKING_ACK)
7736 - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
7737 + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
7738 is_dupack, flag);
7739 /* If this ack opens up a zero window, clear backoff. It was
7740 * being used to time the probes, and is probably far higher than
7741 @@ -3818,8 +3818,7 @@ old_ack:
7742 */
7743 if (TCP_SKB_CB(skb)->sacked) {
7744 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
7745 - newly_acked_sacked = tp->sacked_out - prior_sacked;
7746 - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
7747 + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
7748 is_dupack, flag);
7749 }
7750
7751 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
7752 index 7ac6423..2d27e1a 100644
7753 --- a/net/ipv4/tcp_output.c
7754 +++ b/net/ipv4/tcp_output.c
7755 @@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk)
7756 * when we would be allowed to send the split-due-to-Nagle skb fully.
7757 */
7758 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
7759 - unsigned int mss_now, unsigned int cwnd)
7760 + unsigned int mss_now, unsigned int max_segs)
7761 {
7762 const struct tcp_sock *tp = tcp_sk(sk);
7763 - u32 needed, window, cwnd_len;
7764 + u32 needed, window, max_len;
7765
7766 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
7767 - cwnd_len = mss_now * cwnd;
7768 + max_len = mss_now * max_segs;
7769
7770 - if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
7771 - return cwnd_len;
7772 + if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
7773 + return max_len;
7774
7775 needed = min(skb->len, window);
7776
7777 - if (cwnd_len <= needed)
7778 - return cwnd_len;
7779 + if (max_len <= needed)
7780 + return max_len;
7781
7782 return needed - needed % mss_now;
7783 }
7784 @@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
7785 limit = min(send_win, cong_win);
7786
7787 /* If a full-sized TSO skb can be sent, do it. */
7788 - if (limit >= sk->sk_gso_max_size)
7789 + if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
7790 + sk->sk_gso_max_segs * tp->mss_cache))
7791 goto send_now;
7792
7793 /* Middle in queue won't get any more data, full sendable already? */
7794 @@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
7795 limit = mss_now;
7796 if (tso_segs > 1 && !tcp_urg_mode(tp))
7797 limit = tcp_mss_split_point(sk, skb, mss_now,
7798 - cwnd_quota);
7799 + min_t(unsigned int,
7800 + cwnd_quota,
7801 + sk->sk_gso_max_segs));
7802
7803 if (skb->len > limit &&
7804 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
7805 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
7806 index 7d5cb97..2c69eca 100644
7807 --- a/net/ipv6/addrconf.c
7808 +++ b/net/ipv6/addrconf.c
7809 @@ -493,8 +493,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
7810 struct net_device *dev;
7811 struct inet6_dev *idev;
7812
7813 - rcu_read_lock();
7814 - for_each_netdev_rcu(net, dev) {
7815 + for_each_netdev(net, dev) {
7816 idev = __in6_dev_get(dev);
7817 if (idev) {
7818 int changed = (!idev->cnf.forwarding) ^ (!newf);
7819 @@ -503,7 +502,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
7820 dev_forward_change(idev);
7821 }
7822 }
7823 - rcu_read_unlock();
7824 }
7825
7826 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
7827 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
7828 index 89ff8c6..7501b22 100644
7829 --- a/net/l2tp/l2tp_core.c
7830 +++ b/net/l2tp/l2tp_core.c
7831 @@ -1253,11 +1253,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
7832 /* Remove from tunnel list */
7833 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
7834 list_del_rcu(&tunnel->list);
7835 + kfree_rcu(tunnel, rcu);
7836 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
7837 - synchronize_rcu();
7838
7839 atomic_dec(&l2tp_tunnel_count);
7840 - kfree(tunnel);
7841 }
7842
7843 /* Create a socket for the tunnel, if one isn't set up by
7844 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
7845 index a16a48e..4393794 100644
7846 --- a/net/l2tp/l2tp_core.h
7847 +++ b/net/l2tp/l2tp_core.h
7848 @@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg {
7849
7850 struct l2tp_tunnel {
7851 int magic; /* Should be L2TP_TUNNEL_MAGIC */
7852 + struct rcu_head rcu;
7853 rwlock_t hlist_lock; /* protect session_hlist */
7854 struct hlist_head session_hlist[L2TP_HASH_SIZE];
7855 /* hashed list of sessions,
7856 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
7857 index b9bef2c..df08d77 100644
7858 --- a/net/llc/af_llc.c
7859 +++ b/net/llc/af_llc.c
7860 @@ -971,14 +971,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
7861 struct sockaddr_llc sllc;
7862 struct sock *sk = sock->sk;
7863 struct llc_sock *llc = llc_sk(sk);
7864 - int rc = 0;
7865 + int rc = -EBADF;
7866
7867 memset(&sllc, 0, sizeof(sllc));
7868 lock_sock(sk);
7869 if (sock_flag(sk, SOCK_ZAPPED))
7870 goto out;
7871 *uaddrlen = sizeof(sllc);
7872 - memset(uaddr, 0, *uaddrlen);
7873 if (peer) {
7874 rc = -ENOTCONN;
7875 if (sk->sk_state != TCP_ESTABLISHED)
7876 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
7877 index 25be683..abc31d7 100644
7878 --- a/net/mac80211/mlme.c
7879 +++ b/net/mac80211/mlme.c
7880 @@ -3232,6 +3232,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
7881 goto out_unlock;
7882
7883 err_clear:
7884 + memset(ifmgd->bssid, 0, ETH_ALEN);
7885 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
7886 ifmgd->auth_data = NULL;
7887 err_free:
7888 kfree(auth_data);
7889 @@ -3410,6 +3412,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
7890 err = 0;
7891 goto out;
7892 err_clear:
7893 + memset(ifmgd->bssid, 0, ETH_ALEN);
7894 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
7895 ifmgd->assoc_data = NULL;
7896 err_free:
7897 kfree(assoc_data);
7898 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
7899 index f558998..97e7380 100644
7900 --- a/net/netfilter/ipvs/ip_vs_ctl.c
7901 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
7902 @@ -2713,6 +2713,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
7903 {
7904 struct ip_vs_timeout_user t;
7905
7906 + memset(&t, 0, sizeof(t));
7907 __ip_vs_get_timeouts(net, &t);
7908 if (copy_to_user(user, &t, sizeof(t)) != 0)
7909 ret = -EFAULT;
7910 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
7911 index faa48f7..bba6ba1 100644
7912 --- a/net/netlink/af_netlink.c
7913 +++ b/net/netlink/af_netlink.c
7914 @@ -1329,7 +1329,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
7915 if (NULL == siocb->scm)
7916 siocb->scm = &scm;
7917
7918 - err = scm_send(sock, msg, siocb->scm);
7919 + err = scm_send(sock, msg, siocb->scm, true);
7920 if (err < 0)
7921 return err;
7922
7923 @@ -1340,7 +1340,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
7924 dst_pid = addr->nl_pid;
7925 dst_group = ffs(addr->nl_groups);
7926 err = -EPERM;
7927 - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
7928 + if ((dst_group || dst_pid) &&
7929 + !netlink_capable(sock, NL_NONROOT_SEND))
7930 goto out;
7931 } else {
7932 dst_pid = nlk->dst_pid;
7933 @@ -2115,6 +2116,7 @@ static void __init netlink_add_usersock_entry(void)
7934 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
7935 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
7936 nl_table[NETLINK_USERSOCK].registered = 1;
7937 + nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
7938
7939 netlink_table_ungrab();
7940 }
7941 diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
7942 index b6b1d7d..ce5348f 100644
7943 --- a/net/openvswitch/vport-internal_dev.c
7944 +++ b/net/openvswitch/vport-internal_dev.c
7945 @@ -24,6 +24,9 @@
7946 #include <linux/ethtool.h>
7947 #include <linux/skbuff.h>
7948
7949 +#include <net/dst.h>
7950 +#include <net/xfrm.h>
7951 +
7952 #include "datapath.h"
7953 #include "vport-internal_dev.h"
7954 #include "vport-netdev.h"
7955 @@ -209,6 +212,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
7956 int len;
7957
7958 len = skb->len;
7959 +
7960 + skb_dst_drop(skb);
7961 + nf_reset(skb);
7962 + secpath_reset(skb);
7963 +
7964 skb->dev = netdev;
7965 skb->pkt_type = PACKET_HOST;
7966 skb->protocol = eth_type_trans(skb, netdev);
7967 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
7968 index 4f2c0df..078fdff 100644
7969 --- a/net/packet/af_packet.c
7970 +++ b/net/packet/af_packet.c
7971 @@ -1280,6 +1280,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
7972 spin_unlock(&f->lock);
7973 }
7974
7975 +bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
7976 +{
7977 + if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
7978 + return true;
7979 +
7980 + return false;
7981 +}
7982 +
7983 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
7984 {
7985 struct packet_sock *po = pkt_sk(sk);
7986 @@ -1332,6 +1340,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
7987 match->prot_hook.dev = po->prot_hook.dev;
7988 match->prot_hook.func = packet_rcv_fanout;
7989 match->prot_hook.af_packet_priv = match;
7990 + match->prot_hook.id_match = match_fanout_group;
7991 dev_add_pack(&match->prot_hook);
7992 list_add(&match->list, &fanout_list);
7993 }
7994 @@ -1943,7 +1952,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
7995
7996 if (likely(po->tx_ring.pg_vec)) {
7997 ph = skb_shinfo(skb)->destructor_arg;
7998 - BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
7999 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
8000 atomic_dec(&po->tx_ring.pending);
8001 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
8002 diff --git a/net/rds/recv.c b/net/rds/recv.c
8003 index 5c6e9f1..9f0f17c 100644
8004 --- a/net/rds/recv.c
8005 +++ b/net/rds/recv.c
8006 @@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
8007
8008 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
8009
8010 + msg->msg_namelen = 0;
8011 +
8012 if (msg_flags & MSG_OOB)
8013 goto out;
8014
8015 @@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
8016 sin->sin_port = inc->i_hdr.h_sport;
8017 sin->sin_addr.s_addr = inc->i_saddr;
8018 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
8019 + msg->msg_namelen = sizeof(*sin);
8020 }
8021 break;
8022 }
8023 diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
8024 index b77f5a0..bdacd8d 100644
8025 --- a/net/sched/act_gact.c
8026 +++ b/net/sched/act_gact.c
8027 @@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
8028 struct tcf_common *pc;
8029 int ret = 0;
8030 int err;
8031 +#ifdef CONFIG_GACT_PROB
8032 + struct tc_gact_p *p_parm = NULL;
8033 +#endif
8034
8035 if (nla == NULL)
8036 return -EINVAL;
8037 @@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
8038 #ifndef CONFIG_GACT_PROB
8039 if (tb[TCA_GACT_PROB] != NULL)
8040 return -EOPNOTSUPP;
8041 +#else
8042 + if (tb[TCA_GACT_PROB]) {
8043 + p_parm = nla_data(tb[TCA_GACT_PROB]);
8044 + if (p_parm->ptype >= MAX_RAND)
8045 + return -EINVAL;
8046 + }
8047 #endif
8048
8049 pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
8050 @@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
8051 spin_lock_bh(&gact->tcf_lock);
8052 gact->tcf_action = parm->action;
8053 #ifdef CONFIG_GACT_PROB
8054 - if (tb[TCA_GACT_PROB] != NULL) {
8055 - struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
8056 + if (p_parm) {
8057 gact->tcfg_paction = p_parm->paction;
8058 gact->tcfg_pval = p_parm->pval;
8059 gact->tcfg_ptype = p_parm->ptype;
8060 @@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
8061
8062 spin_lock(&gact->tcf_lock);
8063 #ifdef CONFIG_GACT_PROB
8064 - if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
8065 + if (gact->tcfg_ptype)
8066 action = gact_rand[gact->tcfg_ptype](gact);
8067 else
8068 action = gact->tcf_action;
8069 diff --git a/net/socket.c b/net/socket.c
8070 index 06ffa0f..dab3176 100644
8071 --- a/net/socket.c
8072 +++ b/net/socket.c
8073 @@ -2658,6 +2658,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
8074 if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
8075 return -EFAULT;
8076
8077 + memset(&ifc, 0, sizeof(ifc));
8078 if (ifc32.ifcbuf == 0) {
8079 ifc32.ifc_len = 0;
8080 ifc.ifc_len = 0;
8081 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
8082 index d510353..109e30b 100644
8083 --- a/net/unix/af_unix.c
8084 +++ b/net/unix/af_unix.c
8085 @@ -1446,7 +1446,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
8086 if (NULL == siocb->scm)
8087 siocb->scm = &tmp_scm;
8088 wait_for_unix_gc();
8089 - err = scm_send(sock, msg, siocb->scm);
8090 + err = scm_send(sock, msg, siocb->scm, false);
8091 if (err < 0)
8092 return err;
8093
8094 @@ -1607,7 +1607,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
8095 if (NULL == siocb->scm)
8096 siocb->scm = &tmp_scm;
8097 wait_for_unix_gc();
8098 - err = scm_send(sock, msg, siocb->scm);
8099 + err = scm_send(sock, msg, siocb->scm, false);
8100 if (err < 0)
8101 return err;
8102
8103 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
8104 index 460af03..b01449f 100644
8105 --- a/net/wireless/reg.c
8106 +++ b/net/wireless/reg.c
8107 @@ -340,6 +340,9 @@ static void reg_regdb_search(struct work_struct *work)
8108 struct reg_regdb_search_request *request;
8109 const struct ieee80211_regdomain *curdom, *regdom;
8110 int i, r;
8111 + bool set_reg = false;
8112 +
8113 + mutex_lock(&cfg80211_mutex);
8114
8115 mutex_lock(&reg_regdb_search_mutex);
8116 while (!list_empty(&reg_regdb_search_list)) {
8117 @@ -355,9 +358,7 @@ static void reg_regdb_search(struct work_struct *work)
8118 r = reg_copy_regd(&regdom, curdom);
8119 if (r)
8120 break;
8121 - mutex_lock(&cfg80211_mutex);
8122 - set_regdom(regdom);
8123 - mutex_unlock(&cfg80211_mutex);
8124 + set_reg = true;
8125 break;
8126 }
8127 }
8128 @@ -365,6 +366,11 @@ static void reg_regdb_search(struct work_struct *work)
8129 kfree(request);
8130 }
8131 mutex_unlock(&reg_regdb_search_mutex);
8132 +
8133 + if (set_reg)
8134 + set_regdom(regdom);
8135 +
8136 + mutex_unlock(&cfg80211_mutex);
8137 }
8138
8139 static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
8140 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
8141 index 926b455..cec7479 100644
8142 --- a/sound/pci/hda/hda_codec.c
8143 +++ b/sound/pci/hda/hda_codec.c
8144 @@ -2279,6 +2279,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
8145 }
8146 if (codec->patch_ops.free)
8147 codec->patch_ops.free(codec);
8148 + memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
8149 snd_hda_jack_tbl_clear(codec);
8150 codec->proc_widget_hook = NULL;
8151 codec->spec = NULL;
8152 @@ -2292,7 +2293,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
8153 codec->num_pcms = 0;
8154 codec->pcm_info = NULL;
8155 codec->preset = NULL;
8156 - memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
8157 codec->slave_dig_outs = NULL;
8158 codec->spdif_status_reset = 0;
8159 module_put(codec->owner);
8160 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8161 index 152d91b..52e7a45 100644
8162 --- a/sound/pci/hda/patch_realtek.c
8163 +++ b/sound/pci/hda/patch_realtek.c
8164 @@ -188,6 +188,7 @@ struct alc_spec {
8165 unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */
8166 unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */
8167 unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
8168 + unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */
8169
8170 /* auto-mute control */
8171 int automute_mode;
8172 @@ -4365,7 +4366,8 @@ static int alc_parse_auto_config(struct hda_codec *codec,
8173 return 0; /* can't find valid BIOS pin config */
8174 }
8175
8176 - if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
8177 + if (!spec->no_primary_hp &&
8178 + cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
8179 cfg->line_outs <= cfg->hp_outs) {
8180 /* use HP as primary out */
8181 cfg->speaker_outs = cfg->line_outs;
8182 @@ -5076,6 +5078,7 @@ enum {
8183 ALC889_FIXUP_DAC_ROUTE,
8184 ALC889_FIXUP_MBP_VREF,
8185 ALC889_FIXUP_IMAC91_VREF,
8186 + ALC882_FIXUP_NO_PRIMARY_HP,
8187 };
8188
8189 static void alc889_fixup_coef(struct hda_codec *codec,
8190 @@ -5199,6 +5202,17 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
8191 spec->keep_vref_in_automute = 1;
8192 }
8193
8194 +/* Don't take HP output as primary
8195 + * strangely, the speaker output doesn't work on VAIO Z through DAC 0x05
8196 + */
8197 +static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
8198 + const struct alc_fixup *fix, int action)
8199 +{
8200 + struct alc_spec *spec = codec->spec;
8201 + if (action == ALC_FIXUP_ACT_PRE_PROBE)
8202 + spec->no_primary_hp = 1;
8203 +}
8204 +
8205 static const struct alc_fixup alc882_fixups[] = {
8206 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
8207 .type = ALC_FIXUP_PINS,
8208 @@ -5381,6 +5395,10 @@ static const struct alc_fixup alc882_fixups[] = {
8209 .chained = true,
8210 .chain_id = ALC882_FIXUP_GPIO1,
8211 },
8212 + [ALC882_FIXUP_NO_PRIMARY_HP] = {
8213 + .type = ALC_FIXUP_FUNC,
8214 + .v.func = alc882_fixup_no_primary_hp,
8215 + },
8216 };
8217
8218 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
8219 @@ -5415,6 +5433,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
8220 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
8221 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
8222 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
8223 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
8224
8225 /* All Apple entries are in codec SSIDs */
8226 SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
8227 @@ -5455,6 +5474,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = {
8228 {.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
8229 {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
8230 {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
8231 + {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
8232 {}
8233 };
8234
8235 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
8236 index fd53312..04cd44f 100644
8237 --- a/sound/pci/hda/patch_sigmatel.c
8238 +++ b/sound/pci/hda/patch_sigmatel.c
8239 @@ -1072,7 +1072,7 @@ static struct snd_kcontrol_new stac_smux_mixer = {
8240
8241 static const char * const slave_pfxs[] = {
8242 "Front", "Surround", "Center", "LFE", "Side",
8243 - "Headphone", "Speaker", "IEC958",
8244 + "Headphone", "Speaker", "IEC958", "PCM",
8245 NULL
8246 };
8247
8248 diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
8249 index 764cc93..075d5aa 100644
8250 --- a/sound/pci/ice1712/prodigy_hifi.c
8251 +++ b/sound/pci/ice1712/prodigy_hifi.c
8252 @@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
8253 }
8254
8255 static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
8256 +static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
8257
8258 static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
8259 {
8260 @@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
8261 .info = ak4396_dac_vol_info,
8262 .get = ak4396_dac_vol_get,
8263 .put = ak4396_dac_vol_put,
8264 - .tlv = { .p = db_scale_wm_dac },
8265 + .tlv = { .p = ak4396_db_scale },
8266 },
8267 };
8268
8269 diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
8270 index a75c376..ae6f26f 100644
8271 --- a/sound/soc/codecs/wm2000.c
8272 +++ b/sound/soc/codecs/wm2000.c
8273 @@ -692,7 +692,7 @@ static int wm2000_resume(struct snd_soc_codec *codec)
8274 #endif
8275
8276 static const struct regmap_config wm2000_regmap = {
8277 - .reg_bits = 8,
8278 + .reg_bits = 16,
8279 .val_bits = 8,
8280 };
8281
8282 diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
8283 index ddc6cde..2526eca 100644
8284 --- a/sound/soc/samsung/dma.c
8285 +++ b/sound/soc/samsung/dma.c
8286 @@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = {
8287 .info = SNDRV_PCM_INFO_INTERLEAVED |
8288 SNDRV_PCM_INFO_BLOCK_TRANSFER |
8289 SNDRV_PCM_INFO_MMAP |
8290 - SNDRV_PCM_INFO_MMAP_VALID |
8291 - SNDRV_PCM_INFO_PAUSE |
8292 - SNDRV_PCM_INFO_RESUME,
8293 + SNDRV_PCM_INFO_MMAP_VALID,
8294 .formats = SNDRV_PCM_FMTBIT_S16_LE |
8295 SNDRV_PCM_FMTBIT_U16_LE |
8296 SNDRV_PCM_FMTBIT_U8 |
8297 @@ -246,15 +244,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
8298
8299 switch (cmd) {
8300 case SNDRV_PCM_TRIGGER_START:
8301 - case SNDRV_PCM_TRIGGER_RESUME:
8302 - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
8303 prtd->state |= ST_RUNNING;
8304 prtd->params->ops->trigger(prtd->params->ch);
8305 break;
8306
8307 case SNDRV_PCM_TRIGGER_STOP:
8308 - case SNDRV_PCM_TRIGGER_SUSPEND:
8309 - case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
8310 prtd->state &= ~ST_RUNNING;
8311 prtd->params->ops->stop(prtd->params->ch);
8312 break;
8313 diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
8314 index e45ccd8..76d759e 100644
8315 --- a/sound/soc/tegra/tegra_alc5632.c
8316 +++ b/sound/soc/tegra/tegra_alc5632.c
8317 @@ -95,7 +95,6 @@ static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = {
8318 .name = "Headset detection",
8319 .report = SND_JACK_HEADSET,
8320 .debounce_time = 150,
8321 - .invert = 1,
8322 };
8323
8324 static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = {