Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.4/0108-3.4.9-all-fixes.patch



Revision 1897
Sat Sep 22 14:51:23 2012 UTC by niro
File size: 83648 bytes
-added linux patches up to 3.4.11
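
Most of this patch is the upstream /dev/random rework (a new add_device_randomness() interface, per-CPU fast pools behind add_interrupt_randomness(), and the removal of rand_initialize_irq()), plus ARM undefined-instruction/VFP entry fixes and assorted driver updates. As a minimal sketch only, not part of the patch and with an invented module name and identifier buffer, the following shows how a driver can use the new interface to stir device-unique data into the pools, much like the wm831x OTP hunk near the end of this patch; add_device_randomness() credits no entropy.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/random.h>

/* Stand-in for data read from OTP, an EEPROM, DMI tables, etc. */
static u8 example_unique_id[16];

static int __init example_seed_init(void)
{
	/*
	 * Mix the identifier into the input and nonblocking pools.
	 * This only perturbs the pool state so that otherwise
	 * identical devices diverge; no entropy is credited.
	 */
	add_device_randomness(example_unique_id, sizeof(example_unique_id));
	return 0;
}

static void __exit example_seed_exit(void)
{
}

module_init(example_seed_init);
module_exit(example_seed_exit);

MODULE_LICENSE("GPL");
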
1 niro 1897 diff --git a/MAINTAINERS b/MAINTAINERS
2     index b362709..a60009d 100644
3     --- a/MAINTAINERS
4     +++ b/MAINTAINERS
5     @@ -5566,7 +5566,7 @@ F: Documentation/blockdev/ramdisk.txt
6     F: drivers/block/brd.c
7    
8     RANDOM NUMBER DRIVER
9     -M: Matt Mackall <mpm@selenic.com>
10     +M: Theodore Ts'o" <tytso@mit.edu>
11     S: Maintained
12     F: drivers/char/random.c
13    
14     diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
15     index 2dccce4..7541a91 100644
16     --- a/arch/arm/boot/dts/imx53-ard.dts
17     +++ b/arch/arm/boot/dts/imx53-ard.dts
18     @@ -70,10 +70,30 @@
19     interrupt-parent = <&gpio2>;
20     interrupts = <31>;
21     reg-io-width = <4>;
22     + /*
23     + * VDD33A and VDDVARIO of LAN9220 are supplied by
24     + * SW4_3V3 of LTC3589. Before the regulator driver
25     + * for this PMIC is available, we use a fixed dummy
26     + * 3V3 regulator to get LAN9220 driver probing work.
27     + */
28     + vdd33a-supply = <&reg_3p3v>;
29     + vddvario-supply = <&reg_3p3v>;
30     smsc,irq-push-pull;
31     };
32     };
33    
34     + regulators {
35     + compatible = "simple-bus";
36     +
37     + reg_3p3v: 3p3v {
38     + compatible = "regulator-fixed";
39     + regulator-name = "3P3V";
40     + regulator-min-microvolt = <3300000>;
41     + regulator-max-microvolt = <3300000>;
42     + regulator-always-on;
43     + };
44     + };
45     +
46     gpio-keys {
47     compatible = "gpio-keys";
48    
49     diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
50     index 1ebbf45..70d0bf4 100644
51     --- a/arch/arm/configs/mxs_defconfig
52     +++ b/arch/arm/configs/mxs_defconfig
53     @@ -32,7 +32,6 @@ CONFIG_NO_HZ=y
54     CONFIG_HIGH_RES_TIMERS=y
55     CONFIG_PREEMPT_VOLUNTARY=y
56     CONFIG_AEABI=y
57     -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
58     CONFIG_AUTO_ZRELADDR=y
59     CONFIG_FPE_NWFPE=y
60     CONFIG_NET=y
61     diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
62     index 1252a26..42dec04 100644
63     --- a/arch/arm/include/asm/cacheflush.h
64     +++ b/arch/arm/include/asm/cacheflush.h
65     @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
66     static inline void
67     vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
68     {
69     - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
70     + struct mm_struct *mm = vma->vm_mm;
71     +
72     + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
73     __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
74     vma->vm_flags);
75     }
76     @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
77     static inline void
78     vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
79     {
80     - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
81     + struct mm_struct *mm = vma->vm_mm;
82     +
83     + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
84     unsigned long addr = user_addr & PAGE_MASK;
85     __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
86     }
87     diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
88     index 7fd3ad0..8f29865 100644
89     --- a/arch/arm/kernel/entry-armv.S
90     +++ b/arch/arm/kernel/entry-armv.S
91     @@ -244,6 +244,19 @@ svc_preempt:
92     b 1b
93     #endif
94    
95     +__und_fault:
96     + @ Correct the PC such that it is pointing at the instruction
97     + @ which caused the fault. If the faulting instruction was ARM
98     + @ the PC will be pointing at the next instruction, and have to
99     + @ subtract 4. Otherwise, it is Thumb, and the PC will be
100     + @ pointing at the second half of the Thumb instruction. We
101     + @ have to subtract 2.
102     + ldr r2, [r0, #S_PC]
103     + sub r2, r2, r1
104     + str r2, [r0, #S_PC]
105     + b do_undefinstr
106     +ENDPROC(__und_fault)
107     +
108     .align 5
109     __und_svc:
110     #ifdef CONFIG_KPROBES
111     @@ -261,25 +274,32 @@ __und_svc:
112     @
113     @ r0 - instruction
114     @
115     -#ifndef CONFIG_THUMB2_KERNEL
116     +#ifndef CONFIG_THUMB2_KERNEL
117     ldr r0, [r4, #-4]
118     #else
119     + mov r1, #2
120     ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
121     cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
122     - ldrhhs r9, [r4] @ bottom 16 bits
123     - orrhs r0, r9, r0, lsl #16
124     + blo __und_svc_fault
125     + ldrh r9, [r4] @ bottom 16 bits
126     + add r4, r4, #2
127     + str r4, [sp, #S_PC]
128     + orr r0, r9, r0, lsl #16
129     #endif
130     - adr r9, BSYM(1f)
131     + adr r9, BSYM(__und_svc_finish)
132     mov r2, r4
133     bl call_fpe
134    
135     + mov r1, #4 @ PC correction to apply
136     +__und_svc_fault:
137     mov r0, sp @ struct pt_regs *regs
138     - bl do_undefinstr
139     + bl __und_fault
140    
141     @
142     @ IRQs off again before pulling preserved data off the stack
143     @
144     -1: disable_irq_notrace
145     +__und_svc_finish:
146     + disable_irq_notrace
147    
148     @
149     @ restore SPSR and restart the instruction
150     @@ -423,25 +443,33 @@ __und_usr:
151     mov r2, r4
152     mov r3, r5
153    
154     + @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
155     + @ faulting instruction depending on Thumb mode.
156     + @ r3 = regs->ARM_cpsr
157     @
158     - @ fall through to the emulation code, which returns using r9 if
159     - @ it has emulated the instruction, or the more conventional lr
160     - @ if we are to treat this as a real undefined instruction
161     - @
162     - @ r0 - instruction
163     + @ The emulation code returns using r9 if it has emulated the
164     + @ instruction, or the more conventional lr if we are to treat
165     + @ this as a real undefined instruction
166     @
167     adr r9, BSYM(ret_from_exception)
168     - adr lr, BSYM(__und_usr_unknown)
169     +
170     tst r3, #PSR_T_BIT @ Thumb mode?
171     - itet eq @ explicit IT needed for the 1f label
172     - subeq r4, r2, #4 @ ARM instr at LR - 4
173     - subne r4, r2, #2 @ Thumb instr at LR - 2
174     -1: ldreqt r0, [r4]
175     + bne __und_usr_thumb
176     + sub r4, r2, #4 @ ARM instr at LR - 4
177     +1: ldrt r0, [r4]
178     #ifdef CONFIG_CPU_ENDIAN_BE8
179     - reveq r0, r0 @ little endian instruction
180     + rev r0, r0 @ little endian instruction
181     #endif
182     - beq call_fpe
183     + @ r0 = 32-bit ARM instruction which caused the exception
184     + @ r2 = PC value for the following instruction (:= regs->ARM_pc)
185     + @ r4 = PC value for the faulting instruction
186     + @ lr = 32-bit undefined instruction function
187     + adr lr, BSYM(__und_usr_fault_32)
188     + b call_fpe
189     +
190     +__und_usr_thumb:
191     @ Thumb instruction
192     + sub r4, r2, #2 @ First half of thumb instr at LR - 2
193     #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
194     /*
195     * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
196     @@ -455,7 +483,7 @@ __und_usr:
197     ldr r5, .LCcpu_architecture
198     ldr r5, [r5]
199     cmp r5, #CPU_ARCH_ARMv7
200     - blo __und_usr_unknown
201     + blo __und_usr_fault_16 @ 16bit undefined instruction
202     /*
203     * The following code won't get run unless the running CPU really is v7, so
204     * coding round the lack of ldrht on older arches is pointless. Temporarily
205     @@ -463,15 +491,18 @@ __und_usr:
206     */
207     .arch armv6t2
208     #endif
209     -2:
210     - ARM( ldrht r5, [r4], #2 )
211     - THUMB( ldrht r5, [r4] )
212     - THUMB( add r4, r4, #2 )
213     +2: ldrht r5, [r4]
214     cmp r5, #0xe800 @ 32bit instruction if xx != 0
215     - blo __und_usr_unknown
216     -3: ldrht r0, [r4]
217     + blo __und_usr_fault_16 @ 16bit undefined instruction
218     +3: ldrht r0, [r2]
219     add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
220     + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
221     orr r0, r0, r5, lsl #16
222     + adr lr, BSYM(__und_usr_fault_32)
223     + @ r0 = the two 16-bit Thumb instructions which caused the exception
224     + @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
225     + @ r4 = PC value for the first 16-bit Thumb instruction
226     + @ lr = 32bit undefined instruction function
227    
228     #if __LINUX_ARM_ARCH__ < 7
229     /* If the target arch was overridden, change it back: */
230     @@ -482,17 +513,13 @@ __und_usr:
231     #endif
232     #endif /* __LINUX_ARM_ARCH__ < 7 */
233     #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
234     - b __und_usr_unknown
235     + b __und_usr_fault_16
236     #endif
237     - UNWIND(.fnend )
238     + UNWIND(.fnend)
239     ENDPROC(__und_usr)
240    
241     - @
242     - @ fallthrough to call_fpe
243     - @
244     -
245     /*
246     - * The out of line fixup for the ldrt above.
247     + * The out of line fixup for the ldrt instructions above.
248     */
249     .pushsection .fixup, "ax"
250     4: mov pc, r9
251     @@ -523,11 +550,12 @@ ENDPROC(__und_usr)
252     * NEON handler code.
253     *
254     * Emulators may wish to make use of the following registers:
255     - * r0 = instruction opcode.
256     - * r2 = PC+4
257     + * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
258     + * r2 = PC value to resume execution after successful emulation
259     * r9 = normal "successful" return address
260     - * r10 = this threads thread_info structure.
261     + * r10 = this threads thread_info structure
262     * lr = unrecognised instruction return address
263     + * IRQs disabled, FIQs enabled.
264     */
265     @
266     @ Fall-through from Thumb-2 __und_usr
267     @@ -662,12 +690,17 @@ ENTRY(no_fp)
268     mov pc, lr
269     ENDPROC(no_fp)
270    
271     -__und_usr_unknown:
272     - enable_irq
273     +__und_usr_fault_32:
274     + mov r1, #4
275     + b 1f
276     +__und_usr_fault_16:
277     + mov r1, #2
278     +1: enable_irq
279     mov r0, sp
280     adr lr, BSYM(ret_from_exception)
281     - b do_undefinstr
282     -ENDPROC(__und_usr_unknown)
283     + b __und_fault
284     +ENDPROC(__und_usr_fault_32)
285     +ENDPROC(__und_usr_fault_16)
286    
287     .align 5
288     __pabt_usr:
289     diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
290     index 2b7b017..48f3624 100644
291     --- a/arch/arm/kernel/process.c
292     +++ b/arch/arm/kernel/process.c
293     @@ -267,6 +267,7 @@ void machine_shutdown(void)
294     void machine_halt(void)
295     {
296     machine_shutdown();
297     + local_irq_disable();
298     while (1);
299     }
300    
301     @@ -288,6 +289,7 @@ void machine_restart(char *cmd)
302    
303     /* Whoops - the platform was unable to reboot. Tell the user! */
304     printk("Reboot failed -- System halted\n");
305     + local_irq_disable();
306     while (1);
307     }
308    
309     diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
310     index 8f46446..7babc3f 100644
311     --- a/arch/arm/kernel/smp.c
312     +++ b/arch/arm/kernel/smp.c
313     @@ -590,7 +590,8 @@ void smp_send_stop(void)
314    
315     cpumask_copy(&mask, cpu_online_mask);
316     cpumask_clear_cpu(smp_processor_id(), &mask);
317     - smp_cross_call(&mask, IPI_CPU_STOP);
318     + if (!cpumask_empty(&mask))
319     + smp_cross_call(&mask, IPI_CPU_STOP);
320    
321     /* Wait up to one second for other CPUs to stop */
322     timeout = USEC_PER_SEC;
323     diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
324     index 63d402f..a8ad1e3 100644
325     --- a/arch/arm/kernel/traps.c
326     +++ b/arch/arm/kernel/traps.c
327     @@ -370,18 +370,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
328    
329     asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
330     {
331     - unsigned int correction = thumb_mode(regs) ? 2 : 4;
332     unsigned int instr;
333     siginfo_t info;
334     void __user *pc;
335    
336     - /*
337     - * According to the ARM ARM, PC is 2 or 4 bytes ahead,
338     - * depending whether we're in Thumb mode or not.
339     - * Correct this offset.
340     - */
341     - regs->ARM_pc -= correction;
342     -
343     pc = (void __user *)instruction_pointer(regs);
344    
345     if (processor_mode(regs) == SVC_MODE) {
346     diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
347     index 5905ed1..d89d87ae 100644
348     --- a/arch/arm/mach-pxa/raumfeld.c
349     +++ b/arch/arm/mach-pxa/raumfeld.c
350     @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
351    
352     static struct eeti_ts_platform_data eeti_ts_pdata = {
353     .irq_active_high = 1,
354     + .irq_gpio = GPIO_TOUCH_IRQ,
355     };
356    
357     static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
358     .type = "eeti_ts",
359     .addr = 0x0a,
360     - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
361     .platform_data = &eeti_ts_pdata,
362     };
363    
364     diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
365     index 845f461..c202113 100644
366     --- a/arch/arm/mm/tlb-v7.S
367     +++ b/arch/arm/mm/tlb-v7.S
368     @@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
369     dsb
370     mov r0, r0, lsr #PAGE_SHIFT @ align address
371     mov r1, r1, lsr #PAGE_SHIFT
372     +#ifdef CONFIG_ARM_ERRATA_720789
373     + mov r3, #0
374     +#else
375     asid r3, r3 @ mask ASID
376     +#endif
377     orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
378     mov r1, r1, lsl #PAGE_SHIFT
379     1:
380     +#ifdef CONFIG_ARM_ERRATA_720789
381     + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
382     +#else
383     ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
384     +#endif
385     ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
386    
387     add r0, r0, #PAGE_SZ
388     @@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
389     mov r0, r0, lsl #PAGE_SHIFT
390     mov r1, r1, lsl #PAGE_SHIFT
391     1:
392     +#ifdef CONFIG_ARM_ERRATA_720789
393     + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
394     +#else
395     ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
396     +#endif
397     ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
398     add r0, r0, #PAGE_SZ
399     cmp r0, r1
400     diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
401     index 4fa9903..cc926c9 100644
402     --- a/arch/arm/vfp/entry.S
403     +++ b/arch/arm/vfp/entry.S
404     @@ -7,18 +7,20 @@
405     * This program is free software; you can redistribute it and/or modify
406     * it under the terms of the GNU General Public License version 2 as
407     * published by the Free Software Foundation.
408     - *
409     - * Basic entry code, called from the kernel's undefined instruction trap.
410     - * r0 = faulted instruction
411     - * r5 = faulted PC+4
412     - * r9 = successful return
413     - * r10 = thread_info structure
414     - * lr = failure return
415     */
416     #include <asm/thread_info.h>
417     #include <asm/vfpmacros.h>
418     #include "../kernel/entry-header.S"
419    
420     +@ VFP entry point.
421     +@
422     +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
423     +@ r2 = PC value to resume execution after successful emulation
424     +@ r9 = normal "successful" return address
425     +@ r10 = this threads thread_info structure
426     +@ lr = unrecognised instruction return address
427     +@ IRQs disabled.
428     +@
429     ENTRY(do_vfp)
430     #ifdef CONFIG_PREEMPT
431     ldr r4, [r10, #TI_PREEMPT] @ get preempt count
432     diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
433     index 2d30c7f..3a0efaa 100644
434     --- a/arch/arm/vfp/vfphw.S
435     +++ b/arch/arm/vfp/vfphw.S
436     @@ -61,13 +61,13 @@
437    
438     @ VFP hardware support entry point.
439     @
440     -@ r0 = faulted instruction
441     -@ r2 = faulted PC+4
442     -@ r9 = successful return
443     +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
444     +@ r2 = PC value to resume execution after successful emulation
445     +@ r9 = normal "successful" return address
446     @ r10 = vfp_state union
447     @ r11 = CPU number
448     -@ lr = failure return
449     -
450     +@ lr = unrecognised instruction return address
451     +@ IRQs enabled.
452     ENTRY(vfp_support_entry)
453     DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
454    
455     @@ -161,9 +161,12 @@ vfp_hw_state_valid:
456     @ exception before retrying branch
457     @ out before setting an FPEXC that
458     @ stops us reading stuff
459     - VFPFMXR FPEXC, r1 @ restore FPEXC last
460     - sub r2, r2, #4
461     - str r2, [sp, #S_PC] @ retry the instruction
462     + VFPFMXR FPEXC, r1 @ Restore FPEXC last
463     + sub r2, r2, #4 @ Retry current instruction - if Thumb
464     + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
465     + @ else it's one 32-bit instruction, so
466     + @ always subtract 4 from the following
467     + @ instruction address.
468     #ifdef CONFIG_PREEMPT
469     get_thread_info r10
470     ldr r4, [r10, #TI_PREEMPT] @ get preempt count
471     diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
472     index b0197b2..1ef803a 100644
473     --- a/arch/arm/vfp/vfpmodule.c
474     +++ b/arch/arm/vfp/vfpmodule.c
475     @@ -457,10 +457,16 @@ static int vfp_pm_suspend(void)
476    
477     /* disable, just in case */
478     fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
479     + } else if (vfp_current_hw_state[ti->cpu]) {
480     +#ifndef CONFIG_SMP
481     + fmxr(FPEXC, fpexc | FPEXC_EN);
482     + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
483     + fmxr(FPEXC, fpexc);
484     +#endif
485     }
486    
487     /* clear any information we had about last context state */
488     - memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
489     + vfp_current_hw_state[ti->cpu] = NULL;
490    
491     return 0;
492     }
493     diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
494     index 7d91166..6e6fe18 100644
495     --- a/arch/ia64/include/asm/atomic.h
496     +++ b/arch/ia64/include/asm/atomic.h
497     @@ -17,8 +17,8 @@
498     #include <asm/intrinsics.h>
499    
500    
501     -#define ATOMIC_INIT(i) ((atomic_t) { (i) })
502     -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
503     +#define ATOMIC_INIT(i) { (i) }
504     +#define ATOMIC64_INIT(i) { (i) }
505    
506     #define atomic_read(v) (*(volatile int *)&(v)->counter)
507     #define atomic64_read(v) (*(volatile long *)&(v)->counter)
508     diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
509     index 5c3e088..1034884 100644
510     --- a/arch/ia64/kernel/irq_ia64.c
511     +++ b/arch/ia64/kernel/irq_ia64.c
512     @@ -23,7 +23,6 @@
513     #include <linux/ioport.h>
514     #include <linux/kernel_stat.h>
515     #include <linux/ptrace.h>
516     -#include <linux/random.h> /* for rand_initialize_irq() */
517     #include <linux/signal.h>
518     #include <linux/smp.h>
519     #include <linux/threads.h>
520     diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
521     index 1f84794..73ef56c 100644
522     --- a/arch/x86/kernel/alternative.c
523     +++ b/arch/x86/kernel/alternative.c
524     @@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
525     ideal_nops = intel_nops;
526     #endif
527     }
528     -
529     + break;
530     default:
531     #ifdef CONFIG_X86_64
532     ideal_nops = k8_nops;
533     diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
534     index c9bda6d..24b852b 100644
535     --- a/arch/x86/kernel/microcode_core.c
536     +++ b/arch/x86/kernel/microcode_core.c
537     @@ -298,20 +298,31 @@ static ssize_t reload_store(struct device *dev,
538     const char *buf, size_t size)
539     {
540     unsigned long val;
541     - int cpu = dev->id;
542     - int ret = 0;
543     - char *end;
544     + int cpu;
545     + ssize_t ret = 0, tmp_ret;
546    
547     - val = simple_strtoul(buf, &end, 0);
548     - if (end == buf)
549     + /* allow reload only from the BSP */
550     + if (boot_cpu_data.cpu_index != dev->id)
551     return -EINVAL;
552    
553     - if (val == 1) {
554     - get_online_cpus();
555     - if (cpu_online(cpu))
556     - ret = reload_for_cpu(cpu);
557     - put_online_cpus();
558     + ret = kstrtoul(buf, 0, &val);
559     + if (ret)
560     + return ret;
561     +
562     + if (val != 1)
563     + return size;
564     +
565     + get_online_cpus();
566     + for_each_online_cpu(cpu) {
567     + tmp_ret = reload_for_cpu(cpu);
568     + if (tmp_ret != 0)
569     + pr_warn("Error reloading microcode on CPU %d\n", cpu);
570     +
571     + /* save retval of the first encountered reload error */
572     + if (!ret)
573     + ret = tmp_ret;
574     }
575     + put_online_cpus();
576    
577     if (!ret)
578     ret = size;
579     diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
580     index 0734086..bbac51e 100644
581     --- a/drivers/acpi/processor_driver.c
582     +++ b/drivers/acpi/processor_driver.c
583     @@ -442,7 +442,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
584     /* Normal CPU soft online event */
585     } else {
586     acpi_processor_ppc_has_changed(pr, 0);
587     - acpi_processor_cst_has_changed(pr);
588     + acpi_processor_hotplug(pr);
589     acpi_processor_reevaluate_tstate(pr, action);
590     acpi_processor_tstate_has_changed(pr);
591     }
592     diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
593     index 8b78750..845f97f 100644
594     --- a/drivers/char/mspec.c
595     +++ b/drivers/char/mspec.c
596     @@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
597     vdata->flags = flags;
598     vdata->type = type;
599     spin_lock_init(&vdata->lock);
600     - vdata->refcnt = ATOMIC_INIT(1);
601     + atomic_set(&vdata->refcnt, 1);
602     vma->vm_private_data = vdata;
603    
604     vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
605     diff --git a/drivers/char/random.c b/drivers/char/random.c
606     index 4ec04a7..d98b2a6 100644
607     --- a/drivers/char/random.c
608     +++ b/drivers/char/random.c
609     @@ -125,21 +125,26 @@
610     * The current exported interfaces for gathering environmental noise
611     * from the devices are:
612     *
613     + * void add_device_randomness(const void *buf, unsigned int size);
614     * void add_input_randomness(unsigned int type, unsigned int code,
615     * unsigned int value);
616     - * void add_interrupt_randomness(int irq);
617     + * void add_interrupt_randomness(int irq, int irq_flags);
618     * void add_disk_randomness(struct gendisk *disk);
619     *
620     + * add_device_randomness() is for adding data to the random pool that
621     + * is likely to differ between two devices (or possibly even per boot).
622     + * This would be things like MAC addresses or serial numbers, or the
623     + * read-out of the RTC. This does *not* add any actual entropy to the
624     + * pool, but it initializes the pool to different values for devices
625     + * that might otherwise be identical and have very little entropy
626     + * available to them (particularly common in the embedded world).
627     + *
628     * add_input_randomness() uses the input layer interrupt timing, as well as
629     * the event type information from the hardware.
630     *
631     - * add_interrupt_randomness() uses the inter-interrupt timing as random
632     - * inputs to the entropy pool. Note that not all interrupts are good
633     - * sources of randomness! For example, the timer interrupts is not a
634     - * good choice, because the periodicity of the interrupts is too
635     - * regular, and hence predictable to an attacker. Network Interface
636     - * Controller interrupts are a better measure, since the timing of the
637     - * NIC interrupts are more unpredictable.
638     + * add_interrupt_randomness() uses the interrupt timing as random
639     + * inputs to the entropy pool. Using the cycle counters and the irq source
640     + * as inputs, it feeds the randomness roughly once a second.
641     *
642     * add_disk_randomness() uses what amounts to the seek time of block
643     * layer request events, on a per-disk_devt basis, as input to the
644     @@ -248,6 +253,8 @@
645     #include <linux/percpu.h>
646     #include <linux/cryptohash.h>
647     #include <linux/fips.h>
648     +#include <linux/ptrace.h>
649     +#include <linux/kmemcheck.h>
650    
651     #ifdef CONFIG_GENERIC_HARDIRQS
652     # include <linux/irq.h>
653     @@ -256,8 +263,12 @@
654     #include <asm/processor.h>
655     #include <asm/uaccess.h>
656     #include <asm/irq.h>
657     +#include <asm/irq_regs.h>
658     #include <asm/io.h>
659    
660     +#define CREATE_TRACE_POINTS
661     +#include <trace/events/random.h>
662     +
663     /*
664     * Configuration information
665     */
666     @@ -266,6 +277,8 @@
667     #define SEC_XFER_SIZE 512
668     #define EXTRACT_SIZE 10
669    
670     +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
671     +
672     /*
673     * The minimum number of bits of entropy before we wake up a read on
674     * /dev/random. Should be enough to do a significant reseed.
675     @@ -420,8 +433,10 @@ struct entropy_store {
676     /* read-write data: */
677     spinlock_t lock;
678     unsigned add_ptr;
679     + unsigned input_rotate;
680     int entropy_count;
681     - int input_rotate;
682     + int entropy_total;
683     + unsigned int initialized:1;
684     __u8 last_data[EXTRACT_SIZE];
685     };
686    
687     @@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
688     .pool = nonblocking_pool_data
689     };
690    
691     +static __u32 const twist_table[8] = {
692     + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
693     + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
694     +
695     /*
696     * This function adds bytes into the entropy "pool". It does not
697     * update the entropy estimate. The caller should call
698     @@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
699     * it's cheap to do so and helps slightly in the expected case where
700     * the entropy is concentrated in the low-order bits.
701     */
702     -static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
703     - int nbytes, __u8 out[64])
704     +static void _mix_pool_bytes(struct entropy_store *r, const void *in,
705     + int nbytes, __u8 out[64])
706     {
707     - static __u32 const twist_table[8] = {
708     - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
709     - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
710     unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
711     int input_rotate;
712     int wordmask = r->poolinfo->poolwords - 1;
713     const char *bytes = in;
714     __u32 w;
715     - unsigned long flags;
716    
717     - /* Taps are constant, so we can load them without holding r->lock. */
718     tap1 = r->poolinfo->tap1;
719     tap2 = r->poolinfo->tap2;
720     tap3 = r->poolinfo->tap3;
721     tap4 = r->poolinfo->tap4;
722     tap5 = r->poolinfo->tap5;
723    
724     - spin_lock_irqsave(&r->lock, flags);
725     - input_rotate = r->input_rotate;
726     - i = r->add_ptr;
727     + smp_rmb();
728     + input_rotate = ACCESS_ONCE(r->input_rotate);
729     + i = ACCESS_ONCE(r->add_ptr);
730    
731     /* mix one byte at a time to simplify size handling and churn faster */
732     while (nbytes--) {
733     @@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
734     input_rotate += i ? 7 : 14;
735     }
736    
737     - r->input_rotate = input_rotate;
738     - r->add_ptr = i;
739     + ACCESS_ONCE(r->input_rotate) = input_rotate;
740     + ACCESS_ONCE(r->add_ptr) = i;
741     + smp_wmb();
742    
743     if (out)
744     for (j = 0; j < 16; j++)
745     ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
746     +}
747     +
748     +static void __mix_pool_bytes(struct entropy_store *r, const void *in,
749     + int nbytes, __u8 out[64])
750     +{
751     + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
752     + _mix_pool_bytes(r, in, nbytes, out);
753     +}
754     +
755     +static void mix_pool_bytes(struct entropy_store *r, const void *in,
756     + int nbytes, __u8 out[64])
757     +{
758     + unsigned long flags;
759    
760     + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
761     + spin_lock_irqsave(&r->lock, flags);
762     + _mix_pool_bytes(r, in, nbytes, out);
763     spin_unlock_irqrestore(&r->lock, flags);
764     }
765    
766     -static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
767     +struct fast_pool {
768     + __u32 pool[4];
769     + unsigned long last;
770     + unsigned short count;
771     + unsigned char rotate;
772     + unsigned char last_timer_intr;
773     +};
774     +
775     +/*
776     + * This is a fast mixing routine used by the interrupt randomness
777     + * collector. It's hardcoded for an 128 bit pool and assumes that any
778     + * locks that might be needed are taken by the caller.
779     + */
780     +static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
781     {
782     - mix_pool_bytes_extract(r, in, bytes, NULL);
783     + const char *bytes = in;
784     + __u32 w;
785     + unsigned i = f->count;
786     + unsigned input_rotate = f->rotate;
787     +
788     + while (nbytes--) {
789     + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
790     + f->pool[(i + 1) & 3];
791     + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
792     + input_rotate += (i++ & 3) ? 7 : 14;
793     + }
794     + f->count = i;
795     + f->rotate = input_rotate;
796     }
797    
798     /*
799     @@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
800     */
801     static void credit_entropy_bits(struct entropy_store *r, int nbits)
802     {
803     - unsigned long flags;
804     - int entropy_count;
805     + int entropy_count, orig;
806    
807     if (!nbits)
808     return;
809    
810     - spin_lock_irqsave(&r->lock, flags);
811     -
812     DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
813     - entropy_count = r->entropy_count;
814     +retry:
815     + entropy_count = orig = ACCESS_ONCE(r->entropy_count);
816     entropy_count += nbits;
817     +
818     if (entropy_count < 0) {
819     DEBUG_ENT("negative entropy/overflow\n");
820     entropy_count = 0;
821     } else if (entropy_count > r->poolinfo->POOLBITS)
822     entropy_count = r->poolinfo->POOLBITS;
823     - r->entropy_count = entropy_count;
824     + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
825     + goto retry;
826     +
827     + if (!r->initialized && nbits > 0) {
828     + r->entropy_total += nbits;
829     + if (r->entropy_total > 128)
830     + r->initialized = 1;
831     + }
832     +
833     + trace_credit_entropy_bits(r->name, nbits, entropy_count,
834     + r->entropy_total, _RET_IP_);
835    
836     /* should we wake readers? */
837     if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
838     wake_up_interruptible(&random_read_wait);
839     kill_fasync(&fasync, SIGIO, POLL_IN);
840     }
841     - spin_unlock_irqrestore(&r->lock, flags);
842     }
843    
844     /*********************************************************************
845     @@ -572,42 +636,24 @@ struct timer_rand_state {
846     unsigned dont_count_entropy:1;
847     };
848    
849     -#ifndef CONFIG_GENERIC_HARDIRQS
850     -
851     -static struct timer_rand_state *irq_timer_state[NR_IRQS];
852     -
853     -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
854     -{
855     - return irq_timer_state[irq];
856     -}
857     -
858     -static void set_timer_rand_state(unsigned int irq,
859     - struct timer_rand_state *state)
860     -{
861     - irq_timer_state[irq] = state;
862     -}
863     -
864     -#else
865     -
866     -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
867     -{
868     - struct irq_desc *desc;
869     -
870     - desc = irq_to_desc(irq);
871     -
872     - return desc->timer_rand_state;
873     -}
874     -
875     -static void set_timer_rand_state(unsigned int irq,
876     - struct timer_rand_state *state)
877     +/*
878     + * Add device- or boot-specific data to the input and nonblocking
879     + * pools to help initialize them to unique values.
880     + *
881     + * None of this adds any entropy, it is meant to avoid the
882     + * problem of the nonblocking pool having similar initial state
883     + * across largely identical devices.
884     + */
885     +void add_device_randomness(const void *buf, unsigned int size)
886     {
887     - struct irq_desc *desc;
888     + unsigned long time = get_cycles() ^ jiffies;
889    
890     - desc = irq_to_desc(irq);
891     -
892     - desc->timer_rand_state = state;
893     + mix_pool_bytes(&input_pool, buf, size, NULL);
894     + mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
895     + mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
896     + mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
897     }
898     -#endif
899     +EXPORT_SYMBOL(add_device_randomness);
900    
901     static struct timer_rand_state input_timer_state;
902    
903     @@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
904     goto out;
905    
906     sample.jiffies = jiffies;
907     -
908     - /* Use arch random value, fall back to cycles */
909     - if (!arch_get_random_int(&sample.cycles))
910     - sample.cycles = get_cycles();
911     -
912     + sample.cycles = get_cycles();
913     sample.num = num;
914     - mix_pool_bytes(&input_pool, &sample, sizeof(sample));
915     + mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
916    
917     /*
918     * Calculate number of bits of randomness we probably added.
919     @@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
920     }
921     EXPORT_SYMBOL_GPL(add_input_randomness);
922    
923     -void add_interrupt_randomness(int irq)
924     +static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
925     +
926     +void add_interrupt_randomness(int irq, int irq_flags)
927     {
928     - struct timer_rand_state *state;
929     + struct entropy_store *r;
930     + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
931     + struct pt_regs *regs = get_irq_regs();
932     + unsigned long now = jiffies;
933     + __u32 input[4], cycles = get_cycles();
934     +
935     + input[0] = cycles ^ jiffies;
936     + input[1] = irq;
937     + if (regs) {
938     + __u64 ip = instruction_pointer(regs);
939     + input[2] = ip;
940     + input[3] = ip >> 32;
941     + }
942    
943     - state = get_timer_rand_state(irq);
944     + fast_mix(fast_pool, input, sizeof(input));
945    
946     - if (state == NULL)
947     + if ((fast_pool->count & 1023) &&
948     + !time_after(now, fast_pool->last + HZ))
949     return;
950    
951     - DEBUG_ENT("irq event %d\n", irq);
952     - add_timer_randomness(state, 0x100 + irq);
953     + fast_pool->last = now;
954     +
955     + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
956     + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
957     + /*
958     + * If we don't have a valid cycle counter, and we see
959     + * back-to-back timer interrupts, then skip giving credit for
960     + * any entropy.
961     + */
962     + if (cycles == 0) {
963     + if (irq_flags & __IRQF_TIMER) {
964     + if (fast_pool->last_timer_intr)
965     + return;
966     + fast_pool->last_timer_intr = 1;
967     + } else
968     + fast_pool->last_timer_intr = 0;
969     + }
970     + credit_entropy_bits(r, 1);
971     }
972    
973     #ifdef CONFIG_BLOCK
974     @@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
975     */
976     static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
977     {
978     - __u32 tmp[OUTPUT_POOL_WORDS];
979     + __u32 tmp[OUTPUT_POOL_WORDS];
980    
981     if (r->pull && r->entropy_count < nbytes * 8 &&
982     r->entropy_count < r->poolinfo->POOLBITS) {
983     @@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
984    
985     bytes = extract_entropy(r->pull, tmp, bytes,
986     random_read_wakeup_thresh / 8, rsvd);
987     - mix_pool_bytes(r, tmp, bytes);
988     + mix_pool_bytes(r, tmp, bytes, NULL);
989     credit_entropy_bits(r, bytes*8);
990     }
991     }
992     @@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
993     static void extract_buf(struct entropy_store *r, __u8 *out)
994     {
995     int i;
996     - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
997     + union {
998     + __u32 w[5];
999     + unsigned long l[LONGS(EXTRACT_SIZE)];
1000     + } hash;
1001     + __u32 workspace[SHA_WORKSPACE_WORDS];
1002     __u8 extract[64];
1003     + unsigned long flags;
1004    
1005     /* Generate a hash across the pool, 16 words (512 bits) at a time */
1006     - sha_init(hash);
1007     + sha_init(hash.w);
1008     + spin_lock_irqsave(&r->lock, flags);
1009     for (i = 0; i < r->poolinfo->poolwords; i += 16)
1010     - sha_transform(hash, (__u8 *)(r->pool + i), workspace);
1011     + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
1012    
1013     /*
1014     * We mix the hash back into the pool to prevent backtracking
1015     @@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1016     * brute-forcing the feedback as hard as brute-forcing the
1017     * hash.
1018     */
1019     - mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
1020     + __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
1021     + spin_unlock_irqrestore(&r->lock, flags);
1022    
1023     /*
1024     * To avoid duplicates, we atomically extract a portion of the
1025     * pool while mixing, and hash one final time.
1026     */
1027     - sha_transform(hash, extract, workspace);
1028     + sha_transform(hash.w, extract, workspace);
1029     memset(extract, 0, sizeof(extract));
1030     memset(workspace, 0, sizeof(workspace));
1031    
1032     @@ -852,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1033     * pattern, we fold it in half. Thus, we always feed back
1034     * twice as much data as we output.
1035     */
1036     - hash[0] ^= hash[3];
1037     - hash[1] ^= hash[4];
1038     - hash[2] ^= rol32(hash[2], 16);
1039     - memcpy(out, hash, EXTRACT_SIZE);
1040     - memset(hash, 0, sizeof(hash));
1041     + hash.w[0] ^= hash.w[3];
1042     + hash.w[1] ^= hash.w[4];
1043     + hash.w[2] ^= rol32(hash.w[2], 16);
1044     +
1045     + /*
1046     + * If we have a architectural hardware random number
1047     + * generator, mix that in, too.
1048     + */
1049     + for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
1050     + unsigned long v;
1051     + if (!arch_get_random_long(&v))
1052     + break;
1053     + hash.l[i] ^= v;
1054     + }
1055     +
1056     + memcpy(out, &hash, EXTRACT_SIZE);
1057     + memset(&hash, 0, sizeof(hash));
1058     }
1059    
1060     static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1061     - size_t nbytes, int min, int reserved)
1062     + size_t nbytes, int min, int reserved)
1063     {
1064     ssize_t ret = 0, i;
1065     __u8 tmp[EXTRACT_SIZE];
1066     - unsigned long flags;
1067    
1068     + trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
1069     xfer_secondary_pool(r, nbytes);
1070     nbytes = account(r, nbytes, min, reserved);
1071    
1072     @@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1073     extract_buf(r, tmp);
1074    
1075     if (fips_enabled) {
1076     + unsigned long flags;
1077     +
1078     spin_lock_irqsave(&r->lock, flags);
1079     if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1080     panic("Hardware RNG duplicated output!\n");
1081     @@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1082     ssize_t ret = 0, i;
1083     __u8 tmp[EXTRACT_SIZE];
1084    
1085     + trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
1086     xfer_secondary_pool(r, nbytes);
1087     nbytes = account(r, nbytes, 0, 0);
1088    
1089     @@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1090    
1091     /*
1092     * This function is the exported kernel interface. It returns some
1093     - * number of good random numbers, suitable for seeding TCP sequence
1094     - * numbers, etc.
1095     + * number of good random numbers, suitable for key generation, seeding
1096     + * TCP sequence numbers, etc. It does not use the hw random number
1097     + * generator, if available; use get_random_bytes_arch() for that.
1098     */
1099     void get_random_bytes(void *buf, int nbytes)
1100     {
1101     + extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
1102     +}
1103     +EXPORT_SYMBOL(get_random_bytes);
1104     +
1105     +/*
1106     + * This function will use the architecture-specific hardware random
1107     + * number generator if it is available. The arch-specific hw RNG will
1108     + * almost certainly be faster than what we can do in software, but it
1109     + * is impossible to verify that it is implemented securely (as
1110     + * opposed, to, say, the AES encryption of a sequence number using a
1111     + * key known by the NSA). So it's useful if we need the speed, but
1112     + * only if we're willing to trust the hardware manufacturer not to
1113     + * have put in a back door.
1114     + */
1115     +void get_random_bytes_arch(void *buf, int nbytes)
1116     +{
1117     char *p = buf;
1118    
1119     + trace_get_random_bytes(nbytes, _RET_IP_);
1120     while (nbytes) {
1121     unsigned long v;
1122     int chunk = min(nbytes, (int)sizeof(unsigned long));
1123     -
1124     +
1125     if (!arch_get_random_long(&v))
1126     break;
1127    
1128     @@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
1129     nbytes -= chunk;
1130     }
1131    
1132     - extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1133     + if (nbytes)
1134     + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1135     }
1136     -EXPORT_SYMBOL(get_random_bytes);
1137     +EXPORT_SYMBOL(get_random_bytes_arch);
1138     +
1139    
1140     /*
1141     * init_std_data - initialize pool with system data
1142     @@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
1143     static void init_std_data(struct entropy_store *r)
1144     {
1145     int i;
1146     - ktime_t now;
1147     - unsigned long flags;
1148     + ktime_t now = ktime_get_real();
1149     + unsigned long rv;
1150    
1151     - spin_lock_irqsave(&r->lock, flags);
1152     r->entropy_count = 0;
1153     - spin_unlock_irqrestore(&r->lock, flags);
1154     -
1155     - now = ktime_get_real();
1156     - mix_pool_bytes(r, &now, sizeof(now));
1157     - for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
1158     - if (!arch_get_random_long(&flags))
1159     + r->entropy_total = 0;
1160     + mix_pool_bytes(r, &now, sizeof(now), NULL);
1161     + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
1162     + if (!arch_get_random_long(&rv))
1163     break;
1164     - mix_pool_bytes(r, &flags, sizeof(flags));
1165     + mix_pool_bytes(r, &rv, sizeof(rv), NULL);
1166     }
1167     - mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
1168     + mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
1169     }
1170    
1171     +/*
1172     + * Note that setup_arch() may call add_device_randomness()
1173     + * long before we get here. This allows seeding of the pools
1174     + * with some platform dependent data very early in the boot
1175     + * process. But it limits our options here. We must use
1176     + * statically allocated structures that already have all
1177     + * initializations complete at compile time. We should also
1178     + * take care not to overwrite the precious per platform data
1179     + * we were given.
1180     + */
1181     static int rand_initialize(void)
1182     {
1183     init_std_data(&input_pool);
1184     @@ -992,24 +1114,6 @@ static int rand_initialize(void)
1185     }
1186     module_init(rand_initialize);
1187    
1188     -void rand_initialize_irq(int irq)
1189     -{
1190     - struct timer_rand_state *state;
1191     -
1192     - state = get_timer_rand_state(irq);
1193     -
1194     - if (state)
1195     - return;
1196     -
1197     - /*
1198     - * If kzalloc returns null, we just won't use that entropy
1199     - * source.
1200     - */
1201     - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1202     - if (state)
1203     - set_timer_rand_state(irq, state);
1204     -}
1205     -
1206     #ifdef CONFIG_BLOCK
1207     void rand_initialize_disk(struct gendisk *disk)
1208     {
1209     @@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1210     count -= bytes;
1211     p += bytes;
1212    
1213     - mix_pool_bytes(r, buf, bytes);
1214     + mix_pool_bytes(r, buf, bytes, NULL);
1215     cond_resched();
1216     }
1217    
1218     diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1219     index 153980b..b298158 100644
1220     --- a/drivers/firmware/dmi_scan.c
1221     +++ b/drivers/firmware/dmi_scan.c
1222     @@ -6,6 +6,7 @@
1223     #include <linux/dmi.h>
1224     #include <linux/efi.h>
1225     #include <linux/bootmem.h>
1226     +#include <linux/random.h>
1227     #include <asm/dmi.h>
1228    
1229     /*
1230     @@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
1231    
1232     dmi_table(buf, dmi_len, dmi_num, decode, NULL);
1233    
1234     + add_device_randomness(buf, dmi_len);
1235     +
1236     dmi_iounmap(buf, dmi_len);
1237     return 0;
1238     }
1239     diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
1240     index 51e0e2d..a330492 100644
1241     --- a/drivers/firmware/pcdp.c
1242     +++ b/drivers/firmware/pcdp.c
1243     @@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
1244     if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
1245     return -ENODEV;
1246    
1247     - pcdp = ioremap(efi.hcdp, 4096);
1248     + pcdp = early_ioremap(efi.hcdp, 4096);
1249     printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
1250    
1251     if (strstr(cmdline, "console=hcdp")) {
1252     @@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
1253     }
1254    
1255     out:
1256     - iounmap(pcdp);
1257     + early_iounmap(pcdp, 4096);
1258     return rc;
1259     }
1260     diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
1261     index b99af34..a2abb8e 100644
1262     --- a/drivers/hid/hid-chicony.c
1263     +++ b/drivers/hid/hid-chicony.c
1264     @@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1265     static const struct hid_device_id ch_devices[] = {
1266     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1267     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1268     + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1269     { }
1270     };
1271     MODULE_DEVICE_TABLE(hid, ch_devices);
1272     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1273     index 973c238..41d4437 100644
1274     --- a/drivers/hid/hid-core.c
1275     +++ b/drivers/hid/hid-core.c
1276     @@ -1404,12 +1404,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
1277     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1278     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
1279     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1280     + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1281     { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1282     { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1283     { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
1284     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
1285     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
1286     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
1287     + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
1288     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
1289     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1290     { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1291     diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
1292     index 2f0be4c..9e43aac 100644
1293     --- a/drivers/hid/hid-cypress.c
1294     +++ b/drivers/hid/hid-cypress.c
1295     @@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
1296     .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1297     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
1298     .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1299     + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
1300     + .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1301     { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
1302     .driver_data = CP_2WHEEL_MOUSE_HACK },
1303     { }
1304     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1305     index bb1abf8..41ad6ff 100644
1306     --- a/drivers/hid/hid-ids.h
1307     +++ b/drivers/hid/hid-ids.h
1308     @@ -202,6 +202,7 @@
1309     #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
1310     #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
1311     #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
1312     +#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
1313    
1314     #define USB_VENDOR_ID_CHUNGHWAT 0x2247
1315     #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
1316     @@ -231,6 +232,7 @@
1317     #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
1318     #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
1319     #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
1320     +#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
1321     #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
1322    
1323     #define USB_VENDOR_ID_DEALEXTREAME 0x10c5
1324     @@ -567,6 +569,9 @@
1325     #define USB_VENDOR_ID_NINTENDO 0x057e
1326     #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
1327    
1328     +#define USB_VENDOR_ID_NOVATEK 0x0603
1329     +#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
1330     +
1331     #define USB_VENDOR_ID_NTRIG 0x1b96
1332     #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
1333     #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
1334     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1335     index a6197f5..e754dff 100644
1336     --- a/drivers/hid/hid-multitouch.c
1337     +++ b/drivers/hid/hid-multitouch.c
1338     @@ -940,6 +940,11 @@ static const struct hid_device_id mt_devices[] = {
1339     HID_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
1340     USB_DEVICE_ID_PANABOARD_UBT880) },
1341    
1342     + /* Novatek Panel */
1343     + { .driver_data = MT_CLS_DEFAULT,
1344     + HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
1345     + USB_DEVICE_ID_NOVATEK_PCT) },
1346     +
1347     /* PenMount panels */
1348     { .driver_data = MT_CLS_CONFIDENCE,
1349     HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
1350     diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
1351     index cecd35c..c77032c 100644
1352     --- a/drivers/input/tablet/wacom_wac.c
1353     +++ b/drivers/input/tablet/wacom_wac.c
1354     @@ -243,7 +243,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
1355     input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
1356     input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
1357     if (wacom->tool[0] != BTN_TOOL_MOUSE) {
1358     - input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
1359     + input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
1360     input_report_key(input, BTN_TOUCH, data[1] & 0x01);
1361     input_report_key(input, BTN_STYLUS, data[1] & 0x02);
1362     input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
1363     diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
1364     index 503c709..908407e 100644
1365     --- a/drivers/input/touchscreen/eeti_ts.c
1366     +++ b/drivers/input/touchscreen/eeti_ts.c
1367     @@ -48,7 +48,7 @@ struct eeti_ts_priv {
1368     struct input_dev *input;
1369     struct work_struct work;
1370     struct mutex mutex;
1371     - int irq, irq_active_high;
1372     + int irq_gpio, irq, irq_active_high;
1373     };
1374    
1375     #define EETI_TS_BITDEPTH (11)
1376     @@ -62,7 +62,7 @@ struct eeti_ts_priv {
1377    
1378     static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
1379     {
1380     - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
1381     + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
1382     }
1383    
1384     static void eeti_ts_read(struct work_struct *work)
1385     @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
1386     static int __devinit eeti_ts_probe(struct i2c_client *client,
1387     const struct i2c_device_id *idp)
1388     {
1389     - struct eeti_ts_platform_data *pdata;
1390     + struct eeti_ts_platform_data *pdata = client->dev.platform_data;
1391     struct eeti_ts_priv *priv;
1392     struct input_dev *input;
1393     unsigned int irq_flags;
1394     @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1395    
1396     priv->client = client;
1397     priv->input = input;
1398     - priv->irq = client->irq;
1399     + priv->irq_gpio = pdata->irq_gpio;
1400     + priv->irq = gpio_to_irq(pdata->irq_gpio);
1401    
1402     - pdata = client->dev.platform_data;
1403     + err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
1404     + if (err < 0)
1405     + goto err1;
1406    
1407     if (pdata)
1408     priv->irq_active_high = pdata->irq_active_high;
1409     @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1410    
1411     err = input_register_device(input);
1412     if (err)
1413     - goto err1;
1414     + goto err2;
1415    
1416     err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
1417     client->name, priv);
1418     if (err) {
1419     dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
1420     - goto err2;
1421     + goto err3;
1422     }
1423    
1424     /*
1425     @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1426     device_init_wakeup(&client->dev, 0);
1427     return 0;
1428    
1429     -err2:
1430     +err3:
1431     input_unregister_device(input);
1432     input = NULL; /* so we dont try to free it below */
1433     +err2:
1434     + gpio_free(pdata->irq_gpio);
1435     err1:
1436     input_free_device(input);
1437     kfree(priv);
1438     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1439     index d7add9d..23904d2 100644
1440     --- a/drivers/md/raid1.c
1441     +++ b/drivers/md/raid1.c
1442     @@ -2429,7 +2429,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
1443     /* There is nowhere to write, so all non-sync
1444     * drives must be failed - so we are finished
1445     */
1446     - sector_t rv = max_sector - sector_nr;
1447     + sector_t rv;
1448     + if (min_bad > 0)
1449     + max_sector = sector_nr + min_bad;
1450     + rv = max_sector - sector_nr;
1451     *skipped = 1;
1452     put_buf(r1_bio);
1453     return rv;
1454     diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
1455     index bef5296..647dd95 100644
1456     --- a/drivers/media/rc/ene_ir.c
1457     +++ b/drivers/media/rc/ene_ir.c
1458     @@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1459    
1460     spin_lock_init(&dev->hw_lock);
1461    
1462     + dev->hw_io = pnp_port_start(pnp_dev, 0);
1463     +
1464     pnp_set_drvdata(pnp_dev, dev);
1465     dev->pnp_dev = pnp_dev;
1466    
1467     @@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1468    
1469     /* claim the resources */
1470     error = -EBUSY;
1471     - dev->hw_io = pnp_port_start(pnp_dev, 0);
1472     if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
1473     dev->hw_io = -1;
1474     dev->irq = -1;
1475     diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
1476     index 1efad20..1287645 100644
1477     --- a/drivers/mfd/ab3100-core.c
1478     +++ b/drivers/mfd/ab3100-core.c
1479     @@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
1480     u32 fatevent;
1481     int err;
1482    
1483     - add_interrupt_randomness(irq);
1484     -
1485     err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
1486     event_regs, 3);
1487     if (err)
1488     @@ -933,9 +931,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
1489    
1490     err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
1491     IRQF_ONESHOT, "ab3100-core", ab3100);
1492     - /* This real unpredictable IRQ is of course sampled for entropy */
1493     - rand_initialize_irq(client->irq);
1494     -
1495     if (err)
1496     goto exit_no_irq;
1497    
1498     diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
1499     index 43a76c4..db662e2 100644
1500     --- a/drivers/mfd/ezx-pcap.c
1501     +++ b/drivers/mfd/ezx-pcap.c
1502     @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
1503     }
1504     local_irq_enable();
1505     ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
1506     - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
1507     + } while (gpio_get_value(pdata->gpio));
1508     }
1509    
1510     static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
1511     diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
1512     index f742745..b90f3e0 100644
1513     --- a/drivers/mfd/wm831x-otp.c
1514     +++ b/drivers/mfd/wm831x-otp.c
1515     @@ -18,6 +18,7 @@
1516     #include <linux/bcd.h>
1517     #include <linux/delay.h>
1518     #include <linux/mfd/core.h>
1519     +#include <linux/random.h>
1520    
1521     #include <linux/mfd/wm831x/core.h>
1522     #include <linux/mfd/wm831x/otp.h>
1523     @@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
1524    
1525     int wm831x_otp_init(struct wm831x *wm831x)
1526     {
1527     + char uuid[WM831X_UNIQUE_ID_LEN];
1528     int ret;
1529    
1530     ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
1531     @@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
1532     dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
1533     ret);
1534    
1535     + ret = wm831x_unique_id_read(wm831x, uuid);
1536     + if (ret == 0)
1537     + add_device_randomness(uuid, sizeof(uuid));
1538     + else
1539     + dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
1540     +
1541     return ret;
1542     }
1543    
1544     diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
1545     index a9dd6a9..c098b24 100644
1546     --- a/drivers/net/ethernet/intel/e1000e/82571.c
1547     +++ b/drivers/net/ethernet/intel/e1000e/82571.c
1548     @@ -1582,10 +1582,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1549     * auto-negotiation in the TXCW register and disable
1550     * forced link in the Device Control register in an
1551     * attempt to auto-negotiate with our link partner.
1552     - * If the partner code word is null, stop forcing
1553     - * and restart auto negotiation.
1554     */
1555     - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1556     + if (rxcw & E1000_RXCW_C) {
1557     /* Enable autoneg, and unforce link up */
1558     ew32(TXCW, mac->txcw);
1559     ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1560     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1561     index 8f13420..147b628 100644
1562     --- a/drivers/net/tun.c
1563     +++ b/drivers/net/tun.c
1564     @@ -185,7 +185,6 @@ static void __tun_detach(struct tun_struct *tun)
1565     netif_tx_lock_bh(tun->dev);
1566     netif_carrier_off(tun->dev);
1567     tun->tfile = NULL;
1568     - tun->socket.file = NULL;
1569     netif_tx_unlock_bh(tun->dev);
1570    
1571     /* Drop read queue */
1572     diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1573     index 6dfd964..28a0edd 100644
1574     --- a/drivers/net/wireless/ath/ath9k/hw.c
1575     +++ b/drivers/net/wireless/ath/ath9k/hw.c
1576     @@ -676,6 +676,7 @@ int ath9k_hw_init(struct ath_hw *ah)
1577     case AR9300_DEVID_AR9340:
1578     case AR9300_DEVID_AR9580:
1579     case AR9300_DEVID_AR9462:
1580     + case AR9485_DEVID_AR1111:
1581     break;
1582     default:
1583     if (common->bus_ops->ath_bus_type == ATH_USB)
1584     diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1585     index e88f182..f8e1fbb 100644
1586     --- a/drivers/net/wireless/ath/ath9k/hw.h
1587     +++ b/drivers/net/wireless/ath/ath9k/hw.h
1588     @@ -48,6 +48,7 @@
1589     #define AR9300_DEVID_AR9580 0x0033
1590     #define AR9300_DEVID_AR9462 0x0034
1591     #define AR9300_DEVID_AR9330 0x0035
1592     +#define AR9485_DEVID_AR1111 0x0037
1593    
1594     #define AR5416_AR9100_DEVID 0x000b
1595    
1596     diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
1597     index 77dc327..e44097a 100644
1598     --- a/drivers/net/wireless/ath/ath9k/pci.c
1599     +++ b/drivers/net/wireless/ath/ath9k/pci.c
1600     @@ -35,6 +35,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
1601     { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
1602     { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
1603     { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
1604     + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
1605     { 0 }
1606     };
1607    
1608     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1609     index da2be3e..7db5d45 100644
1610     --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1611     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1612     @@ -709,11 +709,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
1613     */
1614     static bool rs_use_green(struct ieee80211_sta *sta)
1615     {
1616     - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1617     - struct iwl_rxon_context *ctx = sta_priv->ctx;
1618     -
1619     - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
1620     - !(ctx->ht.non_gf_sta_present);
1621     + /*
1622     + * There's a bug somewhere in this code that causes the
1623     + * scaling to get stuck because GF+SGI can't be combined
1624     + * in SISO rates. Until we find that bug, disable GF, it
1625     + * has only limited benefit and we still interoperate with
1626     + * GF APs since we can always receive GF transmissions.
1627     + */
1628     + return false;
1629     }
1630    
1631     /**
1632     diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
1633     index e0c6d11..0f4bf8c 100644
1634     --- a/drivers/net/wireless/rt2x00/rt61pci.c
1635     +++ b/drivers/net/wireless/rt2x00/rt61pci.c
1636     @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1637    
1638     static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
1639     {
1640     - struct ieee80211_conf conf = { .flags = 0 };
1641     - struct rt2x00lib_conf libconf = { .conf = &conf };
1642     + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
1643    
1644     rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
1645     }
1646     diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
1647     index 77aadde..556cbb4 100644
1648     --- a/drivers/platform/x86/asus-wmi.c
1649     +++ b/drivers/platform/x86/asus-wmi.c
1650     @@ -1467,14 +1467,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
1651     */
1652     if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
1653     asus->dsts_id = ASUS_WMI_METHODID_DSTS;
1654     - else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
1655     + else
1656     asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
1657    
1658     - if (!asus->dsts_id) {
1659     - pr_err("Can't find DSTS");
1660     - return -ENODEV;
1661     - }
1662     -
1663     /* CWAP allow to define the behavior of the Fn+F2 key,
1664     * this method doesn't seems to be present on Eee PCs */
1665     if (asus->driver->quirks->wapf >= 0)
1666     diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
1667     index 3b6e6a6..41c06fe 100644
1668     --- a/drivers/rtc/rtc-wm831x.c
1669     +++ b/drivers/rtc/rtc-wm831x.c
1670     @@ -24,7 +24,7 @@
1671     #include <linux/mfd/wm831x/core.h>
1672     #include <linux/delay.h>
1673     #include <linux/platform_device.h>
1674     -
1675     +#include <linux/random.h>
1676    
1677     /*
1678     * R16416 (0x4020) - RTC Write Counter
1679     @@ -96,6 +96,26 @@ struct wm831x_rtc {
1680     unsigned int alarm_enabled:1;
1681     };
1682    
1683     +static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
1684     +{
1685     + int ret;
1686     + u16 reg;
1687     +
1688     + /*
1689     + * The write counter contains a pseudo-random number which is
1690     + * regenerated every time we set the RTC so it should be a
1691     + * useful per-system source of entropy.
1692     + */
1693     + ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
1694     + if (ret >= 0) {
1695     + reg = ret;
1696     + add_device_randomness(&reg, sizeof(reg));
1697     + } else {
1698     + dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
1699     + ret);
1700     + }
1701     +}
1702     +
1703     /*
1704     * Read current time and date in RTC
1705     */
1706     @@ -431,6 +451,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
1707     alm_irq, ret);
1708     }
1709    
1710     + wm831x_rtc_add_randomness(wm831x);
1711     +
1712     return 0;
1713    
1714     err:
1715     diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
1716     index fa323f8..52a5f62 100644
1717     --- a/drivers/target/target_core_cdb.c
1718     +++ b/drivers/target/target_core_cdb.c
1719     @@ -1022,11 +1022,11 @@ int target_emulate_unmap(struct se_task *task)
1720     struct se_cmd *cmd = task->task_se_cmd;
1721     struct se_device *dev = cmd->se_dev;
1722     unsigned char *buf, *ptr = NULL;
1723     - unsigned char *cdb = &cmd->t_task_cdb[0];
1724     sector_t lba;
1725     - unsigned int size = cmd->data_length, range;
1726     - int ret = 0, offset;
1727     - unsigned short dl, bd_dl;
1728     + int size = cmd->data_length;
1729     + u32 range;
1730     + int ret = 0;
1731     + int dl, bd_dl;
1732    
1733     if (!dev->transport->do_discard) {
1734     pr_err("UNMAP emulation not supported for: %s\n",
1735     @@ -1035,24 +1035,41 @@ int target_emulate_unmap(struct se_task *task)
1736     return -ENOSYS;
1737     }
1738    
1739     - /* First UNMAP block descriptor starts at 8 byte offset */
1740     - offset = 8;
1741     - size -= 8;
1742     - dl = get_unaligned_be16(&cdb[0]);
1743     - bd_dl = get_unaligned_be16(&cdb[2]);
1744     -
1745     buf = transport_kmap_data_sg(cmd);
1746    
1747     - ptr = &buf[offset];
1748     - pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1749     + dl = get_unaligned_be16(&buf[0]);
1750     + bd_dl = get_unaligned_be16(&buf[2]);
1751     +
1752     + size = min(size - 8, bd_dl);
1753     + if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1754     + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1755     + ret = -EINVAL;
1756     + goto err;
1757     + }
1758     +
1759     + /* First UNMAP block descriptor starts at 8 byte offset */
1760     + ptr = &buf[8];
1761     + pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
1762     " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1763    
1764     - while (size) {
1765     + while (size >= 16) {
1766     lba = get_unaligned_be64(&ptr[0]);
1767     range = get_unaligned_be32(&ptr[8]);
1768     pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1769     (unsigned long long)lba, range);
1770    
1771     + if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
1772     + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1773     + ret = -EINVAL;
1774     + goto err;
1775     + }
1776     +
1777     + if (lba + range > dev->transport->get_blocks(dev) + 1) {
1778     + cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
1779     + ret = -EINVAL;
1780     + goto err;
1781     + }
1782     +
1783     ret = dev->transport->do_discard(dev, lba, range);
1784     if (ret < 0) {
1785     pr_err("blkdev_issue_discard() failed: %d\n",
1786     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1787     index 6241b71..e727b87 100644
1788     --- a/drivers/usb/core/hub.c
1789     +++ b/drivers/usb/core/hub.c
1790     @@ -24,6 +24,7 @@
1791     #include <linux/kthread.h>
1792     #include <linux/mutex.h>
1793     #include <linux/freezer.h>
1794     +#include <linux/random.h>
1795    
1796     #include <asm/uaccess.h>
1797     #include <asm/byteorder.h>
1798     @@ -1951,6 +1952,14 @@ int usb_new_device(struct usb_device *udev)
1799     /* Tell the world! */
1800     announce_device(udev);
1801    
1802     + if (udev->serial)
1803     + add_device_randomness(udev->serial, strlen(udev->serial));
1804     + if (udev->product)
1805     + add_device_randomness(udev->product, strlen(udev->product));
1806     + if (udev->manufacturer)
1807     + add_device_randomness(udev->manufacturer,
1808     + strlen(udev->manufacturer));
1809     +
1810     device_enable_async_suspend(&udev->dev);
1811    
1812     /*
1813     diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
1814     index ccbfef5..1e1e2d2 100644
1815     --- a/drivers/video/smscufx.c
1816     +++ b/drivers/video/smscufx.c
1817     @@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
1818     result = fb_sys_write(info, buf, count, ppos);
1819    
1820     if (result > 0) {
1821     - int start = max((int)(offset / info->fix.line_length) - 1, 0);
1822     + int start = max((int)(offset / info->fix.line_length), 0);
1823     int lines = min((u32)((result / info->fix.line_length) + 1),
1824     (u32)info->var.yres);
1825    
1826     diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
1827     index 24a49d4..1585db1 100644
1828     --- a/fs/exofs/ore.c
1829     +++ b/fs/exofs/ore.c
1830     @@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
1831     bio->bi_rw |= REQ_WRITE;
1832     }
1833    
1834     - osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
1835     - bio, per_dev->length);
1836     + osd_req_write(or, _ios_obj(ios, cur_comp),
1837     + per_dev->offset, bio, per_dev->length);
1838     ORE_DBGMSG("write(0x%llx) offset=0x%llx "
1839     "length=0x%llx dev=%d\n",
1840     - _LLU(_ios_obj(ios, dev)->id),
1841     + _LLU(_ios_obj(ios, cur_comp)->id),
1842     _LLU(per_dev->offset),
1843     _LLU(per_dev->length), dev);
1844     } else if (ios->kern_buff) {
1845     @@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
1846     (ios->si.unit_off + ios->length >
1847     ios->layout->stripe_unit));
1848    
1849     - ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
1850     + ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
1851     per_dev->offset,
1852     ios->kern_buff, ios->length);
1853     if (unlikely(ret))
1854     goto out;
1855     ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
1856     "length=0x%llx dev=%d\n",
1857     - _LLU(_ios_obj(ios, dev)->id),
1858     + _LLU(_ios_obj(ios, cur_comp)->id),
1859     _LLU(per_dev->offset),
1860     _LLU(ios->length), per_dev->dev);
1861     } else {
1862     - osd_req_set_attributes(or, _ios_obj(ios, dev));
1863     + osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
1864     ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
1865     - _LLU(_ios_obj(ios, dev)->id),
1866     + _LLU(_ios_obj(ios, cur_comp)->id),
1867     ios->out_attr_len, dev);
1868     }
1869    
1870     diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
1871     index 2a70fce..6fe98ed 100644
1872     --- a/fs/nilfs2/ioctl.c
1873     +++ b/fs/nilfs2/ioctl.c
1874     @@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1875     if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
1876     goto out;
1877    
1878     - down_read(&inode->i_sb->s_umount);
1879     + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1880    
1881     nilfs_transaction_begin(inode->i_sb, &ti, 0);
1882     ret = nilfs_cpfile_change_cpmode(
1883     @@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1884     else
1885     nilfs_transaction_commit(inode->i_sb); /* never fails */
1886    
1887     - up_read(&inode->i_sb->s_umount);
1888     + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1889     out:
1890     mnt_drop_write_file(filp);
1891     return ret;
1892     diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
1893     index 1099a76..496904b 100644
1894     --- a/fs/nilfs2/super.c
1895     +++ b/fs/nilfs2/super.c
1896     @@ -948,6 +948,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1897     struct nilfs_root *root;
1898     int ret;
1899    
1900     + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1901     +
1902     down_read(&nilfs->ns_segctor_sem);
1903     ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
1904     up_read(&nilfs->ns_segctor_sem);
1905     @@ -972,6 +974,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1906     ret = nilfs_get_root_dentry(s, root, root_dentry);
1907     nilfs_put_root(root);
1908     out:
1909     + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1910     return ret;
1911     }
1912    
1913     diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
1914     index 501b7f8..41e6a04 100644
1915     --- a/fs/nilfs2/the_nilfs.c
1916     +++ b/fs/nilfs2/the_nilfs.c
1917     @@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
1918     nilfs->ns_bdev = bdev;
1919     atomic_set(&nilfs->ns_ndirtyblks, 0);
1920     init_rwsem(&nilfs->ns_sem);
1921     + mutex_init(&nilfs->ns_snapshot_mount_mutex);
1922     INIT_LIST_HEAD(&nilfs->ns_dirty_files);
1923     INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
1924     spin_lock_init(&nilfs->ns_inode_lock);
1925     diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
1926     index 9992b11..de7435f 100644
1927     --- a/fs/nilfs2/the_nilfs.h
1928     +++ b/fs/nilfs2/the_nilfs.h
1929     @@ -47,6 +47,7 @@ enum {
1930     * @ns_flags: flags
1931     * @ns_bdev: block device
1932     * @ns_sem: semaphore for shared states
1933     + * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
1934     * @ns_sbh: buffer heads of on-disk super blocks
1935     * @ns_sbp: pointers to super block data
1936     * @ns_sbwtime: previous write time of super block
1937     @@ -99,6 +100,7 @@ struct the_nilfs {
1938    
1939     struct block_device *ns_bdev;
1940     struct rw_semaphore ns_sem;
1941     + struct mutex ns_snapshot_mount_mutex;
1942    
1943     /*
1944     * used for
1945     diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
1946     index f875b31..16625d7 100644
1947     --- a/include/linux/input/eeti_ts.h
1948     +++ b/include/linux/input/eeti_ts.h
1949     @@ -2,6 +2,7 @@
1950     #define LINUX_INPUT_EETI_TS_H
1951    
1952     struct eeti_ts_platform_data {
1953     + int irq_gpio;
1954     unsigned int irq_active_high;
1955     };
1956    
1957     diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
1958     index f1e2527..9a323d1 100644
1959     --- a/include/linux/irqdesc.h
1960     +++ b/include/linux/irqdesc.h
1961     @@ -39,7 +39,6 @@ struct module;
1962     */
1963     struct irq_desc {
1964     struct irq_data irq_data;
1965     - struct timer_rand_state *timer_rand_state;
1966     unsigned int __percpu *kstat_irqs;
1967     irq_flow_handler_t handle_irq;
1968     #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
1969     diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
1970     index 40c37216..32a1b5c 100644
1971     --- a/include/linux/mfd/ezx-pcap.h
1972     +++ b/include/linux/mfd/ezx-pcap.h
1973     @@ -16,6 +16,7 @@ struct pcap_subdev {
1974     struct pcap_platform_data {
1975     unsigned int irq_base;
1976     unsigned int config;
1977     + int gpio;
1978     void (*init) (void *); /* board specific init */
1979     int num_subdevs;
1980     struct pcap_subdev *subdevs;
1981     diff --git a/include/linux/random.h b/include/linux/random.h
1982     index 8f74538..ac621ce 100644
1983     --- a/include/linux/random.h
1984     +++ b/include/linux/random.h
1985     @@ -48,13 +48,13 @@ struct rnd_state {
1986    
1987     #ifdef __KERNEL__
1988    
1989     -extern void rand_initialize_irq(int irq);
1990     -
1991     +extern void add_device_randomness(const void *, unsigned int);
1992     extern void add_input_randomness(unsigned int type, unsigned int code,
1993     unsigned int value);
1994     -extern void add_interrupt_randomness(int irq);
1995     +extern void add_interrupt_randomness(int irq, int irq_flags);
1996    
1997     extern void get_random_bytes(void *buf, int nbytes);
1998     +extern void get_random_bytes_arch(void *buf, int nbytes);
1999     void generate_random_uuid(unsigned char uuid_out[16]);
2000    
2001     #ifndef MODULE
2002     diff --git a/include/trace/events/random.h b/include/trace/events/random.h
2003     new file mode 100644
2004     index 0000000..422df19
2005     --- /dev/null
2006     +++ b/include/trace/events/random.h
2007     @@ -0,0 +1,134 @@
2008     +#undef TRACE_SYSTEM
2009     +#define TRACE_SYSTEM random
2010     +
2011     +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
2012     +#define _TRACE_RANDOM_H
2013     +
2014     +#include <linux/writeback.h>
2015     +#include <linux/tracepoint.h>
2016     +
2017     +DECLARE_EVENT_CLASS(random__mix_pool_bytes,
2018     + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2019     +
2020     + TP_ARGS(pool_name, bytes, IP),
2021     +
2022     + TP_STRUCT__entry(
2023     + __field( const char *, pool_name )
2024     + __field( int, bytes )
2025     + __field(unsigned long, IP )
2026     + ),
2027     +
2028     + TP_fast_assign(
2029     + __entry->pool_name = pool_name;
2030     + __entry->bytes = bytes;
2031     + __entry->IP = IP;
2032     + ),
2033     +
2034     + TP_printk("%s pool: bytes %d caller %pF",
2035     + __entry->pool_name, __entry->bytes, (void *)__entry->IP)
2036     +);
2037     +
2038     +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
2039     + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2040     +
2041     + TP_ARGS(pool_name, bytes, IP)
2042     +);
2043     +
2044     +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
2045     + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2046     +
2047     + TP_ARGS(pool_name, bytes, IP)
2048     +);
2049     +
2050     +TRACE_EVENT(credit_entropy_bits,
2051     + TP_PROTO(const char *pool_name, int bits, int entropy_count,
2052     + int entropy_total, unsigned long IP),
2053     +
2054     + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
2055     +
2056     + TP_STRUCT__entry(
2057     + __field( const char *, pool_name )
2058     + __field( int, bits )
2059     + __field( int, entropy_count )
2060     + __field( int, entropy_total )
2061     + __field(unsigned long, IP )
2062     + ),
2063     +
2064     + TP_fast_assign(
2065     + __entry->pool_name = pool_name;
2066     + __entry->bits = bits;
2067     + __entry->entropy_count = entropy_count;
2068     + __entry->entropy_total = entropy_total;
2069     + __entry->IP = IP;
2070     + ),
2071     +
2072     + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
2073     + "caller %pF", __entry->pool_name, __entry->bits,
2074     + __entry->entropy_count, __entry->entropy_total,
2075     + (void *)__entry->IP)
2076     +);
2077     +
2078     +TRACE_EVENT(get_random_bytes,
2079     + TP_PROTO(int nbytes, unsigned long IP),
2080     +
2081     + TP_ARGS(nbytes, IP),
2082     +
2083     + TP_STRUCT__entry(
2084     + __field( int, nbytes )
2085     + __field(unsigned long, IP )
2086     + ),
2087     +
2088     + TP_fast_assign(
2089     + __entry->nbytes = nbytes;
2090     + __entry->IP = IP;
2091     + ),
2092     +
2093     + TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
2094     +);
2095     +
2096     +DECLARE_EVENT_CLASS(random__extract_entropy,
2097     + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2098     + unsigned long IP),
2099     +
2100     + TP_ARGS(pool_name, nbytes, entropy_count, IP),
2101     +
2102     + TP_STRUCT__entry(
2103     + __field( const char *, pool_name )
2104     + __field( int, nbytes )
2105     + __field( int, entropy_count )
2106     + __field(unsigned long, IP )
2107     + ),
2108     +
2109     + TP_fast_assign(
2110     + __entry->pool_name = pool_name;
2111     + __entry->nbytes = nbytes;
2112     + __entry->entropy_count = entropy_count;
2113     + __entry->IP = IP;
2114     + ),
2115     +
2116     + TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
2117     + __entry->pool_name, __entry->nbytes, __entry->entropy_count,
2118     + (void *)__entry->IP)
2119     +);
2120     +
2121     +
2122     +DEFINE_EVENT(random__extract_entropy, extract_entropy,
2123     + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2124     + unsigned long IP),
2125     +
2126     + TP_ARGS(pool_name, nbytes, entropy_count, IP)
2127     +);
2128     +
2129     +DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
2130     + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2131     + unsigned long IP),
2132     +
2133     + TP_ARGS(pool_name, nbytes, entropy_count, IP)
2134     +);
2135     +
2136     +
2137     +
2138     +#endif /* _TRACE_RANDOM_H */
2139     +
2140     +/* This part must be outside protection */
2141     +#include <trace/define_trace.h>
2142     diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
2143     index bdb1803..131ca17 100644
2144     --- a/kernel/irq/handle.c
2145     +++ b/kernel/irq/handle.c
2146     @@ -133,7 +133,7 @@ irqreturn_t
2147     handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2148     {
2149     irqreturn_t retval = IRQ_NONE;
2150     - unsigned int random = 0, irq = desc->irq_data.irq;
2151     + unsigned int flags = 0, irq = desc->irq_data.irq;
2152    
2153     do {
2154     irqreturn_t res;
2155     @@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2156    
2157     /* Fall through to add to randomness */
2158     case IRQ_HANDLED:
2159     - random |= action->flags;
2160     + flags |= action->flags;
2161     break;
2162    
2163     default:
2164     @@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2165     action = action->next;
2166     } while (action);
2167    
2168     - if (random & IRQF_SAMPLE_RANDOM)
2169     - add_interrupt_randomness(irq);
2170     + add_interrupt_randomness(irq, flags);
2171    
2172     if (!noirqdebug)
2173     note_interrupt(irq, desc, retval);
2174     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2175     index 89a3ea8..b9d1d83 100644
2176     --- a/kernel/irq/manage.c
2177     +++ b/kernel/irq/manage.c
2178     @@ -890,22 +890,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
2179     return -ENOSYS;
2180     if (!try_module_get(desc->owner))
2181     return -ENODEV;
2182     - /*
2183     - * Some drivers like serial.c use request_irq() heavily,
2184     - * so we have to be careful not to interfere with a
2185     - * running system.
2186     - */
2187     - if (new->flags & IRQF_SAMPLE_RANDOM) {
2188     - /*
2189     - * This function might sleep, we want to call it first,
2190     - * outside of the atomic block.
2191     - * Yes, this might clear the entropy pool if the wrong
2192     - * driver is attempted to be loaded, without actually
2193     - * installing a new handler, but is this really a problem,
2194     - * only the sysadmin is able to do this.
2195     - */
2196     - rand_initialize_irq(irq);
2197     - }
2198    
2199     /*
2200     * Check whether the interrupt nests into another interrupt
2201     @@ -1339,7 +1323,6 @@ EXPORT_SYMBOL(free_irq);
2202     * Flags:
2203     *
2204     * IRQF_SHARED Interrupt is shared
2205     - * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
2206     * IRQF_TRIGGER_* Specify active edge(s) or level
2207     *
2208     */
2209     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2210     index 263e177..a799df5 100644
2211     --- a/mm/hugetlb.c
2212     +++ b/mm/hugetlb.c
2213     @@ -2392,6 +2392,22 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2214     {
2215     mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2216     __unmap_hugepage_range(vma, start, end, ref_page);
2217     + /*
2218     + * Clear this flag so that x86's huge_pmd_share page_table_shareable
2219     + * test will fail on a vma being torn down, and not grab a page table
2220     + * on its way out. We're lucky that the flag has such an appropriate
2221     + * name, and can in fact be safely cleared here. We could clear it
2222     + * before the __unmap_hugepage_range above, but all that's necessary
2223     + * is to clear it before releasing the i_mmap_mutex below.
2224     + *
2225     + * This works because in the contexts this is called, the VMA is
2226     + * going to be destroyed. It is not vunerable to madvise(DONTNEED)
2227     + * because madvise is not supported on hugetlbfs. The same applies
2228     + * for direct IO. unmap_hugepage_range() is only being called just
2229     + * before free_pgtables() so clearing VM_MAYSHARE will not cause
2230     + * surprises later.
2231     + */
2232     + vma->vm_flags &= ~VM_MAYSHARE;
2233     mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2234     }
2235    
2236     @@ -2958,9 +2974,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2237     }
2238     }
2239     spin_unlock(&mm->page_table_lock);
2240     - mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2241     -
2242     + /*
2243     + * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
2244     + * may have cleared our pud entry and done put_page on the page table:
2245     + * once we release i_mmap_mutex, another task can do the final put_page
2246     + * and that page table be reused and filled with junk.
2247     + */
2248     flush_tlb_range(vma, start, end);
2249     + mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2250     }
2251    
2252     int hugetlb_reserve_pages(struct inode *inode,
2253     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2254     index 0de20d7..274c3cc 100644
2255     --- a/mm/memory-failure.c
2256     +++ b/mm/memory-failure.c
2257     @@ -1433,8 +1433,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
2258     /* Keep page count to indicate a given hugepage is isolated. */
2259    
2260     list_add(&hpage->lru, &pagelist);
2261     - ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
2262     - true);
2263     + ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
2264     + MIGRATE_SYNC);
2265     if (ret) {
2266     struct page *page1, *page2;
2267     list_for_each_entry_safe(page1, page2, &pagelist, lru)
2268     @@ -1563,7 +1563,7 @@ int soft_offline_page(struct page *page, int flags)
2269     page_is_file_cache(page));
2270     list_add(&page->lru, &pagelist);
2271     ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
2272     - 0, MIGRATE_SYNC);
2273     + false, MIGRATE_SYNC);
2274     if (ret) {
2275     putback_lru_pages(&pagelist);
2276     pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
2277     diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
2278     index 9a611d3..862b608 100644
2279     --- a/mm/mmu_notifier.c
2280     +++ b/mm/mmu_notifier.c
2281     @@ -33,6 +33,24 @@
2282     void __mmu_notifier_release(struct mm_struct *mm)
2283     {
2284     struct mmu_notifier *mn;
2285     + struct hlist_node *n;
2286     +
2287     + /*
2288     + * RCU here will block mmu_notifier_unregister until
2289     + * ->release returns.
2290     + */
2291     + rcu_read_lock();
2292     + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
2293     + /*
2294     + * if ->release runs before mmu_notifier_unregister it
2295     + * must be handled as it's the only way for the driver
2296     + * to flush all existing sptes and stop the driver
2297     + * from establishing any more sptes before all the
2298     + * pages in the mm are freed.
2299     + */
2300     + if (mn->ops->release)
2301     + mn->ops->release(mn, mm);
2302     + rcu_read_unlock();
2303    
2304     spin_lock(&mm->mmu_notifier_mm->lock);
2305     while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
2306     @@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
2307     * mmu_notifier_unregister to return.
2308     */
2309     hlist_del_init_rcu(&mn->hlist);
2310     - /*
2311     - * RCU here will block mmu_notifier_unregister until
2312     - * ->release returns.
2313     - */
2314     - rcu_read_lock();
2315     - spin_unlock(&mm->mmu_notifier_mm->lock);
2316     - /*
2317     - * if ->release runs before mmu_notifier_unregister it
2318     - * must be handled as it's the only way for the driver
2319     - * to flush all existing sptes and stop the driver
2320     - * from establishing any more sptes before all the
2321     - * pages in the mm are freed.
2322     - */
2323     - if (mn->ops->release)
2324     - mn->ops->release(mn, mm);
2325     - rcu_read_unlock();
2326     - spin_lock(&mm->mmu_notifier_mm->lock);
2327     }
2328     spin_unlock(&mm->mmu_notifier_mm->lock);
2329    
2330     @@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
2331     {
2332     BUG_ON(atomic_read(&mm->mm_count) <= 0);
2333    
2334     - spin_lock(&mm->mmu_notifier_mm->lock);
2335     if (!hlist_unhashed(&mn->hlist)) {
2336     - hlist_del_rcu(&mn->hlist);
2337     -
2338     /*
2339     * RCU here will force exit_mmap to wait ->release to finish
2340     * before freeing the pages.
2341     */
2342     rcu_read_lock();
2343     - spin_unlock(&mm->mmu_notifier_mm->lock);
2344     +
2345     /*
2346     * exit_mmap will block in mmu_notifier_release to
2347     * guarantee ->release is called before freeing the
2348     @@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
2349     if (mn->ops->release)
2350     mn->ops->release(mn, mm);
2351     rcu_read_unlock();
2352     - } else
2353     +
2354     + spin_lock(&mm->mmu_notifier_mm->lock);
2355     + hlist_del_rcu(&mn->hlist);
2356     spin_unlock(&mm->mmu_notifier_mm->lock);
2357     + }
2358    
2359     /*
2360     * Wait any running method to finish, of course including
2361     diff --git a/net/core/dev.c b/net/core/dev.c
2362     index 533c586..c299416 100644
2363     --- a/net/core/dev.c
2364     +++ b/net/core/dev.c
2365     @@ -1173,6 +1173,7 @@ static int __dev_open(struct net_device *dev)
2366     net_dmaengine_get();
2367     dev_set_rx_mode(dev);
2368     dev_activate(dev);
2369     + add_device_randomness(dev->dev_addr, dev->addr_len);
2370     }
2371    
2372     return ret;
2373     @@ -4765,6 +4766,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2374     err = ops->ndo_set_mac_address(dev, sa);
2375     if (!err)
2376     call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2377     + add_device_randomness(dev->dev_addr, dev->addr_len);
2378     return err;
2379     }
2380     EXPORT_SYMBOL(dev_set_mac_address);
2381     @@ -5543,6 +5545,7 @@ int register_netdevice(struct net_device *dev)
2382     dev_init_scheduler(dev);
2383     dev_hold(dev);
2384     list_netdevice(dev);
2385     + add_device_randomness(dev->dev_addr, dev->addr_len);
2386    
2387     /* Notify protocols, that a new device appeared. */
2388     ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
2389     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2390     index b8052ba..900fc61 100644
2391     --- a/net/core/rtnetlink.c
2392     +++ b/net/core/rtnetlink.c
2393     @@ -1376,6 +1376,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
2394     goto errout;
2395     send_addr_notify = 1;
2396     modified = 1;
2397     + add_device_randomness(dev->dev_addr, dev->addr_len);
2398     }
2399    
2400     if (tb[IFLA_MTU]) {
2401     diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
2402     index e5fbb7c..e80fa33 100644
2403     --- a/net/mac80211/mesh.c
2404     +++ b/net/mac80211/mesh.c
2405     @@ -595,6 +595,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
2406    
2407     del_timer_sync(&sdata->u.mesh.housekeeping_timer);
2408     del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
2409     + del_timer_sync(&sdata->u.mesh.mesh_path_timer);
2410     /*
2411     * If the timer fired while we waited for it, it will have
2412     * requeued the work. Now the work will be running again
2413     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2414     index 25302c8..57f2731 100644
2415     --- a/net/sunrpc/clnt.c
2416     +++ b/net/sunrpc/clnt.c
2417     @@ -1846,12 +1846,13 @@ call_timeout(struct rpc_task *task)
2418     return;
2419     }
2420     if (RPC_IS_SOFT(task)) {
2421     - if (clnt->cl_chatty)
2422     + if (clnt->cl_chatty) {
2423     rcu_read_lock();
2424     printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
2425     clnt->cl_protname,
2426     rcu_dereference(clnt->cl_xprt)->servername);
2427     rcu_read_unlock();
2428     + }
2429     if (task->tk_flags & RPC_TASK_TIMEOUT)
2430     rpc_exit(task, -ETIMEDOUT);
2431     else
2432     diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
2433     index 4c38b33..4d53ad5 100644
2434     --- a/net/sunrpc/rpcb_clnt.c
2435     +++ b/net/sunrpc/rpcb_clnt.c
2436     @@ -251,7 +251,7 @@ static int rpcb_create_local_unix(struct net *net)
2437     if (IS_ERR(clnt)) {
2438     dprintk("RPC: failed to create AF_LOCAL rpcbind "
2439     "client (errno %ld).\n", PTR_ERR(clnt));
2440     - result = -PTR_ERR(clnt);
2441     + result = PTR_ERR(clnt);
2442     goto out;
2443     }
2444    
2445     @@ -298,7 +298,7 @@ static int rpcb_create_local_net(struct net *net)
2446     if (IS_ERR(clnt)) {
2447     dprintk("RPC: failed to create local rpcbind "
2448     "client (errno %ld).\n", PTR_ERR(clnt));
2449     - result = -PTR_ERR(clnt);
2450     + result = PTR_ERR(clnt);
2451     goto out;
2452     }
2453    
2454     diff --git a/net/wireless/core.c b/net/wireless/core.c
2455     index ccdfed8..bb5302d 100644
2456     --- a/net/wireless/core.c
2457     +++ b/net/wireless/core.c
2458     @@ -975,6 +975,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
2459     */
2460     synchronize_rcu();
2461     INIT_LIST_HEAD(&wdev->list);
2462     + /*
2463     + * Ensure that all events have been processed and
2464     + * freed.
2465     + */
2466     + cfg80211_process_wdev_events(wdev);
2467     break;
2468     case NETDEV_PRE_UP:
2469     if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
2470     diff --git a/net/wireless/core.h b/net/wireless/core.h
2471     index 3ac2dd0..ce5597c 100644
2472     --- a/net/wireless/core.h
2473     +++ b/net/wireless/core.h
2474     @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2475     struct net_device *dev, enum nl80211_iftype ntype,
2476     u32 *flags, struct vif_params *params);
2477     void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
2478     +void cfg80211_process_wdev_events(struct wireless_dev *wdev);
2479    
2480     int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
2481     struct wireless_dev *wdev,
2482     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2483     index baf5704..460af03 100644
2484     --- a/net/wireless/reg.c
2485     +++ b/net/wireless/reg.c
2486     @@ -891,7 +891,21 @@ static void handle_channel(struct wiphy *wiphy,
2487     chan->max_antenna_gain = min(chan->orig_mag,
2488     (int) MBI_TO_DBI(power_rule->max_antenna_gain));
2489     chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
2490     - chan->max_power = min(chan->max_power, chan->max_reg_power);
2491     + if (chan->orig_mpwr) {
2492     + /*
2493     + * Devices that have their own custom regulatory domain
2494     + * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
2495     + * passed country IE power settings.
2496     + */
2497     + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
2498     + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
2499     + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
2500     + chan->max_power = chan->max_reg_power;
2501     + else
2502     + chan->max_power = min(chan->orig_mpwr,
2503     + chan->max_reg_power);
2504     + } else
2505     + chan->max_power = chan->max_reg_power;
2506     }
2507    
2508     static void handle_band(struct wiphy *wiphy,
2509     diff --git a/net/wireless/util.c b/net/wireless/util.c
2510     index 0eb6cc0..d835377 100644
2511     --- a/net/wireless/util.c
2512     +++ b/net/wireless/util.c
2513     @@ -717,7 +717,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
2514     wdev->connect_keys = NULL;
2515     }
2516    
2517     -static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
2518     +void cfg80211_process_wdev_events(struct wireless_dev *wdev)
2519     {
2520     struct cfg80211_event *ev;
2521     unsigned long flags;
2522     @@ -974,6 +974,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
2523     }
2524     mutex_unlock(&rdev->devlist_mtx);
2525    
2526     + if (total == 1)
2527     + return 0;
2528     +
2529     for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
2530     const struct ieee80211_iface_combination *c;
2531     struct ieee80211_iface_limit *limits;
2532     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2533     index d906c5b..3897027 100644
2534     --- a/sound/pci/hda/patch_conexant.c
2535     +++ b/sound/pci/hda/patch_conexant.c
2536     @@ -2975,7 +2975,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2537     SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
2538     SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
2539     SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
2540     - SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
2541     SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2542     SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
2543     SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
2544     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2545     index 62e1627..152d91b 100644
2546     --- a/sound/pci/hda/patch_realtek.c
2547     +++ b/sound/pci/hda/patch_realtek.c
2548     @@ -6056,6 +6056,8 @@ static const struct alc_fixup alc269_fixups[] = {
2549     [ALC269_FIXUP_PCM_44K] = {
2550     .type = ALC_FIXUP_FUNC,
2551     .v.func = alc269_fixup_pcm_44k,
2552     + .chained = true,
2553     + .chain_id = ALC269_FIXUP_QUANTA_MUTE
2554     },
2555     [ALC269_FIXUP_STEREO_DMIC] = {
2556     .type = ALC_FIXUP_FUNC,
2557     @@ -6157,9 +6159,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2558     SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
2559     SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
2560     SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
2561     + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
2562     + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
2563     SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
2564     - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
2565     - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
2566     + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
2567     SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
2568    
2569     #if 0