Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.9/0102-3.9.3-all-fixes.patch

Revision 2178
Wed May 22 06:42:34 2013 UTC by niro
File size: 118026 byte(s)
-linux-3.9.1-3
1 niro 2178 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
2     index 7eb18c1..4f009c1 100644
3     --- a/arch/arm/include/asm/cmpxchg.h
4     +++ b/arch/arm/include/asm/cmpxchg.h
5     @@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
6     ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
7     atomic64_t, \
8     counter), \
9     - (unsigned long)(o), \
10     - (unsigned long)(n)))
11     + (unsigned long long)(o), \
12     + (unsigned long long)(n)))
13    
14     #define cmpxchg64_local(ptr, o, n) \
15     ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
16     local64_t, \
17     a), \
18     - (unsigned long)(o), \
19     - (unsigned long)(n)))
20     + (unsigned long long)(o), \
21     + (unsigned long long)(n)))
22    
23     #endif /* __LINUX_ARM_ARCH__ >= 6 */
24    
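Note on the hunk above: on 32-bit ARM, unsigned long is only 32 bits wide, so the old casts silently dropped the upper half of the 64-bit old/new values before they reached atomic64_cmpxchg(); casting to unsigned long long preserves the full operands. A minimal standalone C sketch of the truncation (not kernel code; compile for a 32-bit target to see the difference):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t v = 0x100000002ULL;                 /* upper word set */
        unsigned long as_ulong = (unsigned long)v;   /* truncated to 0x2 on a 32-bit target */
        unsigned long long as_ull = (unsigned long long)v;

        printf("unsigned long:      %#lx\n", as_ulong);
        printf("unsigned long long: %#llx\n", as_ull);
        return 0;
    }
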
25     diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
26     index 3f30aa1..57344b7 100644
27     --- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
28     +++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
29     @@ -344,6 +344,7 @@
30     #define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
31     #define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
32     #define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
33     +#define EXYNOS5_ARM_L2_OPTION S5P_PMUREG(0x2608)
34     #define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
35     #define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
36     #define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
37     diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
38     index daebc1a..97d6885 100644
39     --- a/arch/arm/mach-exynos/pmu.c
40     +++ b/arch/arm/mach-exynos/pmu.c
41     @@ -228,6 +228,7 @@ static struct exynos_pmu_conf exynos5250_pmu_config[] = {
42     { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
43     { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
44     { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
45     + { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } },
46     { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
47     { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
48     { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
49     @@ -353,11 +354,9 @@ static void exynos5_init_pmu(void)
50    
51     /*
52     * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
53     - * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
54     */
55     tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
56     - tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
57     - EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
58     + tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
59     __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
60    
61     /*
62     diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
63     index 3a077df..9bc9f19 100644
64     --- a/arch/arm/mach-omap2/board-rx51-peripherals.c
65     +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
66     @@ -73,11 +73,11 @@
67     #define LIS302_IRQ1_GPIO 181
68     #define LIS302_IRQ2_GPIO 180 /* Not yet in use */
69    
70     -/* list all spi devices here */
71     +/* List all SPI devices here. Note that the list/probe order seems to matter! */
72     enum {
73     RX51_SPI_WL1251,
74     - RX51_SPI_MIPID, /* LCD panel */
75     RX51_SPI_TSC2005, /* Touch Controller */
76     + RX51_SPI_MIPID, /* LCD panel */
77     };
78    
79     static struct wl12xx_platform_data wl1251_pdata;
80     diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
81     index 113e282..1976900 100644
82     --- a/arch/parisc/Makefile
83     +++ b/arch/parisc/Makefile
84     @@ -23,26 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm
85     CHECKFLAGS += -D__hppa__=1
86     LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
87    
88     -MACHINE := $(shell uname -m)
89     -ifeq ($(MACHINE),parisc*)
90     -NATIVE := 1
91     -endif
92     -
93     ifdef CONFIG_64BIT
94     UTS_MACHINE := parisc64
95     CHECKFLAGS += -D__LP64__=1 -m64
96     -WIDTH := 64
97     +CC_ARCHES = hppa64
98     else # 32-bit
99     -WIDTH :=
100     +CC_ARCHES = hppa hppa2.0 hppa1.1
101     endif
102    
103     -# attempt to help out folks who are cross-compiling
104     -ifeq ($(NATIVE),1)
105     -CROSS_COMPILE := hppa$(WIDTH)-linux-
106     -else
107     - ifeq ($(CROSS_COMPILE),)
108     - CROSS_COMPILE := hppa$(WIDTH)-linux-gnu-
109     - endif
110     +ifneq ($(SUBARCH),$(UTS_MACHINE))
111     + ifeq ($(CROSS_COMPILE),)
112     + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
113     + CROSS_COMPILE := $(call cc-cross-prefix, \
114     + $(foreach a,$(CC_ARCHES), \
115     + $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
116     + endif
117     endif
118    
119     OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
120     diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
121     index f33201b..897bce4 100644
122     --- a/arch/parisc/kernel/entry.S
123     +++ b/arch/parisc/kernel/entry.S
124     @@ -444,9 +444,41 @@
125     L2_ptep \pgd,\pte,\index,\va,\fault
126     .endm
127    
128     + /* Acquire pa_dbit_lock lock. */
129     + .macro dbit_lock spc,tmp,tmp1
130     +#ifdef CONFIG_SMP
131     + cmpib,COND(=),n 0,\spc,2f
132     + load32 PA(pa_dbit_lock),\tmp
133     +1: LDCW 0(\tmp),\tmp1
134     + cmpib,COND(=) 0,\tmp1,1b
135     + nop
136     +2:
137     +#endif
138     + .endm
139     +
140     + /* Release pa_dbit_lock lock without reloading lock address. */
141     + .macro dbit_unlock0 spc,tmp
142     +#ifdef CONFIG_SMP
143     + or,COND(=) %r0,\spc,%r0
144     + stw \spc,0(\tmp)
145     +#endif
146     + .endm
147     +
148     + /* Release pa_dbit_lock lock. */
149     + .macro dbit_unlock1 spc,tmp
150     +#ifdef CONFIG_SMP
151     + load32 PA(pa_dbit_lock),\tmp
152     + dbit_unlock0 \spc,\tmp
153     +#endif
154     + .endm
155     +
156     /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
157     * don't needlessly dirty the cache line if it was already set */
158     - .macro update_ptep ptep,pte,tmp,tmp1
159     + .macro update_ptep spc,ptep,pte,tmp,tmp1
160     +#ifdef CONFIG_SMP
161     + or,COND(=) %r0,\spc,%r0
162     + LDREG 0(\ptep),\pte
163     +#endif
164     ldi _PAGE_ACCESSED,\tmp1
165     or \tmp1,\pte,\tmp
166     and,COND(<>) \tmp1,\pte,%r0
167     @@ -455,7 +487,11 @@
168    
169     /* Set the dirty bit (and accessed bit). No need to be
170     * clever, this is only used from the dirty fault */
171     - .macro update_dirty ptep,pte,tmp
172     + .macro update_dirty spc,ptep,pte,tmp
173     +#ifdef CONFIG_SMP
174     + or,COND(=) %r0,\spc,%r0
175     + LDREG 0(\ptep),\pte
176     +#endif
177     ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
178     or \tmp,\pte,\pte
179     STREG \pte,0(\ptep)
180     @@ -825,11 +861,6 @@ ENTRY(syscall_exit_rfi)
181     STREG %r19,PT_SR7(%r16)
182    
183     intr_return:
184     - /* NOTE: Need to enable interrupts incase we schedule. */
185     - ssm PSW_SM_I, %r0
186     -
187     -intr_check_resched:
188     -
189     /* check for reschedule */
190     mfctl %cr30,%r1
191     LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
192     @@ -856,6 +887,11 @@ intr_check_sig:
193     LDREG PT_IASQ1(%r16), %r20
194     cmpib,COND(=),n 0,%r20,intr_restore /* backward */
195    
196     + /* NOTE: We need to enable interrupts if we have to deliver
197     + * signals. We used to do this earlier but it caused kernel
198     + * stack overflows. */
199     + ssm PSW_SM_I, %r0
200     +
201     copy %r0, %r25 /* long in_syscall = 0 */
202     #ifdef CONFIG_64BIT
203     ldo -16(%r30),%r29 /* Reference param save area */
204     @@ -907,6 +943,10 @@ intr_do_resched:
205     cmpib,COND(=) 0, %r20, intr_do_preempt
206     nop
207    
208     + /* NOTE: We need to enable interrupts if we schedule. We used
209     + * to do this earlier but it caused kernel stack overflows. */
210     + ssm PSW_SM_I, %r0
211     +
212     #ifdef CONFIG_64BIT
213     ldo -16(%r30),%r29 /* Reference param save area */
214     #endif
215     @@ -1099,11 +1139,13 @@ dtlb_miss_20w:
216    
217     L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
218    
219     - update_ptep ptp,pte,t0,t1
220     + dbit_lock spc,t0,t1
221     + update_ptep spc,ptp,pte,t0,t1
222    
223     make_insert_tlb spc,pte,prot
224    
225     idtlbt pte,prot
226     + dbit_unlock1 spc,t0
227    
228     rfir
229     nop
230     @@ -1123,11 +1165,13 @@ nadtlb_miss_20w:
231    
232     L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
233    
234     - update_ptep ptp,pte,t0,t1
235     + dbit_lock spc,t0,t1
236     + update_ptep spc,ptp,pte,t0,t1
237    
238     make_insert_tlb spc,pte,prot
239    
240     idtlbt pte,prot
241     + dbit_unlock1 spc,t0
242    
243     rfir
244     nop
245     @@ -1149,7 +1193,8 @@ dtlb_miss_11:
246    
247     L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
248    
249     - update_ptep ptp,pte,t0,t1
250     + dbit_lock spc,t0,t1
251     + update_ptep spc,ptp,pte,t0,t1
252    
253     make_insert_tlb_11 spc,pte,prot
254    
255     @@ -1160,6 +1205,7 @@ dtlb_miss_11:
256     idtlbp prot,(%sr1,va)
257    
258     mtsp t0, %sr1 /* Restore sr1 */
259     + dbit_unlock1 spc,t0
260    
261     rfir
262     nop
263     @@ -1180,7 +1226,8 @@ nadtlb_miss_11:
264    
265     L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
266    
267     - update_ptep ptp,pte,t0,t1
268     + dbit_lock spc,t0,t1
269     + update_ptep spc,ptp,pte,t0,t1
270    
271     make_insert_tlb_11 spc,pte,prot
272    
273     @@ -1192,6 +1239,7 @@ nadtlb_miss_11:
274     idtlbp prot,(%sr1,va)
275    
276     mtsp t0, %sr1 /* Restore sr1 */
277     + dbit_unlock1 spc,t0
278    
279     rfir
280     nop
281     @@ -1212,13 +1260,15 @@ dtlb_miss_20:
282    
283     L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
284    
285     - update_ptep ptp,pte,t0,t1
286     + dbit_lock spc,t0,t1
287     + update_ptep spc,ptp,pte,t0,t1
288    
289     make_insert_tlb spc,pte,prot
290    
291     f_extend pte,t0
292    
293     idtlbt pte,prot
294     + dbit_unlock1 spc,t0
295    
296     rfir
297     nop
298     @@ -1238,13 +1288,15 @@ nadtlb_miss_20:
299    
300     L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
301    
302     - update_ptep ptp,pte,t0,t1
303     + dbit_lock spc,t0,t1
304     + update_ptep spc,ptp,pte,t0,t1
305    
306     make_insert_tlb spc,pte,prot
307    
308     f_extend pte,t0
309    
310     idtlbt pte,prot
311     + dbit_unlock1 spc,t0
312    
313     rfir
314     nop
315     @@ -1345,11 +1397,13 @@ itlb_miss_20w:
316    
317     L3_ptep ptp,pte,t0,va,itlb_fault
318    
319     - update_ptep ptp,pte,t0,t1
320     + dbit_lock spc,t0,t1
321     + update_ptep spc,ptp,pte,t0,t1
322    
323     make_insert_tlb spc,pte,prot
324    
325     iitlbt pte,prot
326     + dbit_unlock1 spc,t0
327    
328     rfir
329     nop
330     @@ -1367,11 +1421,13 @@ naitlb_miss_20w:
331    
332     L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
333    
334     - update_ptep ptp,pte,t0,t1
335     + dbit_lock spc,t0,t1
336     + update_ptep spc,ptp,pte,t0,t1
337    
338     make_insert_tlb spc,pte,prot
339    
340     iitlbt pte,prot
341     + dbit_unlock1 spc,t0
342    
343     rfir
344     nop
345     @@ -1393,7 +1449,8 @@ itlb_miss_11:
346    
347     L2_ptep ptp,pte,t0,va,itlb_fault
348    
349     - update_ptep ptp,pte,t0,t1
350     + dbit_lock spc,t0,t1
351     + update_ptep spc,ptp,pte,t0,t1
352    
353     make_insert_tlb_11 spc,pte,prot
354    
355     @@ -1404,6 +1461,7 @@ itlb_miss_11:
356     iitlbp prot,(%sr1,va)
357    
358     mtsp t0, %sr1 /* Restore sr1 */
359     + dbit_unlock1 spc,t0
360    
361     rfir
362     nop
363     @@ -1415,7 +1473,8 @@ naitlb_miss_11:
364    
365     L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
366    
367     - update_ptep ptp,pte,t0,t1
368     + dbit_lock spc,t0,t1
369     + update_ptep spc,ptp,pte,t0,t1
370    
371     make_insert_tlb_11 spc,pte,prot
372    
373     @@ -1426,6 +1485,7 @@ naitlb_miss_11:
374     iitlbp prot,(%sr1,va)
375    
376     mtsp t0, %sr1 /* Restore sr1 */
377     + dbit_unlock1 spc,t0
378    
379     rfir
380     nop
381     @@ -1447,13 +1507,15 @@ itlb_miss_20:
382    
383     L2_ptep ptp,pte,t0,va,itlb_fault
384    
385     - update_ptep ptp,pte,t0,t1
386     + dbit_lock spc,t0,t1
387     + update_ptep spc,ptp,pte,t0,t1
388    
389     make_insert_tlb spc,pte,prot
390    
391     f_extend pte,t0
392    
393     iitlbt pte,prot
394     + dbit_unlock1 spc,t0
395    
396     rfir
397     nop
398     @@ -1465,13 +1527,15 @@ naitlb_miss_20:
399    
400     L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
401    
402     - update_ptep ptp,pte,t0,t1
403     + dbit_lock spc,t0,t1
404     + update_ptep spc,ptp,pte,t0,t1
405    
406     make_insert_tlb spc,pte,prot
407    
408     f_extend pte,t0
409    
410     iitlbt pte,prot
411     + dbit_unlock1 spc,t0
412    
413     rfir
414     nop
415     @@ -1495,29 +1559,13 @@ dbit_trap_20w:
416    
417     L3_ptep ptp,pte,t0,va,dbit_fault
418    
419     -#ifdef CONFIG_SMP
420     - cmpib,COND(=),n 0,spc,dbit_nolock_20w
421     - load32 PA(pa_dbit_lock),t0
422     -
423     -dbit_spin_20w:
424     - LDCW 0(t0),t1
425     - cmpib,COND(=) 0,t1,dbit_spin_20w
426     - nop
427     -
428     -dbit_nolock_20w:
429     -#endif
430     - update_dirty ptp,pte,t1
431     + dbit_lock spc,t0,t1
432     + update_dirty spc,ptp,pte,t1
433    
434     make_insert_tlb spc,pte,prot
435    
436     idtlbt pte,prot
437     -#ifdef CONFIG_SMP
438     - cmpib,COND(=),n 0,spc,dbit_nounlock_20w
439     - ldi 1,t1
440     - stw t1,0(t0)
441     -
442     -dbit_nounlock_20w:
443     -#endif
444     + dbit_unlock0 spc,t0
445    
446     rfir
447     nop
448     @@ -1531,18 +1579,8 @@ dbit_trap_11:
449    
450     L2_ptep ptp,pte,t0,va,dbit_fault
451    
452     -#ifdef CONFIG_SMP
453     - cmpib,COND(=),n 0,spc,dbit_nolock_11
454     - load32 PA(pa_dbit_lock),t0
455     -
456     -dbit_spin_11:
457     - LDCW 0(t0),t1
458     - cmpib,= 0,t1,dbit_spin_11
459     - nop
460     -
461     -dbit_nolock_11:
462     -#endif
463     - update_dirty ptp,pte,t1
464     + dbit_lock spc,t0,t1
465     + update_dirty spc,ptp,pte,t1
466    
467     make_insert_tlb_11 spc,pte,prot
468    
469     @@ -1553,13 +1591,7 @@ dbit_nolock_11:
470     idtlbp prot,(%sr1,va)
471    
472     mtsp t1, %sr1 /* Restore sr1 */
473     -#ifdef CONFIG_SMP
474     - cmpib,COND(=),n 0,spc,dbit_nounlock_11
475     - ldi 1,t1
476     - stw t1,0(t0)
477     -
478     -dbit_nounlock_11:
479     -#endif
480     + dbit_unlock0 spc,t0
481    
482     rfir
483     nop
484     @@ -1571,32 +1603,15 @@ dbit_trap_20:
485    
486     L2_ptep ptp,pte,t0,va,dbit_fault
487    
488     -#ifdef CONFIG_SMP
489     - cmpib,COND(=),n 0,spc,dbit_nolock_20
490     - load32 PA(pa_dbit_lock),t0
491     -
492     -dbit_spin_20:
493     - LDCW 0(t0),t1
494     - cmpib,= 0,t1,dbit_spin_20
495     - nop
496     -
497     -dbit_nolock_20:
498     -#endif
499     - update_dirty ptp,pte,t1
500     + dbit_lock spc,t0,t1
501     + update_dirty spc,ptp,pte,t1
502    
503     make_insert_tlb spc,pte,prot
504    
505     f_extend pte,t1
506    
507     idtlbt pte,prot
508     -
509     -#ifdef CONFIG_SMP
510     - cmpib,COND(=),n 0,spc,dbit_nounlock_20
511     - ldi 1,t1
512     - stw t1,0(t0)
513     -
514     -dbit_nounlock_20:
515     -#endif
516     + dbit_unlock0 spc,t0
517    
518     rfir
519     nop
520     @@ -1694,7 +1709,8 @@ ENTRY(sys_\name\()_wrapper)
521     ldo TASK_REGS(%r1),%r1
522     reg_save %r1
523     mfctl %cr27, %r28
524     - b sys_\name
525     + ldil L%sys_\name, %r31
526     + be R%sys_\name(%sr4,%r31)
527     STREG %r28, PT_CR27(%r1)
528     ENDPROC(sys_\name\()_wrapper)
529     .endm
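Reviewer note on the dbit_lock/dbit_unlock macros added earlier in this entry.S diff: LDCW is PA-RISC's atomic load-and-clear, so reading zero means another CPU already holds pa_dbit_lock, and the lock is skipped entirely for kernel-space faults (spc == 0). A rough C rendering of that logic, using a hypothetical pa_dbit_lock_word and a compiler exchange builtin as the stand-in for LDCW (illustrative sketch only, not code from the patch):

    static volatile unsigned int pa_dbit_lock_word = 1;   /* non-zero = free (hypothetical name) */

    static unsigned int ldcw(volatile unsigned int *addr)
    {
        /* stand-in for LDCW: atomically load the word and clear it */
        return __atomic_exchange_n(addr, 0u, __ATOMIC_ACQUIRE);
    }

    static void dbit_lock(unsigned long spc)
    {
        if (spc == 0)                          /* kernel-space fault: lock not taken */
            return;
        while (ldcw(&pa_dbit_lock_word) == 0)
            ;                                  /* spin until the word reads non-zero */
    }

    static void dbit_unlock(unsigned long spc)
    {
        if (spc == 0)
            return;
        /* store non-zero to release, as the stw of \spc does in the macro */
        __atomic_store_n(&pa_dbit_lock_word, 1u, __ATOMIC_RELEASE);
    }
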
530     diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
531     index aef00c6..ee38f29 100644
532     --- a/arch/powerpc/include/asm/rtas.h
533     +++ b/arch/powerpc/include/asm/rtas.h
534     @@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsigned short hex);
535     extern void rtas_initialize(void);
536     extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
537     extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
538     +extern int rtas_online_cpus_mask(cpumask_var_t cpus);
539     +extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
540     extern int rtas_ibm_suspend_me(struct rtas_args *);
541    
542     struct rtc_time;
543     diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
544     index 466a290..611acdf 100644
545     --- a/arch/powerpc/kernel/machine_kexec_64.c
546     +++ b/arch/powerpc/kernel/machine_kexec_64.c
547     @@ -17,6 +17,7 @@
548     #include <linux/errno.h>
549     #include <linux/kernel.h>
550     #include <linux/cpu.h>
551     +#include <linux/hardirq.h>
552    
553     #include <asm/page.h>
554     #include <asm/current.h>
555     @@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
556     pr_debug("kexec: Starting switchover sequence.\n");
557    
558     /* switch to a staticly allocated stack. Based on irq stack code.
559     + * We setup preempt_count to avoid using VMX in memcpy.
560     * XXX: the task struct will likely be invalid once we do the copy!
561     */
562     kexec_stack.thread_info.task = current_thread_info()->task;
563     kexec_stack.thread_info.flags = 0;
564     + kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
565     + kexec_stack.thread_info.cpu = current_thread_info()->cpu;
566    
567     /* We need a static PACA, too; copy this CPU's PACA over and switch to
568     * it. Also poison per_cpu_offset to catch anyone using non-static
569     diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
570     index 1fd6e7b..52add6f 100644
571     --- a/arch/powerpc/kernel/rtas.c
572     +++ b/arch/powerpc/kernel/rtas.c
573     @@ -19,6 +19,7 @@
574     #include <linux/init.h>
575     #include <linux/capability.h>
576     #include <linux/delay.h>
577     +#include <linux/cpu.h>
578     #include <linux/smp.h>
579     #include <linux/completion.h>
580     #include <linux/cpumask.h>
581     @@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
582     __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
583     }
584    
585     +enum rtas_cpu_state {
586     + DOWN,
587     + UP,
588     +};
589     +
590     +#ifndef CONFIG_SMP
591     +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
592     + cpumask_var_t cpus)
593     +{
594     + if (!cpumask_empty(cpus)) {
595     + cpumask_clear(cpus);
596     + return -EINVAL;
597     + } else
598     + return 0;
599     +}
600     +#else
601     +/* On return cpumask will be altered to indicate CPUs changed.
602     + * CPUs with states changed will be set in the mask,
603     + * CPUs with status unchanged will be unset in the mask. */
604     +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
605     + cpumask_var_t cpus)
606     +{
607     + int cpu;
608     + int cpuret = 0;
609     + int ret = 0;
610     +
611     + if (cpumask_empty(cpus))
612     + return 0;
613     +
614     + for_each_cpu(cpu, cpus) {
615     + switch (state) {
616     + case DOWN:
617     + cpuret = cpu_down(cpu);
618     + break;
619     + case UP:
620     + cpuret = cpu_up(cpu);
621     + break;
622     + }
623     + if (cpuret) {
624     + pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
625     + __func__,
626     + ((state == UP) ? "up" : "down"),
627     + cpu, cpuret);
628     + if (!ret)
629     + ret = cpuret;
630     + if (state == UP) {
631     + /* clear bits for unchanged cpus, return */
632     + cpumask_shift_right(cpus, cpus, cpu);
633     + cpumask_shift_left(cpus, cpus, cpu);
634     + break;
635     + } else {
636     + /* clear bit for unchanged cpu, continue */
637     + cpumask_clear_cpu(cpu, cpus);
638     + }
639     + }
640     + }
641     +
642     + return ret;
643     +}
644     +#endif
645     +
646     +int rtas_online_cpus_mask(cpumask_var_t cpus)
647     +{
648     + int ret;
649     +
650     + ret = rtas_cpu_state_change_mask(UP, cpus);
651     +
652     + if (ret) {
653     + cpumask_var_t tmp_mask;
654     +
655     + if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
656     + return ret;
657     +
658     + /* Use tmp_mask to preserve cpus mask from first failure */
659     + cpumask_copy(tmp_mask, cpus);
660     + rtas_offline_cpus_mask(tmp_mask);
661     + free_cpumask_var(tmp_mask);
662     + }
663     +
664     + return ret;
665     +}
666     +EXPORT_SYMBOL(rtas_online_cpus_mask);
667     +
668     +int rtas_offline_cpus_mask(cpumask_var_t cpus)
669     +{
670     + return rtas_cpu_state_change_mask(DOWN, cpus);
671     +}
672     +EXPORT_SYMBOL(rtas_offline_cpus_mask);
673     +
674     int rtas_ibm_suspend_me(struct rtas_args *args)
675     {
676     long state;
677     @@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
678     unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
679     struct rtas_suspend_me_data data;
680     DECLARE_COMPLETION_ONSTACK(done);
681     + cpumask_var_t offline_mask;
682     + int cpuret;
683    
684     if (!rtas_service_present("ibm,suspend-me"))
685     return -ENOSYS;
686     @@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
687     return 0;
688     }
689    
690     + if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
691     + return -ENOMEM;
692     +
693     atomic_set(&data.working, 0);
694     atomic_set(&data.done, 0);
695     atomic_set(&data.error, 0);
696     data.token = rtas_token("ibm,suspend-me");
697     data.complete = &done;
698     +
699     + /* All present CPUs must be online */
700     + cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
701     + cpuret = rtas_online_cpus_mask(offline_mask);
702     + if (cpuret) {
703     + pr_err("%s: Could not bring present CPUs online.\n", __func__);
704     + atomic_set(&data.error, cpuret);
705     + goto out;
706     + }
707     +
708     stop_topology_update();
709    
710     /* Call function on all CPUs. One of us will make the
711     @@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
712    
713     start_topology_update();
714    
715     + /* Take down CPUs not online prior to suspend */
716     + cpuret = rtas_offline_cpus_mask(offline_mask);
717     + if (cpuret)
718     + pr_warn("%s: Could not restore CPUs to offline state.\n",
719     + __func__);
720     +
721     +out:
722     + free_cpumask_var(offline_mask);
723     return atomic_read(&data.error);
724     }
725     #else /* CONFIG_PPC_PSERIES */
726     diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
727     index 47226e0..5f997e7 100644
728     --- a/arch/powerpc/platforms/pseries/suspend.c
729     +++ b/arch/powerpc/platforms/pseries/suspend.c
730     @@ -16,6 +16,7 @@
731     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
732     */
733    
734     +#include <linux/cpu.h>
735     #include <linux/delay.h>
736     #include <linux/suspend.h>
737     #include <linux/stat.h>
738     @@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
739     struct device_attribute *attr,
740     const char *buf, size_t count)
741     {
742     + cpumask_var_t offline_mask;
743     int rc;
744    
745     if (!capable(CAP_SYS_ADMIN))
746     return -EPERM;
747    
748     + if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
749     + return -ENOMEM;
750     +
751     stream_id = simple_strtoul(buf, NULL, 16);
752    
753     do {
754     @@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
755     } while (rc == -EAGAIN);
756    
757     if (!rc) {
758     + /* All present CPUs must be online */
759     + cpumask_andnot(offline_mask, cpu_present_mask,
760     + cpu_online_mask);
761     + rc = rtas_online_cpus_mask(offline_mask);
762     + if (rc) {
763     + pr_err("%s: Could not bring present CPUs online.\n",
764     + __func__);
765     + goto out;
766     + }
767     +
768     stop_topology_update();
769     rc = pm_suspend(PM_SUSPEND_MEM);
770     start_topology_update();
771     +
772     + /* Take down CPUs not online prior to suspend */
773     + if (!rtas_offline_cpus_mask(offline_mask))
774     + pr_warn("%s: Could not restore CPUs to offline "
775     + "state.\n", __func__);
776     }
777    
778     stream_id = 0;
779    
780     if (!rc)
781     rc = count;
782     +out:
783     + free_cpumask_var(offline_mask);
784     return rc;
785     }
786    
787     diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
788     index 25877ae..41a2a0b 100644
789     --- a/arch/tile/Kconfig
790     +++ b/arch/tile/Kconfig
791     @@ -368,11 +368,17 @@ config HARDWALL
792     config KERNEL_PL
793     int "Processor protection level for kernel"
794     range 1 2
795     - default "1"
796     + default 2 if TILEGX
797     + default 1 if !TILEGX
798     ---help---
799     - This setting determines the processor protection level the
800     - kernel will be built to run at. Generally you should use
801     - the default value here.
802     + Since MDE 4.2, the Tilera hypervisor runs the kernel
803     + at PL2 by default. If running under an older hypervisor,
804     + or as a KVM guest, you must run at PL1. (The current
805     + hypervisor may also be recompiled with "make HV_PL=2" to
806     + allow it to run a kernel at PL1, but clients running at PL1
807     + are not expected to be supported indefinitely.)
808     +
809     + If you're not sure, don't change the default.
810    
811     source "arch/tile/gxio/Kconfig"
812    
813     diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
814     index ccd847e..837dca5 100644
815     --- a/arch/tile/include/hv/hypervisor.h
816     +++ b/arch/tile/include/hv/hypervisor.h
817     @@ -107,7 +107,22 @@
818     #define HV_DISPATCH_ENTRY_SIZE 32
819    
820     /** Version of the hypervisor interface defined by this file */
821     -#define _HV_VERSION 11
822     +#define _HV_VERSION 13
823     +
824     +/** Last version of the hypervisor interface with old hv_init() ABI.
825     + *
826     + * The change from version 12 to version 13 corresponds to launching
827     + * the client by default at PL2 instead of PL1 (corresponding to the
828     + * hv itself running at PL3 instead of PL2). To make this explicit,
829     + * the hv_init() API was also extended so the client can report its
830     + * desired PL, resulting in a more helpful failure diagnostic. If you
831     + * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
832     + * argument, the hypervisor will assume client_pl = 1.
833     + *
834     + * Note that this is a deprecated solution and we do not expect to
835     + * support clients of the Tilera hypervisor running at PL1 indefinitely.
836     + */
837     +#define _HV_VERSION_OLD_HV_INIT 12
838    
839     /* Index into hypervisor interface dispatch code blocks.
840     *
841     @@ -377,7 +392,11 @@ typedef int HV_Errno;
842     #ifndef __ASSEMBLER__
843    
844     /** Pass HV_VERSION to hv_init to request this version of the interface. */
845     -typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
846     +typedef enum {
847     + HV_VERSION = _HV_VERSION,
848     + HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
849     +
850     +} HV_VersionNumber;
851    
852     /** Initializes the hypervisor.
853     *
854     @@ -385,9 +404,11 @@ typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
855     * that this program expects, typically HV_VERSION.
856     * @param chip_num Architecture number of the chip the client was built for.
857     * @param chip_rev_num Revision number of the chip the client was built for.
858     + * @param client_pl Privilege level the client is built for
859     + * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
860     */
861     void hv_init(HV_VersionNumber interface_version_number,
862     - int chip_num, int chip_rev_num);
863     + int chip_num, int chip_rev_num, int client_pl);
864    
865    
866     /** Queries we can make for hv_sysconf().
867     diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
868     index f71bfee..ac11530 100644
869     --- a/arch/tile/kernel/head_32.S
870     +++ b/arch/tile/kernel/head_32.S
871     @@ -38,7 +38,7 @@ ENTRY(_start)
872     movei r2, TILE_CHIP_REV
873     }
874     {
875     - moveli r0, _HV_VERSION
876     + moveli r0, _HV_VERSION_OLD_HV_INIT
877     jal hv_init
878     }
879     /* Get a reasonable default ASID in r0 */
880     diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
881     index f9a2734..6093964 100644
882     --- a/arch/tile/kernel/head_64.S
883     +++ b/arch/tile/kernel/head_64.S
884     @@ -34,13 +34,19 @@
885     ENTRY(_start)
886     /* Notify the hypervisor of what version of the API we want */
887     {
888     +#if KERNEL_PL == 1 && _HV_VERSION == 13
889     + /* Support older hypervisors by asking for API version 12. */
890     + movei r0, _HV_VERSION_OLD_HV_INIT
891     +#else
892     + movei r0, _HV_VERSION
893     +#endif
894     movei r1, TILE_CHIP
895     - movei r2, TILE_CHIP_REV
896     }
897     {
898     - moveli r0, _HV_VERSION
899     - jal hv_init
900     + movei r2, TILE_CHIP_REV
901     + movei r3, KERNEL_PL
902     }
903     + jal hv_init
904     /* Get a reasonable default ASID in r0 */
905     {
906     move r0, zero
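The #if added to head_64.S above only decides which interface version to hand to hv_init(): a kernel built for PL1 against version-13 headers requests the older version-12 ABI so it can still boot on pre-MDE-4.2 hypervisors. The same decision restated as plain C for readability (illustrative only):

    /* kernel_pl corresponds to KERNEL_PL, hv_version to _HV_VERSION */
    static int hv_init_version(int kernel_pl, int hv_version)
    {
        if (kernel_pl == 1 && hv_version == 13)
            return 12;         /* _HV_VERSION_OLD_HV_INIT: client_pl assumed to be 1 */
        return hv_version;     /* new ABI: client_pl is passed explicitly */
    }
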
907     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
908     index 15b5cef..6ef2a37 100644
909     --- a/arch/x86/Kconfig
910     +++ b/arch/x86/Kconfig
911     @@ -107,7 +107,6 @@ config X86
912     select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
913     select GENERIC_TIME_VSYSCALL if X86_64
914     select KTIME_SCALAR if X86_32
915     - select ALWAYS_USE_PERSISTENT_CLOCK
916     select GENERIC_STRNCPY_FROM_USER
917     select GENERIC_STRNLEN_USER
918     select HAVE_CONTEXT_TRACKING if X86_64
919     diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
920     index d893e8e..2e9e128 100644
921     --- a/arch/x86/kernel/microcode_intel_early.c
922     +++ b/arch/x86/kernel/microcode_intel_early.c
923     @@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
924     #endif
925    
926     #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
927     +static DEFINE_MUTEX(x86_cpu_microcode_mutex);
928     /*
929     * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
930     * hot added or resumes.
931     @@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
932     * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
933     * hotplug.
934     */
935     - cpu_hotplug_driver_lock();
936     + mutex_lock(&x86_cpu_microcode_mutex);
937    
938     mc_saved_count_init = mc_saved_data.mc_saved_count;
939     mc_saved_count = mc_saved_data.mc_saved_count;
940     @@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
941     }
942    
943     out:
944     - cpu_hotplug_driver_unlock();
945     + mutex_unlock(&x86_cpu_microcode_mutex);
946    
947     return ret;
948     }
949     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
950     index a9c9d3e..59622c9 100644
951     --- a/arch/x86/kvm/emulate.c
952     +++ b/arch/x86/kvm/emulate.c
953     @@ -60,6 +60,7 @@
954     #define OpGS 25ull /* GS */
955     #define OpMem8 26ull /* 8-bit zero extended memory operand */
956     #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
957     +#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
958    
959     #define OpBits 5 /* Width of operand field */
960     #define OpMask ((1ull << OpBits) - 1)
961     @@ -99,6 +100,7 @@
962     #define SrcImmUByte (OpImmUByte << SrcShift)
963     #define SrcImmU (OpImmU << SrcShift)
964     #define SrcSI (OpSI << SrcShift)
965     +#define SrcXLat (OpXLat << SrcShift)
966     #define SrcImmFAddr (OpImmFAddr << SrcShift)
967     #define SrcMemFAddr (OpMemFAddr << SrcShift)
968     #define SrcAcc (OpAcc << SrcShift)
969     @@ -532,6 +534,9 @@ FOP_SETCC(setle)
970     FOP_SETCC(setnle)
971     FOP_END;
972    
973     +FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
974     +FOP_END;
975     +
976     #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
977     do { \
978     unsigned long _tmp; \
979     @@ -2986,6 +2991,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
980     return X86EMUL_CONTINUE;
981     }
982    
983     +static int em_aam(struct x86_emulate_ctxt *ctxt)
984     +{
985     + u8 al, ah;
986     +
987     + if (ctxt->src.val == 0)
988     + return emulate_de(ctxt);
989     +
990     + al = ctxt->dst.val & 0xff;
991     + ah = al / ctxt->src.val;
992     + al %= ctxt->src.val;
993     +
994     + ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
995     +
996     + /* Set PF, ZF, SF */
997     + ctxt->src.type = OP_IMM;
998     + ctxt->src.val = 0;
999     + ctxt->src.bytes = 1;
1000     + fastop(ctxt, em_or);
1001     +
1002     + return X86EMUL_CONTINUE;
1003     +}
1004     +
1005     static int em_aad(struct x86_emulate_ctxt *ctxt)
1006     {
1007     u8 al = ctxt->dst.val & 0xff;
1008     @@ -3926,7 +3953,10 @@ static const struct opcode opcode_table[256] = {
1009     /* 0xD0 - 0xD7 */
1010     G(Src2One | ByteOp, group2), G(Src2One, group2),
1011     G(Src2CL | ByteOp, group2), G(Src2CL, group2),
1012     - N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
1013     + I(DstAcc | SrcImmUByte | No64, em_aam),
1014     + I(DstAcc | SrcImmUByte | No64, em_aad),
1015     + F(DstAcc | ByteOp | No64, em_salc),
1016     + I(DstAcc | SrcXLat | ByteOp, em_mov),
1017     /* 0xD8 - 0xDF */
1018     N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
1019     /* 0xE0 - 0xE7 */
1020     @@ -4188,6 +4218,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
1021     op->val = 0;
1022     op->count = 1;
1023     break;
1024     + case OpXLat:
1025     + op->type = OP_MEM;
1026     + op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1027     + op->addr.mem.ea =
1028     + register_address(ctxt,
1029     + reg_read(ctxt, VCPU_REGS_RBX) +
1030     + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
1031     + op->addr.mem.seg = seg_override(ctxt);
1032     + op->val = 0;
1033     + break;
1034     case OpImmFAddr:
1035     op->type = OP_IMM;
1036     op->addr.mem.ea = ctxt->_eip;
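For reference, the semantics the emulator hunks above implement: AAM divides AL by an immediate (raising #DE when the immediate is zero) and stores quotient/remainder in AH/AL, and XLAT replaces AL with the byte at BX/EBX/RBX plus zero-extended AL. A standalone C sketch of both (illustrative, not emulator code):

    #include <stdint.h>

    /* AAM imm8: divisor == 0 is the #DE case handled by emulate_de() above */
    static void aam(uint8_t *al, uint8_t *ah, uint8_t divisor)
    {
        uint8_t a = *al;
        *ah = a / divisor;
        *al = a % divisor;
        /* the emulator then sets PF/ZF/SF from the new AL */
    }

    /* XLAT: table is the byte array addressed by RBX, al the zero-extended index */
    static uint8_t xlat(const uint8_t *table, uint8_t al)
    {
        return table[al];
    }
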
1037     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1038     index 6667042..0af1807 100644
1039     --- a/arch/x86/kvm/vmx.c
1040     +++ b/arch/x86/kvm/vmx.c
1041     @@ -5197,6 +5197,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
1042     return 0;
1043     }
1044    
1045     + if (vcpu->arch.halt_request) {
1046     + vcpu->arch.halt_request = 0;
1047     + ret = kvm_emulate_halt(vcpu);
1048     + goto out;
1049     + }
1050     +
1051     if (signal_pending(current))
1052     goto out;
1053     if (need_resched())
1054     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1055     index 2363127..cf95e19 100644
1056     --- a/arch/x86/xen/enlighten.c
1057     +++ b/arch/x86/xen/enlighten.c
1058     @@ -156,6 +156,21 @@ static void xen_vcpu_setup(int cpu)
1059    
1060     BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
1061    
1062     + /*
1063     + * This path is called twice on PVHVM - first during bootup via
1064     + * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
1065     + * hotplugged: cpu_up -> xen_hvm_cpu_notify.
1066     + * As we can only do the VCPUOP_register_vcpu_info once lets
1067     + * not over-write its result.
1068     + *
1069     + * For PV it is called during restore (xen_vcpu_restore) and bootup
1070     + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
1071     + * use this function.
1072     + */
1073     + if (xen_hvm_domain()) {
1074     + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
1075     + return;
1076     + }
1077     if (cpu < MAX_VIRT_CPUS)
1078     per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
1079    
1080     diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
1081     index ec7f569..c84ee95 100644
1082     --- a/drivers/acpi/acpica/exfldio.c
1083     +++ b/drivers/acpi/acpica/exfldio.c
1084     @@ -720,7 +720,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
1085    
1086     if ((obj_desc->common_field.start_field_bit_offset == 0) &&
1087     (obj_desc->common_field.bit_length == access_bit_width)) {
1088     - status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
1089     + if (buffer_length >= sizeof(u64)) {
1090     + status =
1091     + acpi_ex_field_datum_io(obj_desc, 0, buffer,
1092     + ACPI_READ);
1093     + } else {
1094     + /* Use raw_datum (u64) to handle buffers < 64 bits */
1095     +
1096     + status =
1097     + acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
1098     + ACPI_READ);
1099     + ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
1100     + }
1101     +
1102     return_ACPI_STATUS(status);
1103     }
1104    
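The exfldio.c hunk above avoids an 8-byte write into a caller buffer shorter than 8 bytes by bouncing the datum through a local u64 and copying only buffer_length bytes. Reduced to standalone C (a sketch, assuming the field read always produces a full 64-bit datum):

    #include <stdint.h>
    #include <string.h>

    /* field_datum_read() stands in for acpi_ex_field_datum_io(..., ACPI_READ):
     * it always writes a full 64-bit datum to *dest. */
    static void field_datum_read(uint64_t *dest)
    {
        *dest = 0x1122334455667788ULL;    /* placeholder value */
    }

    static void extract_field(void *buffer, size_t buffer_length)
    {
        uint64_t raw_datum;

        if (buffer_length >= sizeof(uint64_t)) {
            field_datum_read(buffer);                  /* safe: buffer holds 8 bytes */
        } else {
            field_datum_read(&raw_datum);              /* read into a local u64 */
            memcpy(buffer, &raw_datum, buffer_length); /* then copy only what fits */
        }
    }
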
1105     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1106     index d45b287..edc0081 100644
1107     --- a/drivers/acpi/ec.c
1108     +++ b/drivers/acpi/ec.c
1109     @@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
1110     static int ec_poll(struct acpi_ec *ec)
1111     {
1112     unsigned long flags;
1113     - int repeat = 2; /* number of command restarts */
1114     + int repeat = 5; /* number of command restarts */
1115     while (repeat--) {
1116     unsigned long delay = jiffies +
1117     msecs_to_jiffies(ec_delay);
1118     @@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
1119     }
1120     advance_transaction(ec, acpi_ec_read_status(ec));
1121     } while (time_before(jiffies, delay));
1122     - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
1123     - break;
1124     pr_debug(PREFIX "controller reset, restart transaction\n");
1125     spin_lock_irqsave(&ec->lock, flags);
1126     start_transaction(ec);
1127     diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
1128     index e98da67..54d03d4 100644
1129     --- a/drivers/block/drbd/drbd_main.c
1130     +++ b/drivers/block/drbd/drbd_main.c
1131     @@ -2795,6 +2795,7 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
1132     blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1133     blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1134    
1135     + kfree(ldev->disk_conf);
1136     kfree(ldev);
1137     }
1138    
1139     diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
1140     index a9eccfc..2f5fffd 100644
1141     --- a/drivers/block/drbd/drbd_receiver.c
1142     +++ b/drivers/block/drbd/drbd_receiver.c
1143     @@ -2661,7 +2661,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
1144     if (hg == -1 && mdev->state.role == R_PRIMARY) {
1145     enum drbd_state_rv rv2;
1146    
1147     - drbd_set_role(mdev, R_SECONDARY, 0);
1148     /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
1149     * we might be here in C_WF_REPORT_PARAMS which is transient.
1150     * we do not need to wait for the after state change work either. */
1151     @@ -4659,8 +4658,8 @@ static int drbd_do_features(struct drbd_tconn *tconn)
1152     #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
1153     static int drbd_do_auth(struct drbd_tconn *tconn)
1154     {
1155     - dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
1156     - dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
1157     + conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
1158     + conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
1159     return -1;
1160     }
1161     #else
1162     diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
1163     index cdd4c09f..a22a7a5 100644
1164     --- a/drivers/char/ipmi/ipmi_bt_sm.c
1165     +++ b/drivers/char/ipmi/ipmi_bt_sm.c
1166     @@ -95,9 +95,9 @@ struct si_sm_data {
1167     enum bt_states state;
1168     unsigned char seq; /* BT sequence number */
1169     struct si_sm_io *io;
1170     - unsigned char write_data[IPMI_MAX_MSG_LENGTH];
1171     + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
1172     int write_count;
1173     - unsigned char read_data[IPMI_MAX_MSG_LENGTH];
1174     + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
1175     int read_count;
1176     int truncated;
1177     long timeout; /* microseconds countdown */
1178     diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
1179     index 9eb360f..d5a5f02 100644
1180     --- a/drivers/char/ipmi/ipmi_devintf.c
1181     +++ b/drivers/char/ipmi/ipmi_devintf.c
1182     @@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
1183     return ipmi_ioctl(filep, cmd, arg);
1184     }
1185     }
1186     +
1187     +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
1188     + unsigned long arg)
1189     +{
1190     + int ret;
1191     +
1192     + mutex_lock(&ipmi_mutex);
1193     + ret = compat_ipmi_ioctl(filep, cmd, arg);
1194     + mutex_unlock(&ipmi_mutex);
1195     +
1196     + return ret;
1197     +}
1198     #endif
1199    
1200     static const struct file_operations ipmi_fops = {
1201     .owner = THIS_MODULE,
1202     .unlocked_ioctl = ipmi_unlocked_ioctl,
1203     #ifdef CONFIG_COMPAT
1204     - .compat_ioctl = compat_ipmi_ioctl,
1205     + .compat_ioctl = unlocked_compat_ipmi_ioctl,
1206     #endif
1207     .open = ipmi_open,
1208     .release = ipmi_release,
1209     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
1210     index 6133ef5..d8a8c9b 100644
1211     --- a/drivers/cpufreq/intel_pstate.c
1212     +++ b/drivers/cpufreq/intel_pstate.c
1213     @@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
1214     }
1215    
1216     struct sample {
1217     - ktime_t start_time;
1218     - ktime_t end_time;
1219     int core_pct_busy;
1220     - int pstate_pct_busy;
1221     - u64 duration_us;
1222     - u64 idletime_us;
1223     u64 aperf;
1224     u64 mperf;
1225     int freq;
1226     @@ -91,8 +86,6 @@ struct cpudata {
1227     int min_pstate_count;
1228     int idle_mode;
1229    
1230     - ktime_t prev_sample;
1231     - u64 prev_idle_time_us;
1232     u64 prev_aperf;
1233     u64 prev_mperf;
1234     int sample_ptr;
1235     @@ -124,6 +117,8 @@ struct perf_limits {
1236     int min_perf_pct;
1237     int32_t max_perf;
1238     int32_t min_perf;
1239     + int max_policy_pct;
1240     + int max_sysfs_pct;
1241     };
1242    
1243     static struct perf_limits limits = {
1244     @@ -132,6 +127,8 @@ static struct perf_limits limits = {
1245     .max_perf = int_tofp(1),
1246     .min_perf_pct = 0,
1247     .min_perf = 0,
1248     + .max_policy_pct = 100,
1249     + .max_sysfs_pct = 100,
1250     };
1251    
1252     static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
1253     @@ -302,7 +299,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1254     if (ret != 1)
1255     return -EINVAL;
1256    
1257     - limits.max_perf_pct = clamp_t(int, input, 0 , 100);
1258     + limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
1259     + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1260     limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
1261     return count;
1262     }
1263     @@ -450,48 +448,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
1264     struct sample *sample)
1265     {
1266     u64 core_pct;
1267     - sample->pstate_pct_busy = 100 - div64_u64(
1268     - sample->idletime_us * 100,
1269     - sample->duration_us);
1270     core_pct = div64_u64(sample->aperf * 100, sample->mperf);
1271     sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
1272    
1273     - sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
1274     - 100);
1275     + sample->core_pct_busy = core_pct;
1276     }
1277    
1278     static inline void intel_pstate_sample(struct cpudata *cpu)
1279     {
1280     - ktime_t now;
1281     - u64 idle_time_us;
1282     u64 aperf, mperf;
1283    
1284     - now = ktime_get();
1285     - idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
1286     -
1287     rdmsrl(MSR_IA32_APERF, aperf);
1288     rdmsrl(MSR_IA32_MPERF, mperf);
1289     - /* for the first sample, don't actually record a sample, just
1290     - * set the baseline */
1291     - if (cpu->prev_idle_time_us > 0) {
1292     - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
1293     - cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
1294     - cpu->samples[cpu->sample_ptr].end_time = now;
1295     - cpu->samples[cpu->sample_ptr].duration_us =
1296     - ktime_us_delta(now, cpu->prev_sample);
1297     - cpu->samples[cpu->sample_ptr].idletime_us =
1298     - idle_time_us - cpu->prev_idle_time_us;
1299     -
1300     - cpu->samples[cpu->sample_ptr].aperf = aperf;
1301     - cpu->samples[cpu->sample_ptr].mperf = mperf;
1302     - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
1303     - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
1304     -
1305     - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
1306     - }
1307     + cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
1308     + cpu->samples[cpu->sample_ptr].aperf = aperf;
1309     + cpu->samples[cpu->sample_ptr].mperf = mperf;
1310     + cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
1311     + cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
1312     +
1313     + intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
1314    
1315     - cpu->prev_sample = now;
1316     - cpu->prev_idle_time_us = idle_time_us;
1317     cpu->prev_aperf = aperf;
1318     cpu->prev_mperf = mperf;
1319     }
1320     @@ -575,22 +551,16 @@ static void intel_pstate_timer_func(unsigned long __data)
1321     struct cpudata *cpu = (struct cpudata *) __data;
1322    
1323     intel_pstate_sample(cpu);
1324     + intel_pstate_adjust_busy_pstate(cpu);
1325    
1326     - if (!cpu->idle_mode)
1327     - intel_pstate_adjust_busy_pstate(cpu);
1328     - else
1329     - intel_pstate_adjust_idle_pstate(cpu);
1330     -
1331     -#if defined(XPERF_FIX)
1332     if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
1333     cpu->min_pstate_count++;
1334     if (!(cpu->min_pstate_count % 5)) {
1335     intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
1336     - intel_pstate_idle_mode(cpu);
1337     }
1338     } else
1339     cpu->min_pstate_count = 0;
1340     -#endif
1341     +
1342     intel_pstate_set_sample_time(cpu);
1343     }
1344    
1345     @@ -670,8 +640,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1346     limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
1347     limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
1348    
1349     - limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
1350     - limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
1351     + limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
1352     + limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
1353     + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1354     limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
1355    
1356     if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
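The new max_policy_pct/max_sysfs_pct fields above let a cpufreq policy change and a sysfs write each remember their own limit, so the effective max_perf_pct is always the tighter of the two instead of whichever was written last. The bookkeeping in isolation (standalone sketch, not driver code):

    static int max_policy_pct = 100;   /* from cpufreq policy->max */
    static int max_sysfs_pct = 100;    /* from the max_perf_pct sysfs file */
    static int max_perf_pct = 100;     /* effective limit used by the driver */

    static int clamp_pct(int v)
    {
        if (v < 0)
            return 0;
        return v > 100 ? 100 : v;
    }

    static void recompute_limit(void)
    {
        max_perf_pct = max_policy_pct < max_sysfs_pct ? max_policy_pct : max_sysfs_pct;
    }

    static void set_policy_limit(int pct)   /* intel_pstate_set_policy() path */
    {
        max_policy_pct = clamp_pct(pct);
        recompute_limit();
    }

    static void set_sysfs_limit(int pct)    /* store_max_perf_pct() path */
    {
        max_sysfs_pct = clamp_pct(pct);
        recompute_limit();
    }
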
1357     diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
1358     index 69d04d2..09c7ad1 100644
1359     --- a/drivers/dma/of-dma.c
1360     +++ b/drivers/dma/of-dma.c
1361     @@ -93,6 +93,7 @@ int of_dma_controller_register(struct device_node *np,
1362     {
1363     struct of_dma *ofdma;
1364     int nbcells;
1365     + const __be32 *prop;
1366    
1367     if (!np || !of_dma_xlate) {
1368     pr_err("%s: not enough information provided\n", __func__);
1369     @@ -103,8 +104,11 @@ int of_dma_controller_register(struct device_node *np,
1370     if (!ofdma)
1371     return -ENOMEM;
1372    
1373     - nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
1374     - if (!nbcells) {
1375     + prop = of_get_property(np, "#dma-cells", NULL);
1376     + if (prop)
1377     + nbcells = be32_to_cpup(prop);
1378     +
1379     + if (!prop || !nbcells) {
1380     pr_err("%s: #dma-cells property is missing or invalid\n",
1381     __func__);
1382     kfree(ofdma);
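The of-dma fix above is the usual defensive-lookup pattern: the old code fed the result of of_get_property() straight into be32_to_cpup(), which dereferences a NULL pointer when the #dma-cells property is absent. A generic standalone version of the pattern (sketch, not kernel code):

    #include <stddef.h>

    struct property { unsigned int value; };

    /* prop may be NULL, mirroring an of_get_property() miss */
    static int parse_dma_cells(const struct property *prop, int *nbcells_out)
    {
        int nbcells = 0;

        if (prop)
            nbcells = (int)prop->value;   /* only dereference after the NULL check */

        if (!prop || nbcells == 0)
            return -1;                    /* property missing or invalid */

        *nbcells_out = nbcells;
        return 0;
    }
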
1383     diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
1384     index d01faeb..ce3dc3e 100644
1385     --- a/drivers/dma/pch_dma.c
1386     +++ b/drivers/dma/pch_dma.c
1387     @@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
1388     dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
1389    
1390     if (!ret) {
1391     - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
1392     + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
1393     if (ret) {
1394     spin_lock(&pd_chan->lock);
1395     pd_chan->descs_allocated++;
1396     diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1397     index dd64a06..016c5d8 100644
1398     --- a/drivers/gpu/drm/drm_crtc.c
1399     +++ b/drivers/gpu/drm/drm_crtc.c
1400     @@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
1401     {
1402     struct drm_crtc *crtc;
1403    
1404     + /* Locking is currently fubar in the panic handler. */
1405     + if (oops_in_progress)
1406     + return;
1407     +
1408     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
1409     WARN_ON(!mutex_is_locked(&crtc->mutex));
1410    
1411     diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
1412     index db1e2d6..07cf99c 100644
1413     --- a/drivers/gpu/drm/drm_mm.c
1414     +++ b/drivers/gpu/drm/drm_mm.c
1415     @@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
1416     EXPORT_SYMBOL(drm_mm_debug_table);
1417    
1418     #if defined(CONFIG_DEBUG_FS)
1419     -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
1420     +static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
1421     {
1422     - struct drm_mm_node *entry;
1423     - unsigned long total_used = 0, total_free = 0, total = 0;
1424     unsigned long hole_start, hole_end, hole_size;
1425    
1426     - hole_start = drm_mm_hole_node_start(&mm->head_node);
1427     - hole_end = drm_mm_hole_node_end(&mm->head_node);
1428     - hole_size = hole_end - hole_start;
1429     - if (hole_size)
1430     + if (entry->hole_follows) {
1431     + hole_start = drm_mm_hole_node_start(entry);
1432     + hole_end = drm_mm_hole_node_end(entry);
1433     + hole_size = hole_end - hole_start;
1434     seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
1435     hole_start, hole_end, hole_size);
1436     - total_free += hole_size;
1437     + return hole_size;
1438     + }
1439     +
1440     + return 0;
1441     +}
1442     +
1443     +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
1444     +{
1445     + struct drm_mm_node *entry;
1446     + unsigned long total_used = 0, total_free = 0, total = 0;
1447     +
1448     + total_free += drm_mm_dump_hole(m, &mm->head_node);
1449    
1450     drm_mm_for_each_node(entry, mm) {
1451     seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
1452     entry->start, entry->start + entry->size,
1453     entry->size);
1454     total_used += entry->size;
1455     - if (entry->hole_follows) {
1456     - hole_start = drm_mm_hole_node_start(entry);
1457     - hole_end = drm_mm_hole_node_end(entry);
1458     - hole_size = hole_end - hole_start;
1459     - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
1460     - hole_start, hole_end, hole_size);
1461     - total_free += hole_size;
1462     - }
1463     + total_free += drm_mm_dump_hole(m, entry);
1464     }
1465     total = total_free + total_used;
1466    
1467     diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
1468     index 981bdce..898832b 100644
1469     --- a/drivers/gpu/drm/i915/intel_fb.c
1470     +++ b/drivers/gpu/drm/i915/intel_fb.c
1471     @@ -261,10 +261,22 @@ void intel_fbdev_fini(struct drm_device *dev)
1472     void intel_fbdev_set_suspend(struct drm_device *dev, int state)
1473     {
1474     drm_i915_private_t *dev_priv = dev->dev_private;
1475     - if (!dev_priv->fbdev)
1476     + struct intel_fbdev *ifbdev = dev_priv->fbdev;
1477     + struct fb_info *info;
1478     +
1479     + if (!ifbdev)
1480     return;
1481    
1482     - fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
1483     + info = ifbdev->helper.fbdev;
1484     +
1485     + /* On resume from hibernation: If the object is shmemfs backed, it has
1486     + * been restored from swap. If the object is stolen however, it will be
1487     + * full of whatever garbage was left in there.
1488     + */
1489     + if (!state && ifbdev->ifb.obj->stolen)
1490     + memset_io(info->screen_base, 0, info->screen_size);
1491     +
1492     + fb_set_suspend(info, state);
1493     }
1494    
1495     MODULE_LICENSE("GPL and additional rights");
1496     diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1497     index 78d8e91..713dd70 100644
1498     --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1499     +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1500     @@ -189,12 +189,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
1501     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1502     tmp = RREG8(DAC_DATA);
1503     tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
1504     - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
1505     + WREG8(DAC_DATA, tmp);
1506    
1507     WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
1508     tmp = RREG8(DAC_DATA);
1509     tmp |= MGA1064_REMHEADCTL_CLKDIS;
1510     - WREG_DAC(MGA1064_REMHEADCTL, tmp);
1511     + WREG8(DAC_DATA, tmp);
1512    
1513     /* select PLL Set C */
1514     tmp = RREG8(MGAREG_MEM_MISC_READ);
1515     @@ -204,7 +204,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
1516     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1517     tmp = RREG8(DAC_DATA);
1518     tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
1519     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1520     + WREG8(DAC_DATA, tmp);
1521    
1522     udelay(500);
1523    
1524     @@ -212,7 +212,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
1525     WREG8(DAC_INDEX, MGA1064_VREF_CTL);
1526     tmp = RREG8(DAC_DATA);
1527     tmp &= ~0x04;
1528     - WREG_DAC(MGA1064_VREF_CTL, tmp);
1529     + WREG8(DAC_DATA, tmp);
1530    
1531     udelay(50);
1532    
1533     @@ -236,13 +236,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
1534     tmp = RREG8(DAC_DATA);
1535     tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
1536     tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
1537     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1538     + WREG8(DAC_DATA, tmp);
1539    
1540     WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
1541     tmp = RREG8(DAC_DATA);
1542     tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
1543     tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
1544     - WREG_DAC(MGA1064_REMHEADCTL, tmp);
1545     + WREG8(DAC_DATA, tmp);
1546    
1547     /* reset dotclock rate bit */
1548     WREG8(MGAREG_SEQ_INDEX, 1);
1549     @@ -253,7 +253,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
1550     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1551     tmp = RREG8(DAC_DATA);
1552     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
1553     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1554     + WREG8(DAC_DATA, tmp);
1555    
1556     vcount = RREG8(MGAREG_VCOUNT);
1557    
1558     @@ -318,7 +318,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
1559     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1560     tmp = RREG8(DAC_DATA);
1561     tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
1562     - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
1563     + WREG8(DAC_DATA, tmp);
1564    
1565     tmp = RREG8(MGAREG_MEM_MISC_READ);
1566     tmp |= 0x3 << 2;
1567     @@ -326,12 +326,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
1568    
1569     WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
1570     tmp = RREG8(DAC_DATA);
1571     - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
1572     + WREG8(DAC_DATA, tmp & ~0x40);
1573    
1574     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1575     tmp = RREG8(DAC_DATA);
1576     tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
1577     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1578     + WREG8(DAC_DATA, tmp);
1579    
1580     WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
1581     WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
1582     @@ -342,7 +342,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
1583     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1584     tmp = RREG8(DAC_DATA);
1585     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
1586     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1587     + WREG8(DAC_DATA, tmp);
1588    
1589     udelay(500);
1590    
1591     @@ -350,11 +350,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
1592     tmp = RREG8(DAC_DATA);
1593     tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
1594     tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
1595     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1596     + WREG8(DAC_DATA, tmp);
1597    
1598     WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
1599     tmp = RREG8(DAC_DATA);
1600     - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
1601     + WREG8(DAC_DATA, tmp | 0x40);
1602    
1603     tmp = RREG8(MGAREG_MEM_MISC_READ);
1604     tmp |= (0x3 << 2);
1605     @@ -363,7 +363,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
1606     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1607     tmp = RREG8(DAC_DATA);
1608     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
1609     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1610     + WREG8(DAC_DATA, tmp);
1611    
1612     return 0;
1613     }
1614     @@ -416,7 +416,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
1615     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1616     tmp = RREG8(DAC_DATA);
1617     tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
1618     - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
1619     + WREG8(DAC_DATA, tmp);
1620    
1621     tmp = RREG8(MGAREG_MEM_MISC_READ);
1622     tmp |= 0x3 << 2;
1623     @@ -425,7 +425,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
1624     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1625     tmp = RREG8(DAC_DATA);
1626     tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
1627     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1628     + WREG8(DAC_DATA, tmp);
1629    
1630     udelay(500);
1631    
1632     @@ -439,13 +439,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
1633     tmp = RREG8(DAC_DATA);
1634     tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
1635     tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
1636     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1637     + WREG8(DAC_DATA, tmp);
1638    
1639     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1640     tmp = RREG8(DAC_DATA);
1641     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
1642     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
1643     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1644     + WREG8(DAC_DATA, tmp);
1645    
1646     vcount = RREG8(MGAREG_VCOUNT);
1647    
1648     @@ -515,12 +515,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
1649     WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
1650     tmp = RREG8(DAC_DATA);
1651     tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
1652     - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
1653     + WREG8(DAC_DATA, tmp);
1654    
1655     WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
1656     tmp = RREG8(DAC_DATA);
1657     tmp |= MGA1064_REMHEADCTL_CLKDIS;
1658     - WREG_DAC(MGA1064_REMHEADCTL, tmp);
1659     + WREG8(DAC_DATA, tmp);
1660    
1661     tmp = RREG8(MGAREG_MEM_MISC_READ);
1662     tmp |= (0x3<<2) | 0xc0;
1663     @@ -530,7 +530,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
1664     tmp = RREG8(DAC_DATA);
1665     tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
1666     tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
1667     - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
1668     + WREG8(DAC_DATA, tmp);
1669    
1670     udelay(500);
1671    
1672     @@ -657,12 +657,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
1673     WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
1674     }
1675    
1676     -
1677     +/*
1678     + This is how the framebuffer base address is stored in g200 cards:
1679     + * Assume @offset is the gpu_addr variable of the framebuffer object
1680     + * Then addr is the number of _pixels_ (not bytes) from the start of
1681     + VRAM to the first pixel we want to display. (divided by 2 for 32bit
1682     + framebuffers)
1683     + * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
1684     + addr<20> -> CRTCEXT0<6>
1685     + addr<19-16> -> CRTCEXT0<3-0>
1686     + addr<15-8> -> CRTCC<7-0>
1687     + addr<7-0> -> CRTCD<7-0>
1688     + CRTCEXT0 has to be programmed last to trigger an update and make the
1689     + new addr variable take effect.
1690     + */
1691     void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
1692     {
1693     struct mga_device *mdev = crtc->dev->dev_private;
1694     u32 addr;
1695     int count;
1696     + u8 crtcext0;
1697    
1698     while (RREG8(0x1fda) & 0x08);
1699     while (!(RREG8(0x1fda) & 0x08));
1700     @@ -670,10 +684,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
1701     count = RREG8(MGAREG_VCOUNT) + 2;
1702     while (RREG8(MGAREG_VCOUNT) < count);
1703    
1704     - addr = offset >> 2;
1705     + WREG8(MGAREG_CRTCEXT_INDEX, 0);
1706     + crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
1707     + crtcext0 &= 0xB0;
1708     + addr = offset / 8;
1709     + /* Can't store addresses any higher than that...
1710     + but we also don't have more than 16MB of memory, so it should be fine. */
1711     + WARN_ON(addr > 0x1fffff);
1712     + crtcext0 |= (!!(addr & (1<<20)))<<6;
1713     WREG_CRT(0x0d, (u8)(addr & 0xff));
1714     WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
1715     - WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
1716     + WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
1717     }
1718    
1719    
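
The comment block above spells out how mga_set_start_address() scatters the start address across the CRTCD, CRTCC and CRTCEXT0 registers. The standalone sketch below packs the bits the same way into plain variables instead of register writes; pack_start_address() is invented for illustration and assumes the 21-bit limit noted in the hunk.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the address split described above:
 * addr<7-0>   -> CRTCD<7-0>
 * addr<15-8>  -> CRTCC<7-0>
 * addr<19-16> -> CRTCEXT0<3-0>
 * addr<20>    -> CRTCEXT0<6>
 * CRTCEXT0 bits 7, 5 and 4 are preserved from the old register value. */
static void pack_start_address(uint32_t offset, uint8_t crtcext0_old,
			       uint8_t *crtcd, uint8_t *crtcc, uint8_t *crtcext0)
{
	uint32_t addr = offset / 8;

	assert(addr <= 0x1fffff);	/* only 21 address bits fit */

	*crtcd = (uint8_t)(addr & 0xff);
	*crtcc = (uint8_t)((addr >> 8) & 0xff);
	*crtcext0 = (uint8_t)((crtcext0_old & 0xB0) |
			      ((addr >> 16) & 0xf) |
			      ((addr & (1u << 20)) ? (1u << 6) : 0));
}

int main(void)
{
	uint8_t d, c, e;

	pack_start_address(0x00900000, 0xff, &d, &c, &e);
	printf("CRTCD=%02x CRTCC=%02x CRTCEXT0=%02x\n",
	       (unsigned)d, (unsigned)c, (unsigned)e);
	return 0;
}
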
1720     diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
1721     index 865e2c9..60170ea 100644
1722     --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
1723     +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
1724     @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
1725     OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
1726    
1727     for (i = 0; i < nr; ++i) {
1728     - if (DRM_COPY_FROM_USER_UNCHECKED
1729     + if (DRM_COPY_FROM_USER
1730     (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
1731     DRM_ERROR("copy cliprect faulted\n");
1732     return -EFAULT;
1733     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
1734     index 66a7f0f..96cf439 100644
1735     --- a/drivers/gpu/drm/radeon/radeon_drv.c
1736     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
1737     @@ -144,7 +144,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
1738     #endif
1739    
1740     int radeon_no_wb;
1741     -int radeon_modeset = 1;
1742     +int radeon_modeset = -1;
1743     int radeon_dynclks = -1;
1744     int radeon_r4xx_atom = 0;
1745     int radeon_agpmode = 0;
1746     @@ -449,6 +449,16 @@ static struct pci_driver radeon_kms_pci_driver = {
1747    
1748     static int __init radeon_init(void)
1749     {
1750     +#ifdef CONFIG_VGA_CONSOLE
1751     + if (vgacon_text_force() && radeon_modeset == -1) {
1752     + DRM_INFO("VGACON disable radeon kernel modesetting.\n");
1753     + radeon_modeset = 0;
1754     + }
1755     +#endif
1756     + /* set to modesetting by default if not nomodeset */
1757     + if (radeon_modeset == -1)
1758     + radeon_modeset = 1;
1759     +
1760     if (radeon_modeset == 1) {
1761     DRM_INFO("radeon kernel modesetting enabled.\n");
1762     driver = &kms_driver;
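
With this change radeon_modeset starts at -1 ("auto") and is only resolved in radeon_init(): a forced VGA text console turns kernel modesetting off, otherwise it defaults to on. A standalone sketch of that tri-state resolution follows; text_console_forced() is a stand-in for vgacon_text_force(), not a real kernel call.

#include <stdio.h>

/* Stand-in for vgacon_text_force(): whether the user forced a VGA text
 * console (e.g. booted with nomodeset). */
static int text_console_forced(void)
{
	return 0;
}

/* -1 = auto (decide at init), 0 = off, 1 = on: the tri-state convention
 * the patch introduces for the radeon_modeset parameter. */
static int resolve_modeset(int modeset_param)
{
	if (text_console_forced() && modeset_param == -1)
		modeset_param = 0;	/* respect the forced text console */
	if (modeset_param == -1)
		modeset_param = 1;	/* otherwise default to KMS */
	return modeset_param;
}

int main(void)
{
	printf("modeset resolved to %d\n", resolve_modeset(-1));
	return 0;
}
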
1763     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1764     index aa341d1..e6dbf09 100644
1765     --- a/drivers/hid/hid-core.c
1766     +++ b/drivers/hid/hid-core.c
1767     @@ -1702,6 +1702,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1768     { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
1769     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
1770     { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
1771     + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
1772     { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
1773     { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
1774     { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
1775     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1776     index c608313..0387e05 100644
1777     --- a/drivers/md/dm-bufio.c
1778     +++ b/drivers/md/dm-bufio.c
1779     @@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
1780     static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1781     enum data_mode *data_mode)
1782     {
1783     + unsigned noio_flag;
1784     + void *ptr;
1785     +
1786     if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
1787     *data_mode = DATA_MODE_SLAB;
1788     return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
1789     @@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1790     }
1791    
1792     *data_mode = DATA_MODE_VMALLOC;
1793     - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
1794     +
1795     + /*
1796     + * __vmalloc allocates the data pages and auxiliary structures with
1797     + * gfp_flags that were specified, but pagetables are always allocated
1798     + * with GFP_KERNEL, no matter what was specified as gfp_mask.
1799     + *
1800     + * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
1801     + * all allocations done by this process (including pagetables) are done
1802     + * as if GFP_NOIO was specified.
1803     + */
1804     +
1805     + if (gfp_mask & __GFP_NORETRY)
1806     + noio_flag = memalloc_noio_save();
1807     +
1808     + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
1809     +
1810     + if (gfp_mask & __GFP_NORETRY)
1811     + memalloc_noio_restore(noio_flag);
1812     +
1813     + return ptr;
1814     }
1815    
1816     /*
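
The dm-bufio hunk wraps __vmalloc() in memalloc_noio_save()/memalloc_noio_restore() so that even the pagetable allocations made on its behalf behave as GFP_NOIO. The standalone model below imitates that save/restore idiom with an ordinary flag variable; current_flags, FLAG_NOIO and alloc_block() are stand-ins for illustration, not kernel symbols.

#include <stdio.h>
#include <stdlib.h>

static unsigned current_flags;
#define FLAG_NOIO 0x1u

/* Save the old state of the flag and force it on. */
static unsigned noio_save(void)
{
	unsigned old = current_flags & FLAG_NOIO;

	current_flags |= FLAG_NOIO;
	return old;
}

/* Put the flag back exactly as it was. */
static void noio_restore(unsigned old)
{
	current_flags = (current_flags & ~FLAG_NOIO) | old;
}

static void *alloc_block(size_t size, int no_retry)
{
	unsigned saved = 0;
	void *ptr;

	if (no_retry)			/* mirrors the __GFP_NORETRY test */
		saved = noio_save();

	ptr = malloc(size);		/* __vmalloc() in the driver */

	if (no_retry)
		noio_restore(saved);
	return ptr;
}

int main(void)
{
	void *p = alloc_block(4096, 1);

	printf("flag after allocation: %u (expect 0)\n", current_flags & FLAG_NOIO);
	free(p);
	return 0;
}
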
1817     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1818     index 1074409..6feaba2 100644
1819     --- a/drivers/md/dm-cache-target.c
1820     +++ b/drivers/md/dm-cache-target.c
1821     @@ -1971,6 +1971,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1822     atomic_set(&cache->nr_migrations, 0);
1823     init_waitqueue_head(&cache->migration_wait);
1824    
1825     + r = -ENOMEM;
1826     cache->nr_dirty = 0;
1827     cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
1828     if (!cache->dirty_bitset) {
1829     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1830     index c0e0702..c434e5a 100644
1831     --- a/drivers/md/dm-snap.c
1832     +++ b/drivers/md/dm-snap.c
1833     @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1834     s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1835     if (!s->pending_pool) {
1836     ti->error = "Could not allocate mempool for pending exceptions";
1837     + r = -ENOMEM;
1838     goto bad_pending_pool;
1839     }
1840    
1841     diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
1842     index d8837d3..7b8b2b9 100644
1843     --- a/drivers/md/dm-stripe.c
1844     +++ b/drivers/md/dm-stripe.c
1845     @@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
1846     static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1847     {
1848     struct stripe_c *sc;
1849     - sector_t width;
1850     + sector_t width, tmp_len;
1851     uint32_t stripes;
1852     uint32_t chunk_size;
1853     int r;
1854     @@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1855     }
1856    
1857     width = ti->len;
1858     - if (sector_div(width, chunk_size)) {
1859     + if (sector_div(width, stripes)) {
1860     ti->error = "Target length not divisible by "
1861     - "chunk size";
1862     + "number of stripes";
1863     return -EINVAL;
1864     }
1865    
1866     - if (sector_div(width, stripes)) {
1867     + tmp_len = width;
1868     + if (sector_div(tmp_len, chunk_size)) {
1869     ti->error = "Target length not divisible by "
1870     - "number of stripes";
1871     + "chunk size";
1872     return -EINVAL;
1873     }
1874    
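
The dm-stripe fix reorders the two divisibility checks: the target length must first divide evenly by the number of stripes, and only the resulting per-stripe width is then checked against the chunk size. A standalone model of the corrected order (names are illustrative, not the dm structures):

#include <stdint.h>
#include <stdio.h>

static int check_stripe_geometry(uint64_t len, uint32_t stripes,
				 uint32_t chunk_size)
{
	uint64_t width;

	if (len % stripes)
		return -1;	/* "Target length not divisible by number of stripes" */
	width = len / stripes;

	if (width % chunk_size)
		return -2;	/* "Target length not divisible by chunk size" */

	return 0;
}

int main(void)
{
	printf("%d\n", check_stripe_geometry(1024, 4, 64));	/* 0: ok */
	printf("%d\n", check_stripe_geometry(1000, 4, 64));	/* -2: width 250 vs chunk 64 */
	return 0;
}
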
1875     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1876     index e50dad0..1ff252a 100644
1877     --- a/drivers/md/dm-table.c
1878     +++ b/drivers/md/dm-table.c
1879     @@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
1880     return false;
1881    
1882     if (!ti->type->iterate_devices ||
1883     - !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1884     + ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
1885     return false;
1886     }
1887    
1888     diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
1889     index f36ff99..adb4bf5 100644
1890     --- a/drivers/net/ethernet/3com/3c509.c
1891     +++ b/drivers/net/ethernet/3com/3c509.c
1892     @@ -306,6 +306,7 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
1893     if (!dev)
1894     return -ENOMEM;
1895    
1896     + SET_NETDEV_DEV(dev, pdev);
1897     netdev_boot_setup_check(dev);
1898    
1899     if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
1900     @@ -595,6 +596,7 @@ static int __init el3_eisa_probe (struct device *device)
1901     return -ENOMEM;
1902     }
1903    
1904     + SET_NETDEV_DEV(dev, device);
1905     netdev_boot_setup_check(dev);
1906    
1907     el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
1908     diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
1909     index 1928e20..072c6f1 100644
1910     --- a/drivers/net/ethernet/3com/3c59x.c
1911     +++ b/drivers/net/ethernet/3com/3c59x.c
1912     @@ -632,7 +632,6 @@ struct vortex_private {
1913     pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
1914     open:1,
1915     medialock:1,
1916     - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
1917     large_frames:1, /* accept large frames */
1918     handling_irq:1; /* private in_irq indicator */
1919     /* {get|set}_wol operations are already serialized by rtnl.
1920     @@ -951,7 +950,7 @@ static int vortex_eisa_remove(struct device *device)
1921    
1922     unregister_netdev(dev);
1923     iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
1924     - release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
1925     + release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
1926    
1927     free_netdev(dev);
1928     return 0;
1929     @@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
1930     if (rc < 0)
1931     goto out;
1932    
1933     + rc = pci_request_regions(pdev, DRV_NAME);
1934     + if (rc < 0) {
1935     + pci_disable_device(pdev);
1936     + goto out;
1937     + }
1938     +
1939     unit = vortex_cards_found;
1940    
1941     if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
1942     @@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
1943     if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
1944     ioaddr = pci_iomap(pdev, 0, 0);
1945     if (!ioaddr) {
1946     + pci_release_regions(pdev);
1947     pci_disable_device(pdev);
1948     rc = -ENOMEM;
1949     goto out;
1950     @@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
1951     ent->driver_data, unit);
1952     if (rc < 0) {
1953     pci_iounmap(pdev, ioaddr);
1954     + pci_release_regions(pdev);
1955     pci_disable_device(pdev);
1956     goto out;
1957     }
1958     @@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1959    
1960     /* PCI-only startup logic */
1961     if (pdev) {
1962     - /* EISA resources already marked, so only PCI needs to do this here */
1963     - /* Ignore return value, because Cardbus drivers already allocate for us */
1964     - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
1965     - vp->must_free_region = 1;
1966     -
1967     /* enable bus-mastering if necessary */
1968     if (vci->flags & PCI_USES_MASTER)
1969     pci_set_master(pdev);
1970     @@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1971     &vp->rx_ring_dma);
1972     retval = -ENOMEM;
1973     if (!vp->rx_ring)
1974     - goto free_region;
1975     + goto free_device;
1976    
1977     vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1978     vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
1979     @@ -1484,9 +1486,7 @@ free_ring:
1980     + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1981     vp->rx_ring,
1982     vp->rx_ring_dma);
1983     -free_region:
1984     - if (vp->must_free_region)
1985     - release_region(dev->base_addr, vci->io_size);
1986     +free_device:
1987     free_netdev(dev);
1988     pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
1989     out:
1990     @@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
1991     + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1992     vp->rx_ring,
1993     vp->rx_ring_dma);
1994     - if (vp->must_free_region)
1995     - release_region(dev->base_addr, vp->io_size);
1996     +
1997     + pci_release_regions(pdev);
1998     +
1999     free_netdev(dev);
2000     }
2001    
2002     diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
2003     index 0095ce9..97dd8f18 100644
2004     --- a/drivers/net/ethernet/sfc/mcdi.c
2005     +++ b/drivers/net/ethernet/sfc/mcdi.c
2006     @@ -667,7 +667,7 @@ fail:
2007     int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
2008     u16 *fw_subtype_list, u32 *capabilities)
2009     {
2010     - uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
2011     + uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
2012     size_t outlen, offset, i;
2013     int port_num = efx_port_num(efx);
2014     int rc;
2015     diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
2016     index 66e025a..f3c2d03 100644
2017     --- a/drivers/net/ethernet/tile/tilegx.c
2018     +++ b/drivers/net/ethernet/tile/tilegx.c
2019     @@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
2020     if (info->has_iqueue) {
2021     gxio_mpipe_request_notif_ring_interrupt(
2022     &context, cpu_x(cpu), cpu_y(cpu),
2023     - 1, ingress_irq, info->iqueue.ring);
2024     + KERNEL_PL, ingress_irq, info->iqueue.ring);
2025     }
2026     }
2027    
2028     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
2029     index 73abbc1..011062e 100644
2030     --- a/drivers/net/macvlan.c
2031     +++ b/drivers/net/macvlan.c
2032     @@ -222,7 +222,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
2033     }
2034    
2035     if (port->passthru)
2036     - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
2037     + vlan = list_first_or_null_rcu(&port->vlans,
2038     + struct macvlan_dev, list);
2039     else
2040     vlan = macvlan_hash_lookup(port, eth->h_dest);
2041     if (vlan == NULL)
2042     @@ -807,7 +808,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
2043     if (err < 0)
2044     goto upper_dev_unlink;
2045    
2046     - list_add_tail(&vlan->list, &port->vlans);
2047     + list_add_tail_rcu(&vlan->list, &port->vlans);
2048     netif_stacked_transfer_operstate(lowerdev, dev);
2049    
2050     return 0;
2051     @@ -835,7 +836,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
2052     {
2053     struct macvlan_dev *vlan = netdev_priv(dev);
2054    
2055     - list_del(&vlan->list);
2056     + list_del_rcu(&vlan->list);
2057     unregister_netdevice_queue(dev, head);
2058     netdev_upper_dev_unlink(vlan->lowerdev, dev);
2059     }
2060     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2061     index 729ed53..755fa9e 100644
2062     --- a/drivers/net/tun.c
2063     +++ b/drivers/net/tun.c
2064     @@ -1471,14 +1471,17 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
2065     if (!tun)
2066     return -EBADFD;
2067    
2068     - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
2069     - return -EINVAL;
2070     + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
2071     + ret = -EINVAL;
2072     + goto out;
2073     + }
2074     ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
2075     flags & MSG_DONTWAIT);
2076     if (ret > total_len) {
2077     m->msg_flags |= MSG_TRUNC;
2078     ret = flags & MSG_TRUNC ? ret : total_len;
2079     }
2080     +out:
2081     tun_put(tun);
2082     return ret;
2083     }
2084     diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
2085     index f7f623a..577c72d 100644
2086     --- a/drivers/net/usb/asix_common.c
2087     +++ b/drivers/net/usb/asix_common.c
2088     @@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
2089     netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
2090     rx->size);
2091     kfree_skb(rx->ax_skb);
2092     + rx->ax_skb = NULL;
2093     + rx->size = 0U;
2094     +
2095     return 0;
2096     }
2097    
2098     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2099     index 988372d..e509c37 100644
2100     --- a/drivers/net/wireless/ath/ath9k/main.c
2101     +++ b/drivers/net/wireless/ath/ath9k/main.c
2102     @@ -1308,6 +1308,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
2103     struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2104     struct ath_node *an = (struct ath_node *) sta->drv_priv;
2105     struct ieee80211_key_conf ps_key = { };
2106     + int key;
2107    
2108     ath_node_attach(sc, sta, vif);
2109    
2110     @@ -1315,7 +1316,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
2111     vif->type != NL80211_IFTYPE_AP_VLAN)
2112     return 0;
2113    
2114     - an->ps_key = ath_key_config(common, vif, sta, &ps_key);
2115     + key = ath_key_config(common, vif, sta, &ps_key);
2116     + if (key > 0)
2117     + an->ps_key = key;
2118    
2119     return 0;
2120     }
2121     @@ -1332,6 +1335,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
2122     return;
2123    
2124     ath_key_delete(common, &ps_key);
2125     + an->ps_key = 0;
2126     }
2127    
2128     static int ath9k_sta_remove(struct ieee80211_hw *hw,
2129     diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
2130     index 1221469..ee3d640 100644
2131     --- a/drivers/net/wireless/b43/dma.c
2132     +++ b/drivers/net/wireless/b43/dma.c
2133     @@ -1733,6 +1733,25 @@ drop_recycle_buffer:
2134     sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
2135     }
2136    
2137     +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
2138     +{
2139     + int current_slot, previous_slot;
2140     +
2141     + B43_WARN_ON(ring->tx);
2142     +
2143     + /* Device has filled all buffers, drop all packets and let TCP
2144     + * decrease speed.
2145     + * Decrementing the RX index by one lets the device see all slots
2146     + * as free again.
2147     + */
2148     + /*
2149     + * TODO: How to increase rx_drop in mac80211?
2150     + */
2151     + current_slot = ring->ops->get_current_rxslot(ring);
2152     + previous_slot = prev_slot(ring, current_slot);
2153     + ring->ops->set_current_rxslot(ring, previous_slot);
2154     +}
2155     +
2156     void b43_dma_rx(struct b43_dmaring *ring)
2157     {
2158     const struct b43_dma_ops *ops = ring->ops;
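
The overflow handler added above simply steps the current RX slot index back by one so the device sees the whole ring as free again and the dropped packets make the sender back off. A standalone model of that slot arithmetic (generic ring indices, no b43 types):

#include <stdio.h>

static int prev_slot(int slot, int nr_slots)
{
	return (slot == 0) ? nr_slots - 1 : slot - 1;
}

/* When every RX slot is full, report the previous slot as "current";
 * the hardware then treats all slots as available again. */
static int handle_rx_overflow(int current_slot, int nr_slots)
{
	return prev_slot(current_slot, nr_slots);
}

int main(void)
{
	printf("new rx slot: %d\n", handle_rx_overflow(0, 64));	/* wraps to 63 */
	return 0;
}
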
2159     diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
2160     index 9fdd198..df8c8cd 100644
2161     --- a/drivers/net/wireless/b43/dma.h
2162     +++ b/drivers/net/wireless/b43/dma.h
2163     @@ -9,7 +9,7 @@
2164     /* DMA-Interrupt reasons. */
2165     #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
2166     | (1 << 14) | (1 << 15))
2167     -#define B43_DMAIRQ_NONFATALMASK (1 << 13)
2168     +#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
2169     #define B43_DMAIRQ_RX_DONE (1 << 16)
2170    
2171     /*** 32-bit DMA Engine. ***/
2172     @@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
2173     void b43_dma_handle_txstatus(struct b43_wldev *dev,
2174     const struct b43_txstatus *status);
2175    
2176     +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
2177     +
2178     void b43_dma_rx(struct b43_dmaring *ring);
2179    
2180     void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
2181     diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
2182     index 0568273..64b637a 100644
2183     --- a/drivers/net/wireless/b43/main.c
2184     +++ b/drivers/net/wireless/b43/main.c
2185     @@ -1895,30 +1895,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
2186     }
2187     }
2188    
2189     - if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
2190     - B43_DMAIRQ_NONFATALMASK))) {
2191     - if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
2192     - b43err(dev->wl, "Fatal DMA error: "
2193     - "0x%08X, 0x%08X, 0x%08X, "
2194     - "0x%08X, 0x%08X, 0x%08X\n",
2195     - dma_reason[0], dma_reason[1],
2196     - dma_reason[2], dma_reason[3],
2197     - dma_reason[4], dma_reason[5]);
2198     - b43err(dev->wl, "This device does not support DMA "
2199     + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
2200     + b43err(dev->wl,
2201     + "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
2202     + dma_reason[0], dma_reason[1],
2203     + dma_reason[2], dma_reason[3],
2204     + dma_reason[4], dma_reason[5]);
2205     + b43err(dev->wl, "This device does not support DMA "
2206     "on your system. It will now be switched to PIO.\n");
2207     - /* Fall back to PIO transfers if we get fatal DMA errors! */
2208     - dev->use_pio = true;
2209     - b43_controller_restart(dev, "DMA error");
2210     - return;
2211     - }
2212     - if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
2213     - b43err(dev->wl, "DMA error: "
2214     - "0x%08X, 0x%08X, 0x%08X, "
2215     - "0x%08X, 0x%08X, 0x%08X\n",
2216     - dma_reason[0], dma_reason[1],
2217     - dma_reason[2], dma_reason[3],
2218     - dma_reason[4], dma_reason[5]);
2219     - }
2220     + /* Fall back to PIO transfers if we get fatal DMA errors! */
2221     + dev->use_pio = true;
2222     + b43_controller_restart(dev, "DMA error");
2223     + return;
2224     }
2225    
2226     if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
2227     @@ -1937,6 +1925,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
2228     handle_irq_noise(dev);
2229    
2230     /* Check the DMA reason registers for received data. */
2231     + if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
2232     + if (B43_DEBUG)
2233     + b43warn(dev->wl, "RX descriptor underrun\n");
2234     + b43_dma_handle_rx_overflow(dev->dma.rx_ring);
2235     + }
2236     if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
2237     if (b43_using_pio_transfers(dev))
2238     b43_pio_rx(dev->pio.rx_queue);
2239     @@ -1994,7 +1987,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
2240     return IRQ_NONE;
2241    
2242     dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
2243     - & 0x0001DC00;
2244     + & 0x0001FC00;
2245     dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
2246     & 0x0000DC00;
2247     dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
2248     @@ -3126,7 +3119,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2249     b43_write32(dev, 0x018C, 0x02000000);
2250     }
2251     b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
2252     - b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
2253     + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
2254     b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
2255     b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
2256     b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
2257     diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
2258     index 7941eb3..cbaa777 100644
2259     --- a/drivers/net/wireless/iwlegacy/4965-mac.c
2260     +++ b/drivers/net/wireless/iwlegacy/4965-mac.c
2261     @@ -5740,8 +5740,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
2262     hw->flags =
2263     IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
2264     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
2265     - IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
2266     - IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2267     + IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2268     if (il->cfg->sku & IL_SKU_N)
2269     hw->flags |=
2270     IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2271     diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
2272     index 8aaf56a..c13f6e9 100644
2273     --- a/drivers/net/wireless/mwifiex/cfg80211.c
2274     +++ b/drivers/net/wireless/mwifiex/cfg80211.c
2275     @@ -2280,9 +2280,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2276     if (wdev->netdev->reg_state == NETREG_REGISTERED)
2277     unregister_netdevice(wdev->netdev);
2278    
2279     - if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
2280     - free_netdev(wdev->netdev);
2281     -
2282     /* Clear the priv in adapter */
2283     priv->netdev = NULL;
2284    
2285     diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
2286     index b5c8b96..aeade10 100644
2287     --- a/drivers/net/wireless/mwifiex/cmdevt.c
2288     +++ b/drivers/net/wireless/mwifiex/cmdevt.c
2289     @@ -1176,6 +1176,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
2290     adapter->if_ops.wakeup(adapter);
2291     adapter->hs_activated = false;
2292     adapter->is_hs_configured = false;
2293     + adapter->is_suspended = false;
2294     mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
2295     MWIFIEX_BSS_ROLE_ANY),
2296     false);
2297     diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
2298     index 9c802ed..6d9bc63 100644
2299     --- a/drivers/net/wireless/mwifiex/main.c
2300     +++ b/drivers/net/wireless/mwifiex/main.c
2301     @@ -646,6 +646,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
2302     struct net_device *dev)
2303     {
2304     dev->netdev_ops = &mwifiex_netdev_ops;
2305     + dev->destructor = free_netdev;
2306     /* Initialize private structure */
2307     priv->current_key_index = 0;
2308     priv->media_connected = false;
2309     diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
2310     index 13100f8..fb420fe 100644
2311     --- a/drivers/net/wireless/mwifiex/sta_ioctl.c
2312     +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
2313     @@ -99,7 +99,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
2314     } else {
2315     /* Multicast */
2316     priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
2317     - if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
2318     + if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
2319     dev_dbg(priv->adapter->dev,
2320     "info: Enabling All Multicast!\n");
2321     priv->curr_pkt_filter |=
2322     @@ -111,20 +111,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
2323     dev_dbg(priv->adapter->dev,
2324     "info: Set multicast list=%d\n",
2325     mcast_list->num_multicast_addr);
2326     - /* Set multicast addresses to firmware */
2327     - if (old_pkt_filter == priv->curr_pkt_filter) {
2328     - /* Send request to firmware */
2329     - ret = mwifiex_send_cmd_async(priv,
2330     - HostCmd_CMD_MAC_MULTICAST_ADR,
2331     - HostCmd_ACT_GEN_SET, 0,
2332     - mcast_list);
2333     - } else {
2334     - /* Send request to firmware */
2335     - ret = mwifiex_send_cmd_async(priv,
2336     - HostCmd_CMD_MAC_MULTICAST_ADR,
2337     - HostCmd_ACT_GEN_SET, 0,
2338     - mcast_list);
2339     - }
2340     + /* Send multicast addresses to firmware */
2341     + ret = mwifiex_send_cmd_async(priv,
2342     + HostCmd_CMD_MAC_MULTICAST_ADR,
2343     + HostCmd_ACT_GEN_SET, 0,
2344     + mcast_list);
2345     }
2346     }
2347     }
2348     diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
2349     index e64a7a8..a8e43cf 100644
2350     --- a/drivers/platform/x86/hp_accel.c
2351     +++ b/drivers/platform/x86/hp_accel.c
2352     @@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
2353    
2354     static int lis3lv02d_resume(struct device *dev)
2355     {
2356     - return lis3lv02d_poweron(&lis3_dev);
2357     + lis3lv02d_poweron(&lis3_dev);
2358     + return 0;
2359     }
2360    
2361     static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
2362     diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
2363     index 79fbe38..9e95473 100644
2364     --- a/drivers/rtc/Kconfig
2365     +++ b/drivers/rtc/Kconfig
2366     @@ -20,7 +20,6 @@ if RTC_CLASS
2367     config RTC_HCTOSYS
2368     bool "Set system time from RTC on startup and resume"
2369     default y
2370     - depends on !ALWAYS_USE_PERSISTENT_CLOCK
2371     help
2372     If you say yes here, the system time (wall clock) will be set using
2373     the value read from a specified RTC device. This is useful to avoid
2374     @@ -29,7 +28,6 @@ config RTC_HCTOSYS
2375     config RTC_SYSTOHC
2376     bool "Set the RTC time based on NTP synchronization"
2377     default y
2378     - depends on !ALWAYS_USE_PERSISTENT_CLOCK
2379     help
2380     If you say yes here, the system time (wall clock) will be stored
2381     in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
2382     diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
2383     index 02b742a..6dd6b38 100644
2384     --- a/drivers/rtc/rtc-pcf2123.c
2385     +++ b/drivers/rtc/rtc-pcf2123.c
2386     @@ -265,6 +265,7 @@ static int pcf2123_probe(struct spi_device *spi)
2387    
2388     if (!(rxbuf[0] & 0x20)) {
2389     dev_err(&spi->dev, "chip not found\n");
2390     + ret = -ENODEV;
2391     goto kfree_exit;
2392     }
2393    
2394     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2395     index 7992635..82910cc 100644
2396     --- a/drivers/scsi/sd.c
2397     +++ b/drivers/scsi/sd.c
2398     @@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
2399     char *buffer_data;
2400     struct scsi_mode_data data;
2401     struct scsi_sense_hdr sshdr;
2402     + const char *temp = "temporary ";
2403     int len;
2404    
2405     if (sdp->type != TYPE_DISK)
2406     @@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
2407     * it's not worth the risk */
2408     return -EINVAL;
2409    
2410     + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
2411     + buf += sizeof(temp) - 1;
2412     + sdkp->cache_override = 1;
2413     + } else {
2414     + sdkp->cache_override = 0;
2415     + }
2416     +
2417     for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
2418     len = strlen(sd_cache_types[i]);
2419     if (strncmp(sd_cache_types[i], buf, len) == 0 &&
2420     @@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
2421     return -EINVAL;
2422     rcd = ct & 0x01 ? 1 : 0;
2423     wce = ct & 0x02 ? 1 : 0;
2424     +
2425     + if (sdkp->cache_override) {
2426     + sdkp->WCE = wce;
2427     + sdkp->RCD = rcd;
2428     + return count;
2429     + }
2430     +
2431     if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
2432     SD_MAX_RETRIES, &data, NULL))
2433     return -EINVAL;
2434     @@ -2319,6 +2334,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2435     int old_rcd = sdkp->RCD;
2436     int old_dpofua = sdkp->DPOFUA;
2437    
2438     +
2439     + if (sdkp->cache_override)
2440     + return;
2441     +
2442     first_len = 4;
2443     if (sdp->skip_ms_page_8) {
2444     if (sdp->type == TYPE_RBC)
2445     @@ -2812,6 +2831,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2446     sdkp->capacity = 0;
2447     sdkp->media_present = 1;
2448     sdkp->write_prot = 0;
2449     + sdkp->cache_override = 0;
2450     sdkp->WCE = 0;
2451     sdkp->RCD = 0;
2452     sdkp->ATO = 0;
2453     diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
2454     index 74a1e4c..2386aeb 100644
2455     --- a/drivers/scsi/sd.h
2456     +++ b/drivers/scsi/sd.h
2457     @@ -73,6 +73,7 @@ struct scsi_disk {
2458     u8 protection_type;/* Data Integrity Field */
2459     u8 provisioning_mode;
2460     unsigned ATO : 1; /* state of disk ATO bit */
2461     + unsigned cache_override : 1; /* temp override of WCE,RCD */
2462     unsigned WCE : 1; /* state of disk WCE bit */
2463     unsigned RCD : 1; /* state of disk RCD bit, unused */
2464     unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
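
The sd.c change lets a value written to the cache_type attribute carry a "temporary " prefix, in which case the WCE/RCD bits are updated in the driver's state only (cache_override) and the MODE SELECT to the device is skipped. Below is a standalone sketch of just the string handling; parse_cache_type() is invented for illustration, and only the "write through"/"write back" names from the kernel's cache-type table are modeled.

#include <stdio.h>
#include <string.h>

static int parse_cache_type(const char *buf, int *cache_override)
{
	const char *prefix = "temporary ";
	size_t plen = strlen(prefix);

	if (strncmp(buf, prefix, plen) == 0) {
		buf += plen;		/* strip the prefix, remember the override */
		*cache_override = 1;
	} else {
		*cache_override = 0;
	}

	if (strcmp(buf, "write through") == 0)
		return 0;		/* index into the cache-type table */
	if (strcmp(buf, "write back") == 0)
		return 2;
	return -1;
}

int main(void)
{
	int override;
	int ct = parse_cache_type("temporary write back", &override);

	printf("type=%d override=%d\n", ct, override);
	return 0;
}
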
2465     diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
2466     index 0b52a23..805f3d2 100644
2467     --- a/drivers/target/iscsi/iscsi_target_erl1.c
2468     +++ b/drivers/target/iscsi/iscsi_target_erl1.c
2469     @@ -819,7 +819,7 @@ static int iscsit_attach_ooo_cmdsn(
2470     /*
2471     * CmdSN is greater than the tail of the list.
2472     */
2473     - if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
2474     + if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
2475     list_add_tail(&ooo_cmdsn->ooo_list,
2476     &sess->sess_ooo_cmdsn_list);
2477     else {
2478     @@ -829,11 +829,12 @@ static int iscsit_attach_ooo_cmdsn(
2479     */
2480     list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
2481     ooo_list) {
2482     - if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
2483     + if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
2484     continue;
2485    
2486     + /* Insert before this entry */
2487     list_add(&ooo_cmdsn->ooo_list,
2488     - &ooo_tmp->ooo_list);
2489     + ooo_tmp->ooo_list.prev);
2490     break;
2491     }
2492     }
2493     diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
2494     index 17a6acb..ca4b219 100644
2495     --- a/drivers/target/target_core_file.c
2496     +++ b/drivers/target/target_core_file.c
2497     @@ -148,13 +148,8 @@ static int fd_configure_device(struct se_device *dev)
2498     */
2499     inode = file->f_mapping->host;
2500     if (S_ISBLK(inode->i_mode)) {
2501     - struct request_queue *q = bdev_get_queue(inode->i_bdev);
2502     unsigned long long dev_size;
2503    
2504     - dev->dev_attrib.hw_block_size =
2505     - bdev_logical_block_size(inode->i_bdev);
2506     - dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
2507     -
2508     /*
2509     * Determine the number of bytes from i_size_read() minus
2510     * one (1) logical sector from underlying struct block_device
2511     @@ -173,13 +168,12 @@ static int fd_configure_device(struct se_device *dev)
2512     " block_device\n");
2513     goto fail;
2514     }
2515     -
2516     - dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
2517     - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
2518     }
2519    
2520     fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
2521    
2522     + dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
2523     + dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
2524     dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
2525    
2526     if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
2527     diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
2528     index 8bcc514..e1af9d5 100644
2529     --- a/drivers/target/target_core_iblock.c
2530     +++ b/drivers/target/target_core_iblock.c
2531     @@ -679,6 +679,8 @@ iblock_execute_rw(struct se_cmd *cmd)
2532     rw = WRITE_FUA;
2533     else if (!(q->flush_flags & REQ_FLUSH))
2534     rw = WRITE_FUA;
2535     + else
2536     + rw = WRITE;
2537     } else {
2538     rw = WRITE;
2539     }
2540     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2541     index 3243ea7..0d46276 100644
2542     --- a/drivers/target/target_core_transport.c
2543     +++ b/drivers/target/target_core_transport.c
2544     @@ -2213,21 +2213,19 @@ static void target_release_cmd_kref(struct kref *kref)
2545     {
2546     struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2547     struct se_session *se_sess = se_cmd->se_sess;
2548     - unsigned long flags;
2549    
2550     - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2551     if (list_empty(&se_cmd->se_cmd_list)) {
2552     - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2553     + spin_unlock(&se_sess->sess_cmd_lock);
2554     se_cmd->se_tfo->release_cmd(se_cmd);
2555     return;
2556     }
2557     if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
2558     - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2559     + spin_unlock(&se_sess->sess_cmd_lock);
2560     complete(&se_cmd->cmd_wait_comp);
2561     return;
2562     }
2563     list_del(&se_cmd->se_cmd_list);
2564     - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2565     + spin_unlock(&se_sess->sess_cmd_lock);
2566    
2567     se_cmd->se_tfo->release_cmd(se_cmd);
2568     }
2569     @@ -2238,7 +2236,8 @@ static void target_release_cmd_kref(struct kref *kref)
2570     */
2571     int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
2572     {
2573     - return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2574     + return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2575     + &se_sess->sess_cmd_lock);
2576     }
2577     EXPORT_SYMBOL(target_put_sess_cmd);
2578    
2579     diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
2580     index 08b48bb..faf4e18 100644
2581     --- a/drivers/watchdog/watchdog_dev.c
2582     +++ b/drivers/watchdog/watchdog_dev.c
2583     @@ -523,6 +523,7 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
2584     int err, devno;
2585    
2586     if (watchdog->id == 0) {
2587     + old_wdd = watchdog;
2588     watchdog_miscdev.parent = watchdog->parent;
2589     err = misc_register(&watchdog_miscdev);
2590     if (err != 0) {
2591     @@ -531,9 +532,9 @@ int watchdog_dev_register(struct watchdog_device *watchdog)
2592     if (err == -EBUSY)
2593     pr_err("%s: a legacy watchdog module is probably present.\n",
2594     watchdog->info->identity);
2595     + old_wdd = NULL;
2596     return err;
2597     }
2598     - old_wdd = watchdog;
2599     }
2600    
2601     /* Fill in the data structures */
2602     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2603     index cf3025c..f3190ab 100644
2604     --- a/fs/ext4/mballoc.c
2605     +++ b/fs/ext4/mballoc.c
2606     @@ -1994,7 +1994,11 @@ repeat:
2607     group = ac->ac_g_ex.fe_group;
2608    
2609     for (i = 0; i < ngroups; group++, i++) {
2610     - if (group == ngroups)
2611     + /*
2612     + * Artificially restricted ngroups for non-extent
2613     + * files makes group > ngroups possible on first loop.
2614     + */
2615     + if (group >= ngroups)
2616     group = 0;
2617    
2618     /* This now checks without needing the buddy page */
2619     diff --git a/fs/namei.c b/fs/namei.c
2620     index 57ae9c8..85e40d1 100644
2621     --- a/fs/namei.c
2622     +++ b/fs/namei.c
2623     @@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
2624     if (error)
2625     return error;
2626    
2627     - audit_inode(name, dir, 0);
2628     + audit_inode(name, dir, LOOKUP_PARENT);
2629     error = -EISDIR;
2630     /* trailing slashes? */
2631     if (nd->last.name[nd->last.len])
2632     diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
2633     index 8288b08..d401d01 100644
2634     --- a/fs/nfsd/nfs4proc.c
2635     +++ b/fs/nfsd/nfs4proc.c
2636     @@ -271,6 +271,7 @@ static __be32
2637     do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2638     {
2639     __be32 status;
2640     + int accmode = 0;
2641    
2642     /* We don't know the target directory, and therefore can not
2643     * set the change info
2644     @@ -284,9 +285,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
2645    
2646     open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
2647     (open->op_iattr.ia_size == 0);
2648     + /*
2649     + * In the delegation case, the client is telling us about an
2650     + * open that it *already* performed locally, some time ago. We
2651     + * should let it succeed now if possible.
2652     + *
2653     + * In the case of a CLAIM_FH open, on the other hand, the client
2654     + * may be counting on us to enforce permissions (the Linux 4.1
2655     + * client uses this for normal opens, for example).
2656     + */
2657     + if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
2658     + accmode = NFSD_MAY_OWNER_OVERRIDE;
2659    
2660     - status = do_open_permission(rqstp, current_fh, open,
2661     - NFSD_MAY_OWNER_OVERRIDE);
2662     + status = do_open_permission(rqstp, current_fh, open, accmode);
2663    
2664     return status;
2665     }
2666     diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
2667     index 899ca26..4e9a21d 100644
2668     --- a/fs/nfsd/nfs4recover.c
2669     +++ b/fs/nfsd/nfs4recover.c
2670     @@ -146,7 +146,7 @@ out_no_tfm:
2671     * then disable recovery tracking.
2672     */
2673     static void
2674     -legacy_recdir_name_error(int error)
2675     +legacy_recdir_name_error(struct nfs4_client *clp, int error)
2676     {
2677     printk(KERN_ERR "NFSD: unable to generate recoverydir "
2678     "name (%d).\n", error);
2679     @@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
2680     if (error == -ENOENT) {
2681     printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
2682     "Reboot recovery will not function correctly!\n");
2683     -
2684     - /* the argument is ignored by the legacy exit function */
2685     - nfsd4_client_tracking_exit(NULL);
2686     + nfsd4_client_tracking_exit(clp->net);
2687     }
2688     }
2689    
2690     @@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
2691    
2692     status = nfs4_make_rec_clidname(dname, &clp->cl_name);
2693     if (status)
2694     - return legacy_recdir_name_error(status);
2695     + return legacy_recdir_name_error(clp, status);
2696    
2697     status = nfs4_save_creds(&original_cred);
2698     if (status < 0)
2699     @@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
2700    
2701     status = nfs4_make_rec_clidname(dname, &clp->cl_name);
2702     if (status)
2703     - return legacy_recdir_name_error(status);
2704     + return legacy_recdir_name_error(clp, status);
2705    
2706     status = mnt_want_write_file(nn->rec_file);
2707     if (status)
2708     @@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
2709    
2710     status = nfs4_make_rec_clidname(dname, &clp->cl_name);
2711     if (status) {
2712     - legacy_recdir_name_error(status);
2713     + legacy_recdir_name_error(clp, status);
2714     return status;
2715     }
2716    
2717     diff --git a/include/linux/audit.h b/include/linux/audit.h
2718     index 5a6d718..b4086cf 100644
2719     --- a/include/linux/audit.h
2720     +++ b/include/linux/audit.h
2721     @@ -120,7 +120,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
2722     unsigned long a1, unsigned long a2,
2723     unsigned long a3)
2724     {
2725     - if (unlikely(!audit_dummy_context()))
2726     + if (unlikely(current->audit_context))
2727     __audit_syscall_entry(arch, major, a0, a1, a2, a3);
2728     }
2729     static inline void audit_syscall_exit(void *pt_regs)
2730     @@ -390,6 +390,11 @@ static inline void audit_ptrace(struct task_struct *t)
2731     #define audit_signals 0
2732     #endif /* CONFIG_AUDITSYSCALL */
2733    
2734     +static inline bool audit_loginuid_set(struct task_struct *tsk)
2735     +{
2736     + return uid_valid(audit_get_loginuid(tsk));
2737     +}
2738     +
2739     #ifdef CONFIG_AUDIT
2740     /* These are defined in audit.c */
2741     /* Public API */
2742     diff --git a/include/linux/kref.h b/include/linux/kref.h
2743     index 4972e6e..7419c02 100644
2744     --- a/include/linux/kref.h
2745     +++ b/include/linux/kref.h
2746     @@ -19,6 +19,7 @@
2747     #include <linux/atomic.h>
2748     #include <linux/kernel.h>
2749     #include <linux/mutex.h>
2750     +#include <linux/spinlock.h>
2751    
2752     struct kref {
2753     atomic_t refcount;
2754     @@ -95,6 +96,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
2755     return kref_sub(kref, 1, release);
2756     }
2757    
2758     +/**
2759     + * kref_put_spinlock_irqsave - decrement refcount for object.
2760     + * @kref: object.
2761     + * @release: pointer to the function that will clean up the object when the
2762     + * last reference to the object is released.
2763     + * This pointer is required, and it is not acceptable to pass kfree
2764     + * in as this function.
2765     + * @lock: lock to take in release case
2766     + *
2767     + * Behaves identically to kref_put() with one exception: if the reference count
2768     + * drops to zero, the lock is taken atomically with respect to dropping the
2769     + * reference count. The release function has to call spin_unlock() without _irqrestore.
2770     + */
2771     +static inline int kref_put_spinlock_irqsave(struct kref *kref,
2772     + void (*release)(struct kref *kref),
2773     + spinlock_t *lock)
2774     +{
2775     + unsigned long flags;
2776     +
2777     + WARN_ON(release == NULL);
2778     + if (atomic_add_unless(&kref->refcount, -1, 1))
2779     + return 0;
2780     + spin_lock_irqsave(lock, flags);
2781     + if (atomic_dec_and_test(&kref->refcount)) {
2782     + release(kref);
2783     + local_irq_restore(flags);
2784     + return 1;
2785     + }
2786     + spin_unlock_irqrestore(lock, flags);
2787     + return 0;
2788     +}
2789     +
2790     static inline int kref_put_mutex(struct kref *kref,
2791     void (*release)(struct kref *kref),
2792     struct mutex *lock)
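
The new kref_put_spinlock_irqsave() takes @lock with interrupts disabled only when the count actually reaches zero, and restores interrupts itself after @release returns, so the release callback must drop the lock with plain spin_unlock(). The skeleton below is a hypothetical in-kernel usage mirroring how target_put_sess_cmd()/target_release_cmd_kref() use it in the target_core_transport.c hunk further up; struct foo, foo_lock and foo_release are invented, the usual linux/list.h, linux/slab.h and linux/spinlock.h context is assumed, and it is a sketch rather than buildable standalone code.

/* Hypothetical user of kref_put_spinlock_irqsave(). */
struct foo {
	struct kref kref;
	struct list_head node;
};

static DEFINE_SPINLOCK(foo_lock);

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	list_del(&f->node);
	/* the helper took foo_lock with irqs disabled; release it without
	 * _irqrestore, the helper restores the interrupt state itself */
	spin_unlock(&foo_lock);
	kfree(f);
}

static void foo_put(struct foo *f)
{
	kref_put_spinlock_irqsave(&f->kref, foo_release, &foo_lock);
}
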
2793     diff --git a/include/linux/time.h b/include/linux/time.h
2794     index d4835df..afcdc4b 100644
2795     --- a/include/linux/time.h
2796     +++ b/include/linux/time.h
2797     @@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
2798    
2799     extern bool persistent_clock_exist;
2800    
2801     -#ifdef ALWAYS_USE_PERSISTENT_CLOCK
2802     -#define has_persistent_clock() true
2803     -#else
2804     static inline bool has_persistent_clock(void)
2805     {
2806     return persistent_clock_exist;
2807     }
2808     -#endif
2809    
2810     extern void read_persistent_clock(struct timespec *ts);
2811     extern void read_boot_clock(struct timespec *ts);
2812     diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
2813     index 0a1dcc2..ab3d0ac 100644
2814     --- a/include/net/inet_frag.h
2815     +++ b/include/net/inet_frag.h
2816     @@ -135,14 +135,15 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
2817     static inline void inet_frag_lru_move(struct inet_frag_queue *q)
2818     {
2819     spin_lock(&q->net->lru_lock);
2820     - list_move_tail(&q->lru_list, &q->net->lru_list);
2821     + if (!list_empty(&q->lru_list))
2822     + list_move_tail(&q->lru_list, &q->net->lru_list);
2823     spin_unlock(&q->net->lru_lock);
2824     }
2825    
2826     static inline void inet_frag_lru_del(struct inet_frag_queue *q)
2827     {
2828     spin_lock(&q->net->lru_lock);
2829     - list_del(&q->lru_list);
2830     + list_del_init(&q->lru_list);
2831     spin_unlock(&q->net->lru_lock);
2832     }
2833    
2834     diff --git a/include/net/sock.h b/include/net/sock.h
2835     index 14f6e9d..0be480a 100644
2836     --- a/include/net/sock.h
2837     +++ b/include/net/sock.h
2838     @@ -865,6 +865,18 @@ struct inet_hashinfo;
2839     struct raw_hashinfo;
2840     struct module;
2841    
2842     +/*
2843     + * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
2844     + * nodes unmodified. Special care is taken when initializing the object to zero.
2845     + */
2846     +static inline void sk_prot_clear_nulls(struct sock *sk, int size)
2847     +{
2848     + if (offsetof(struct sock, sk_node.next) != 0)
2849     + memset(sk, 0, offsetof(struct sock, sk_node.next));
2850     + memset(&sk->sk_node.pprev, 0,
2851     + size - offsetof(struct sock, sk_node.pprev));
2852     +}
2853     +
2854     /* Networking protocol blocks we attach to sockets.
2855     * socket layer -> transport layer interface
2856     * transport -> network interface is defined by struct inet_proto
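
sk_prot_clear_nulls() zeroes a socket while leaving sk_node.next untouched, which is what SLAB_DESTROY_BY_RCU requires of nulls nodes. The standalone model below shows the same two-memset-around-one-field pattern with invented struct names; it is not the socket layout itself.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct node {
	void *next;	/* must survive the clear */
	void *pprev;
};

struct obj {
	struct node n;
	int payload;
};

/* Zero everything except n.next: one memset up to its offset (skipped
 * here because next sits at offset 0) and one from the following field
 * to the end of the object. */
static void clear_preserving_next(struct obj *o, size_t size)
{
	if (offsetof(struct obj, n.next) != 0)
		memset(o, 0, offsetof(struct obj, n.next));
	memset(&o->n.pprev, 0, size - offsetof(struct obj, n.pprev));
}

int main(void)
{
	struct obj o = { { (void *)0x1, (void *)0x2 }, 42 };

	clear_preserving_next(&o, sizeof(o));
	printf("next=%p payload=%d\n", o.n.next, o.payload);
	return 0;
}
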
2857     diff --git a/include/net/tcp.h b/include/net/tcp.h
2858     index cf0694d..a345480 100644
2859     --- a/include/net/tcp.h
2860     +++ b/include/net/tcp.h
2861     @@ -1049,6 +1049,7 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
2862     skb_queue_len(&tp->ucopy.prequeue) == 0)
2863     return false;
2864    
2865     + skb_dst_force(skb);
2866     __skb_queue_tail(&tp->ucopy.prequeue, skb);
2867     tp->ucopy.memory += skb->truesize;
2868     if (tp->ucopy.memory > sk->sk_rcvbuf) {
2869     diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
2870     index 9f096f1..9554a19 100644
2871     --- a/include/uapi/linux/audit.h
2872     +++ b/include/uapi/linux/audit.h
2873     @@ -246,6 +246,7 @@
2874     #define AUDIT_OBJ_TYPE 21
2875     #define AUDIT_OBJ_LEV_LOW 22
2876     #define AUDIT_OBJ_LEV_HIGH 23
2877     +#define AUDIT_LOGINUID_SET 24
2878    
2879     /* These are ONLY useful when checking
2880     * at syscall exit time (AUDIT_AT_EXIT). */
2881     diff --git a/include/uapi/linux/if_cablemodem.h b/include/uapi/linux/if_cablemodem.h
2882     index 9ca1007..ee6b3c4 100644
2883     --- a/include/uapi/linux/if_cablemodem.h
2884     +++ b/include/uapi/linux/if_cablemodem.h
2885     @@ -12,11 +12,11 @@
2886     */
2887    
2888     /* some useful defines for sb1000.c e cmconfig.c - fv */
2889     -#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
2890     -#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
2891     -#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
2892     -#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
2893     -#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
2894     -#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
2895     +#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
2896     +#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
2897     +#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
2898     +#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
2899     +#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
2900     +#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
2901    
2902     #endif
2903     diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
2904     index a5a8c88..c520203 100644
2905     --- a/include/uapi/linux/virtio_net.h
2906     +++ b/include/uapi/linux/virtio_net.h
2907     @@ -191,7 +191,7 @@ struct virtio_net_ctrl_mac {
2908     * specified.
2909     */
2910     struct virtio_net_ctrl_mq {
2911     - u16 virtqueue_pairs;
2912     + __u16 virtqueue_pairs;
2913     };
2914    
2915     #define VIRTIO_NET_CTRL_MQ 4
2916     diff --git a/ipc/shm.c b/ipc/shm.c
2917     index 34af1fe..7e199fa 100644
2918     --- a/ipc/shm.c
2919     +++ b/ipc/shm.c
2920     @@ -493,7 +493,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
2921     if (shmflg & SHM_HUGETLB) {
2922     struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
2923     & SHM_HUGE_MASK);
2924     - size_t hugesize = ALIGN(size, huge_page_size(hs));
2925     + size_t hugesize;
2926     +
2927     + if (!hs) {
2928     + error = -EINVAL;
2929     + goto no_file;
2930     + }
2931     + hugesize = ALIGN(size, huge_page_size(hs));
2932    
2933     /* hugetlb_file_setup applies strict accounting */
2934     if (shmflg & SHM_NORESERVE)
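The newseg() change above looks up the hstate first and bails out with -EINVAL when the requested huge-page size is unsupported, instead of passing a NULL hstate to huge_page_size()/ALIGN(). Below is a userspace sketch of the same validate-before-align pattern, under the assumption that the lookup returns 0 for unsupported sizes; pagesize_for_log() and the constants are illustrative. The mm/mmap.c hunk further down applies the identical check.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for hstate_sizelog(): return the page size for a
 * supported size log, or 0 for anything the "system" does not support. */
static size_t pagesize_for_log(unsigned int log)
{
    switch (log) {
    case 21: return 2UL << 20;   /* 2 MiB */
    case 30: return 1UL << 30;   /* 1 GiB */
    default: return 0;           /* unsupported -> caller must bail out */
    }
}

static long setup_segment(size_t size, unsigned int size_log)
{
    size_t pagesize = pagesize_for_log(size_log);

    if (!pagesize)               /* mirrors the new 'if (!hs) ... -EINVAL' check */
        return -22;              /* -EINVAL */

    /* Round the request up to a whole number of huge pages (ALIGN in the patch). */
    size = (size + pagesize - 1) & ~(pagesize - 1);
    printf("aligned size: %zu\n", size);
    return 0;
}

int main(void)
{
    printf("valid:   %ld\n", setup_segment(5000, 21));
    printf("invalid: %ld\n", setup_segment(5000, 7));
    return 0;
}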
2935     diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
2936     index f9fc54b..2bf508d 100644
2937     --- a/kernel/auditfilter.c
2938     +++ b/kernel/auditfilter.c
2939     @@ -345,6 +345,12 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
2940     f->uid = INVALID_UID;
2941     f->gid = INVALID_GID;
2942    
2943     + /* Support legacy tests for a valid loginuid */
2944     + if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
2945     + f->type = AUDIT_LOGINUID_SET;
2946     + f->val = 0;
2947     + }
2948     +
2949     err = -EINVAL;
2950     if (f->op == Audit_bad)
2951     goto exit_free;
2952     @@ -352,6 +358,12 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
2953     switch(f->type) {
2954     default:
2955     goto exit_free;
2956     + case AUDIT_LOGINUID_SET:
2957     + if ((f->val != 0) && (f->val != 1))
2958     + goto exit_free;
2959     + if (f->op != Audit_not_equal && f->op != Audit_equal)
2960     + goto exit_free;
2961     + break;
2962     case AUDIT_UID:
2963     case AUDIT_EUID:
2964     case AUDIT_SUID:
2965     @@ -459,7 +471,20 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2966     f->gid = INVALID_GID;
2967     f->lsm_str = NULL;
2968     f->lsm_rule = NULL;
2969     - switch(f->type) {
2970     +
2971     + /* Support legacy tests for a valid loginuid */
2972     + if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
2973     + f->type = AUDIT_LOGINUID_SET;
2974     + f->val = 0;
2975     + }
2976     +
2977     + switch (f->type) {
2978     + case AUDIT_LOGINUID_SET:
2979     + if ((f->val != 0) && (f->val != 1))
2980     + goto exit_free;
2981     + if (f->op != Audit_not_equal && f->op != Audit_equal)
2982     + goto exit_free;
2983     + break;
2984     case AUDIT_UID:
2985     case AUDIT_EUID:
2986     case AUDIT_SUID:
2987     @@ -1378,6 +1403,10 @@ static int audit_filter_user_rules(struct audit_krule *rule,
2988     result = audit_uid_comparator(audit_get_loginuid(current),
2989     f->op, f->uid);
2990     break;
2991     + case AUDIT_LOGINUID_SET:
2992     + result = audit_comparator(audit_loginuid_set(current),
2993     + f->op, f->val);
2994     + break;
2995     case AUDIT_SUBJ_USER:
2996     case AUDIT_SUBJ_ROLE:
2997     case AUDIT_SUBJ_TYPE:
2998     diff --git a/kernel/auditsc.c b/kernel/auditsc.c
2999     index a371f85..c4b72b0 100644
3000     --- a/kernel/auditsc.c
3001     +++ b/kernel/auditsc.c
3002     @@ -742,6 +742,9 @@ static int audit_filter_rules(struct task_struct *tsk,
3003     if (ctx)
3004     result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
3005     break;
3006     + case AUDIT_LOGINUID_SET:
3007     + result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
3008     + break;
3009     case AUDIT_SUBJ_USER:
3010     case AUDIT_SUBJ_ROLE:
3011     case AUDIT_SUBJ_TYPE:
3012     @@ -2309,7 +2312,7 @@ int audit_set_loginuid(kuid_t loginuid)
3013     unsigned int sessionid;
3014    
3015     #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
3016     - if (uid_valid(task->loginuid))
3017     + if (audit_loginuid_set(task))
3018     return -EPERM;
3019     #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
3020     if (!capable(CAP_AUDIT_CONTROL))
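The audit changes above translate the legacy "loginuid == -1" test into the new explicit AUDIT_LOGINUID_SET field and restrict its value to 0 or 1. The sketch below restates that normalization in plain C; the enum, struct and function names are illustrative rather than the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field types, loosely modeled on the audit rule fields in the patch. */
enum field_type { FIELD_LOGINUID, FIELD_LOGINUID_SET };

struct rule_field {
    enum field_type type;
    uint32_t        val;
};

/* Legacy userspace expressed "loginuid is unset" as loginuid == -1 (4294967295).
 * Normalize that into the new explicit FIELD_LOGINUID_SET == 0 test. */
static void normalize(struct rule_field *f)
{
    if (f->type == FIELD_LOGINUID && f->val == UINT32_MAX) {
        f->type = FIELD_LOGINUID_SET;
        f->val = 0;
    }
}

/* The new field only admits the values 0 ("unset") and 1 ("set"). */
static bool valid(const struct rule_field *f)
{
    if (f->type == FIELD_LOGINUID_SET)
        return f->val == 0 || f->val == 1;
    return true;
}

int main(void)
{
    struct rule_field legacy = { FIELD_LOGINUID, UINT32_MAX };
    normalize(&legacy);
    printf("type=%d val=%u valid=%d\n",
           (int)legacy.type, (unsigned)legacy.val, (int)valid(&legacy));
    return 0;
}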
3021     diff --git a/kernel/kmod.c b/kernel/kmod.c
3022     index 56dd349..8985c87 100644
3023     --- a/kernel/kmod.c
3024     +++ b/kernel/kmod.c
3025     @@ -570,6 +570,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
3026     int retval = 0;
3027    
3028     helper_lock();
3029     + if (!sub_info->path) {
3030     + retval = -EINVAL;
3031     + goto out;
3032     + }
3033     +
3034     if (sub_info->path[0] == '\0')
3035     goto out;
3036    
3037     diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
3038     index e93cca9..6af50ad 100644
3039     --- a/kernel/sched/cputime.c
3040     +++ b/kernel/sched/cputime.c
3041     @@ -521,18 +521,49 @@ EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
3042    
3043     #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
3044    
3045     -static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
3046     +/*
3047     + * Perform (stime * rtime) / total, but avoid multiplication overflow by
3048     + * losing precision when the numbers are big.
3049     + */
3050     +static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
3051     {
3052     - u64 temp = (__force u64) rtime;
3053     + u64 scaled;
3054    
3055     - temp *= (__force u64) stime;
3056     + for (;;) {
3057     + /* Make sure "rtime" is the bigger of stime/rtime */
3058     + if (stime > rtime) {
3059     + u64 tmp = rtime; rtime = stime; stime = tmp;
3060     + }
3061    
3062     - if (sizeof(cputime_t) == 4)
3063     - temp = div_u64(temp, (__force u32) total);
3064     - else
3065     - temp = div64_u64(temp, (__force u64) total);
3066     + /* Make sure 'total' fits in 32 bits */
3067     + if (total >> 32)
3068     + goto drop_precision;
3069     +
3070     + /* Does rtime (and thus stime) fit in 32 bits? */
3071     + if (!(rtime >> 32))
3072     + break;
3073    
3074     - return (__force cputime_t) temp;
3075     + /* Can we just balance rtime/stime rather than dropping bits? */
3076     + if (stime >> 31)
3077     + goto drop_precision;
3078     +
3079     + /* We can grow stime and shrink rtime and try to make them both fit */
3080     + stime <<= 1;
3081     + rtime >>= 1;
3082     + continue;
3083     +
3084     +drop_precision:
3085     + /* We drop from rtime, it has more bits than stime */
3086     + rtime >>= 1;
3087     + total >>= 1;
3088     + }
3089     +
3090     + /*
3091     + * Make sure gcc understands that this is a 32x32->64 multiply,
3092     + * followed by a 64/32->64 divide.
3093     + */
3094     + scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
3095     + return (__force cputime_t) scaled;
3096     }
3097    
3098     /*
3099     @@ -543,7 +574,7 @@ static void cputime_adjust(struct task_cputime *curr,
3100     struct cputime *prev,
3101     cputime_t *ut, cputime_t *st)
3102     {
3103     - cputime_t rtime, stime, total;
3104     + cputime_t rtime, stime, utime, total;
3105    
3106     stime = curr->stime;
3107     total = stime + curr->utime;
3108     @@ -560,10 +591,22 @@ static void cputime_adjust(struct task_cputime *curr,
3109     */
3110     rtime = nsecs_to_cputime(curr->sum_exec_runtime);
3111    
3112     - if (total)
3113     - stime = scale_stime(stime, rtime, total);
3114     - else
3115     + /*
3116     + * Update userspace visible utime/stime values only if actual execution
3117     + * time is bigger than already exported. Note that it can happen that we
3118     + * provide bigger values due to scaling inaccuracy on big numbers.
3119     + */
3120     + if (prev->stime + prev->utime >= rtime)
3121     + goto out;
3122     +
3123     + if (total) {
3124     + stime = scale_stime((__force u64)stime,
3125     + (__force u64)rtime, (__force u64)total);
3126     + utime = rtime - stime;
3127     + } else {
3128     stime = rtime;
3129     + utime = 0;
3130     + }
3131    
3132     /*
3133     * If the tick based count grows faster than the scheduler one,
3134     @@ -571,8 +614,9 @@ static void cputime_adjust(struct task_cputime *curr,
3135     * Let's enforce monotonicity.
3136     */
3137     prev->stime = max(prev->stime, stime);
3138     - prev->utime = max(prev->utime, rtime - prev->stime);
3139     + prev->utime = max(prev->utime, utime);
3140    
3141     +out:
3142     *ut = prev->utime;
3143     *st = prev->stime;
3144     }
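The rewritten scale_stime() above computes stime * rtime / total while avoiding 64-bit multiplication overflow: it keeps rtime as the larger operand, rebalances bits between stime and rtime when possible, and only drops precision from rtime/total as a last resort. The standalone program below restates that loop so it can be compiled and compared against the exact result; it assumes total > 0, as the kernel caller guarantees, and is a sketch rather than the kernel function itself.

#include <stdint.h>
#include <stdio.h>

/* Compute stime * rtime / total without a 64x64 overflow, by shifting bits
 * out of the larger operands until a 32x32->64 multiply and a 64/32 divide
 * are safe. Assumes total > 0. */
static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
    for (;;) {
        /* Keep rtime as the larger of the pair. */
        if (stime > rtime) {
            uint64_t tmp = rtime; rtime = stime; stime = tmp;
        }

        /* 'total' must fit in 32 bits for the final divide. */
        if (total >> 32)
            goto drop_precision;

        /* If rtime (and thus stime) already fits in 32 bits, we are done. */
        if (!(rtime >> 32))
            break;

        /* Try to rebalance: grow stime, shrink rtime. */
        if (stime >> 31)
            goto drop_precision;
        stime <<= 1;
        rtime >>= 1;
        continue;

drop_precision:
        /* Drop a bit from rtime and total; rtime has more bits to spare. */
        rtime >>= 1;
        total >>= 1;
    }

    return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
    /* Values large enough that a naive stime * rtime would overflow 64 bits. */
    uint64_t stime = 3ULL << 40, rtime = 5ULL << 40, total = 8ULL << 40;

    printf("scaled: %llu\n", (unsigned long long)scale_stime(stime, rtime, total));
    printf("exact : %llu\n", (unsigned long long)(3ULL * (5ULL << 40) / 8));
    return 0;
}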
3145     diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
3146     index 24510d8..b696922 100644
3147     --- a/kernel/time/Kconfig
3148     +++ b/kernel/time/Kconfig
3149     @@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
3150     config ARCH_CLOCKSOURCE_DATA
3151     bool
3152    
3153     -# Platforms has a persistent clock
3154     -config ALWAYS_USE_PERSISTENT_CLOCK
3155     - bool
3156     - default n
3157     -
3158     # Timekeeping vsyscall support
3159     config GENERIC_TIME_VSYSCALL
3160     bool
3161     diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
3162     index a19a399..e717ad9 100644
3163     --- a/kernel/time/tick-sched.c
3164     +++ b/kernel/time/tick-sched.c
3165     @@ -904,7 +904,7 @@ void tick_cancel_sched_timer(int cpu)
3166     hrtimer_cancel(&ts->sched_timer);
3167     # endif
3168    
3169     - ts->nohz_mode = NOHZ_MODE_INACTIVE;
3170     + memset(ts, 0, sizeof(*ts));
3171     }
3172     #endif
3173    
3174     diff --git a/kernel/timer.c b/kernel/timer.c
3175     index dbf7a78..1b399c8 100644
3176     --- a/kernel/timer.c
3177     +++ b/kernel/timer.c
3178     @@ -1678,12 +1678,12 @@ static int __cpuinit init_timers_cpu(int cpu)
3179     boot_done = 1;
3180     base = &boot_tvec_bases;
3181     }
3182     + spin_lock_init(&base->lock);
3183     tvec_base_done[cpu] = 1;
3184     } else {
3185     base = per_cpu(tvec_bases, cpu);
3186     }
3187    
3188     - spin_lock_init(&base->lock);
3189    
3190     for (j = 0; j < TVN_SIZE; j++) {
3191     INIT_LIST_HEAD(base->tv5.vec + j);
3192     diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
3193     index e5b0ca8..5a8a53e 100644
3194     --- a/kernel/trace/trace_events_filter.c
3195     +++ b/kernel/trace/trace_events_filter.c
3196     @@ -777,7 +777,11 @@ static int filter_set_pred(struct event_filter *filter,
3197    
3198     static void __free_preds(struct event_filter *filter)
3199     {
3200     + int i;
3201     +
3202     if (filter->preds) {
3203     + for (i = 0; i < filter->n_preds; i++)
3204     + kfree(filter->preds[i].ops);
3205     kfree(filter->preds);
3206     filter->preds = NULL;
3207     }
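The __free_preds() change above frees each predicate's separately allocated ops before freeing the preds array, closing a memory leak. A minimal userspace mirror of that ordering follows; struct pred and struct filter are simplified stand-ins, not the tracing structures themselves.

#include <stdlib.h>
#include <string.h>

/* Each element of the array owns a separately allocated buffer. */
struct pred {
    char *ops;          /* per-predicate allocation that must not be leaked */
};

struct filter {
    struct pred *preds;
    int          n_preds;
};

/* Free the per-element allocations first, then the array itself -- the
 * ordering the patch restores by adding the kfree() loop. */
static void free_preds(struct filter *filter)
{
    int i;

    if (filter->preds) {
        for (i = 0; i < filter->n_preds; i++)
            free(filter->preds[i].ops);
        free(filter->preds);
        filter->preds = NULL;
    }
    filter->n_preds = 0;
}

int main(void)
{
    struct filter f = { .preds = calloc(3, sizeof(struct pred)), .n_preds = 3 };

    for (int i = 0; i < f.n_preds; i++)
        f.preds[i].ops = strdup("cmp");
    free_preds(&f);     /* no leak: run under valgrind to verify */
    return 0;
}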
3208     diff --git a/mm/mmap.c b/mm/mmap.c
3209     index e17fc06..0dceed8 100644
3210     --- a/mm/mmap.c
3211     +++ b/mm/mmap.c
3212     @@ -1331,9 +1331,13 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
3213     len = ALIGN(len, huge_page_size(hstate_file(file)));
3214     } else if (flags & MAP_HUGETLB) {
3215     struct user_struct *user = NULL;
3216     + struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
3217     + SHM_HUGE_MASK);
3218    
3219     - len = ALIGN(len, huge_page_size(hstate_sizelog(
3220     - (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
3221     + if (!hs)
3222     + return -EINVAL;
3223     +
3224     + len = ALIGN(len, huge_page_size(hs));
3225     /*
3226     * VM_NORESERVE is used because the reservations will be
3227     * taken when vm_ops->mmap() is called
3228     diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
3229     index 19cf81b..63bd98c 100644
3230     --- a/net/8021q/vlan_dev.c
3231     +++ b/net/8021q/vlan_dev.c
3232     @@ -627,7 +627,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
3233     netdev_features_t features)
3234     {
3235     struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
3236     - u32 old_features = features;
3237     + netdev_features_t old_features = features;
3238    
3239     features &= real_dev->vlan_features;
3240     features |= NETIF_F_RXCSUM;
3241     diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
3242     index c3530a8..950663d 100644
3243     --- a/net/bridge/br_stp_timer.c
3244     +++ b/net/bridge/br_stp_timer.c
3245     @@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
3246    
3247     br_debug(br, "tcn timer expired\n");
3248     spin_lock(&br->lock);
3249     - if (br->dev->flags & IFF_UP) {
3250     + if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
3251     br_transmit_tcn(br);
3252    
3253     mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
3254     diff --git a/net/core/dev.c b/net/core/dev.c
3255     index b24ab0e9..9a278e9 100644
3256     --- a/net/core/dev.c
3257     +++ b/net/core/dev.c
3258     @@ -2458,7 +2458,7 @@ EXPORT_SYMBOL(netif_skb_features);
3259     * 2. skb is fragmented and the device does not support SG.
3260     */
3261     static inline int skb_needs_linearize(struct sk_buff *skb,
3262     - int features)
3263     + netdev_features_t features)
3264     {
3265     return skb_is_nonlinear(skb) &&
3266     ((skb_has_frag_list(skb) &&
3267     diff --git a/net/core/ethtool.c b/net/core/ethtool.c
3268     index 3e9b2c3..41f4bdf 100644
3269     --- a/net/core/ethtool.c
3270     +++ b/net/core/ethtool.c
3271     @@ -1416,7 +1416,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
3272     void __user *useraddr = ifr->ifr_data;
3273     u32 ethcmd;
3274     int rc;
3275     - u32 old_features;
3276     + netdev_features_t old_features;
3277    
3278     if (!dev || !netif_device_present(dev))
3279     return -ENODEV;
3280     diff --git a/net/core/sock.c b/net/core/sock.c
3281     index b261a79..1432266 100644
3282     --- a/net/core/sock.c
3283     +++ b/net/core/sock.c
3284     @@ -1209,18 +1209,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
3285     #endif
3286     }
3287    
3288     -/*
3289     - * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
3290     - * un-modified. Special care is taken when initializing object to zero.
3291     - */
3292     -static inline void sk_prot_clear_nulls(struct sock *sk, int size)
3293     -{
3294     - if (offsetof(struct sock, sk_node.next) != 0)
3295     - memset(sk, 0, offsetof(struct sock, sk_node.next));
3296     - memset(&sk->sk_node.pprev, 0,
3297     - size - offsetof(struct sock, sk_node.pprev));
3298     -}
3299     -
3300     void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
3301     {
3302     unsigned long nulls1, nulls2;
3303     diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
3304     index f4fd23d..3211914 100644
3305     --- a/net/ipv4/inet_fragment.c
3306     +++ b/net/ipv4/inet_fragment.c
3307     @@ -257,6 +257,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
3308     setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
3309     spin_lock_init(&q->lock);
3310     atomic_set(&q->refcnt, 1);
3311     + INIT_LIST_HEAD(&q->lru_list);
3312    
3313     return q;
3314     }
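The inet_frag_alloc() change initializes lru_list as soon as the queue is created, so a later list operation on a queue that was never placed on the LRU still sees a valid, self-pointing head. Below is a toy version of that invariant using a local list_head type rather than the kernel API.

#include <stdio.h>

/* Minimal circular doubly-linked list head, in the spirit of the kernel's
 * struct list_head (local definition, not the kernel API). */
struct list_head {
    struct list_head *next, *prev;
};

/* What INIT_LIST_HEAD does: point the head at itself so that later list
 * operations on an entry that was never linked anywhere stay well defined. */
static void init_list_head(struct list_head *h)
{
    h->next = h;
    h->prev = h;
}

static int list_empty(const struct list_head *h)
{
    return h->next == h;
}

struct frag_queue {
    int              refcnt;
    struct list_head lru_list;
};

int main(void)
{
    struct frag_queue q = { .refcnt = 1 };  /* lru_list starts zeroed here, not yet valid */

    init_list_head(&q.lru_list);            /* now a proper empty list */
    printf("empty=%d\n", list_empty(&q.lru_list));
    return 0;
}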
3315     diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
3316     index b83a49c..2f672e7 100644
3317     --- a/net/ipv4/tcp_minisocks.c
3318     +++ b/net/ipv4/tcp_minisocks.c
3319     @@ -583,8 +583,13 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
3320     *
3321     * Note that even if there is new data in the SYN packet
3322     * they will be thrown away too.
3323     + *
3324     + * Reset timer after retransmitting SYNACK, similar to
3325     + * the idea of fast retransmit in recovery.
3326     */
3327     - inet_rtx_syn_ack(sk, req);
3328     + if (!inet_rtx_syn_ack(sk, req))
3329     + req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
3330     + TCP_RTO_MAX) + jiffies;
3331     return NULL;
3332     }
3333    
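The tcp_check_req() change re-arms the request socket only after a successful SYNACK retransmission, using an exponentially growing timeout capped at TCP_RTO_MAX. The small sketch below shows that capped backoff calculation, with invented millisecond constants standing in for the TCP macros.

#include <stdio.h>

#define INIT_TIMEOUT_MS 1000    /* stand-in for TCP_TIMEOUT_INIT */
#define MAX_TIMEOUT_MS  120000  /* stand-in for TCP_RTO_MAX      */

/* Exponential backoff with a ceiling, as used when re-arming the request
 * socket after a retransmitted SYNACK. */
static unsigned long next_expiry(unsigned long now_ms, unsigned int num_timeout)
{
    unsigned long timeout = (unsigned long)INIT_TIMEOUT_MS << num_timeout;

    if (timeout > MAX_TIMEOUT_MS)
        timeout = MAX_TIMEOUT_MS;
    return now_ms + timeout;
}

int main(void)
{
    for (unsigned int retries = 0; retries < 10; retries++)
        printf("retry %u -> expires at %lu ms\n", retries, next_expiry(0, retries));
    return 0;
}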
3334     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3335     index e4efffe..95d13c7 100644
3336     --- a/net/ipv6/ip6_gre.c
3337     +++ b/net/ipv6/ip6_gre.c
3338     @@ -1135,6 +1135,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
3339     }
3340     if (t == NULL)
3341     t = netdev_priv(dev);
3342     + memset(&p, 0, sizeof(p));
3343     ip6gre_tnl_parm_to_user(&p, &t->parms);
3344     if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
3345     err = -EFAULT;
3346     @@ -1182,6 +1183,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
3347     if (t) {
3348     err = 0;
3349    
3350     + memset(&p, 0, sizeof(p));
3351     ip6gre_tnl_parm_to_user(&p, &t->parms);
3352     if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
3353     err = -EFAULT;
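Both ioctl paths above now zero the parameter block before ip6gre_tnl_parm_to_user() fills it, so padding and unwritten fields no longer leak kernel stack contents through copy_to_user(). The userspace sketch below shows the same zero-before-fill discipline; struct tnl_parm and fill_parm() are illustrative only.

#include <stdio.h>
#include <string.h>

/* Illustrative parameter block with padding and fields the "fill" helper
 * may not touch on every path. */
struct tnl_parm {
    char          name[16];
    unsigned char proto;
    /* typically 3 bytes of padding here */
    unsigned int  flags;
};

static void fill_parm(struct tnl_parm *p)
{
    /* Deliberately sets only some members, like a partial conversion helper. */
    snprintf(p->name, sizeof(p->name), "ip6gre0");
    p->proto = 47;
}

int main(void)
{
    struct tnl_parm p;

    /* Without this memset, the padding bytes and 'flags' would carry stale
     * stack contents out to the caller -- the infoleak the patch plugs. */
    memset(&p, 0, sizeof(p));
    fill_parm(&p);

    for (size_t i = 0; i < sizeof(p); i++)
        printf("%02x%s", ((unsigned char *)&p)[i], (i % 16 == 15) ? "\n" : " ");
    printf("\n");
    return 0;
}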
3354     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3355     index 46a5be8..0fce928 100644
3356     --- a/net/ipv6/tcp_ipv6.c
3357     +++ b/net/ipv6/tcp_ipv6.c
3358     @@ -1937,6 +1937,17 @@ void tcp6_proc_exit(struct net *net)
3359     }
3360     #endif
3361    
3362     +static void tcp_v6_clear_sk(struct sock *sk, int size)
3363     +{
3364     + struct inet_sock *inet = inet_sk(sk);
3365     +
3366     + /* we do not want to clear pinet6 field, because of RCU lookups */
3367     + sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
3368     +
3369     + size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
3370     + memset(&inet->pinet6 + 1, 0, size);
3371     +}
3372     +
3373     struct proto tcpv6_prot = {
3374     .name = "TCPv6",
3375     .owner = THIS_MODULE,
3376     @@ -1980,6 +1991,7 @@ struct proto tcpv6_prot = {
3377     #ifdef CONFIG_MEMCG_KMEM
3378     .proto_cgroup = tcp_proto_cgroup,
3379     #endif
3380     + .clear_sk = tcp_v6_clear_sk,
3381     };
3382    
3383     static const struct inet6_protocol tcpv6_protocol = {
3384     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3385     index d8e5e85..27f0f8e 100644
3386     --- a/net/ipv6/udp.c
3387     +++ b/net/ipv6/udp.c
3388     @@ -1422,6 +1422,17 @@ void udp6_proc_exit(struct net *net) {
3389     }
3390     #endif /* CONFIG_PROC_FS */
3391    
3392     +void udp_v6_clear_sk(struct sock *sk, int size)
3393     +{
3394     + struct inet_sock *inet = inet_sk(sk);
3395     +
3396     + /* we do not want to clear pinet6 field, because of RCU lookups */
3397     + sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
3398     +
3399     + size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
3400     + memset(&inet->pinet6 + 1, 0, size);
3401     +}
3402     +
3403     /* ------------------------------------------------------------------------ */
3404    
3405     struct proto udpv6_prot = {
3406     @@ -1452,7 +1463,7 @@ struct proto udpv6_prot = {
3407     .compat_setsockopt = compat_udpv6_setsockopt,
3408     .compat_getsockopt = compat_udpv6_getsockopt,
3409     #endif
3410     - .clear_sk = sk_prot_clear_portaddr_nulls,
3411     + .clear_sk = udp_v6_clear_sk,
3412     };
3413    
3414     static struct inet_protosw udpv6_protosw = {
3415     diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
3416     index d757104..4691ed5 100644
3417     --- a/net/ipv6/udp_impl.h
3418     +++ b/net/ipv6/udp_impl.h
3419     @@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
3420     extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
3421     extern void udpv6_destroy_sock(struct sock *sk);
3422    
3423     +extern void udp_v6_clear_sk(struct sock *sk, int size);
3424     +
3425     #ifdef CONFIG_PROC_FS
3426     extern int udp6_seq_show(struct seq_file *seq, void *v);
3427     #endif
3428     diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
3429     index 1d08e21..dfcc4be 100644
3430     --- a/net/ipv6/udplite.c
3431     +++ b/net/ipv6/udplite.c
3432     @@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
3433     .compat_setsockopt = compat_udpv6_setsockopt,
3434     .compat_getsockopt = compat_udpv6_getsockopt,
3435     #endif
3436     - .clear_sk = sk_prot_clear_portaddr_nulls,
3437     + .clear_sk = udp_v6_clear_sk,
3438     };
3439    
3440     static struct inet_protosw udplite6_protosw = {
3441     diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
3442     index 4ef7bdb..23ed03d 100644
3443     --- a/net/ipv6/xfrm6_policy.c
3444     +++ b/net/ipv6/xfrm6_policy.c
3445     @@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
3446     dev_hold(dev);
3447    
3448     xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
3449     - if (!xdst->u.rt6.rt6i_idev)
3450     + if (!xdst->u.rt6.rt6i_idev) {
3451     + dev_put(dev);
3452     return -ENODEV;
3453     + }
3454    
3455     rt6_transfer_peer(&xdst->u.rt6, rt);
3456    
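The xfrm6_fill_dst() change drops the device reference taken by dev_hold() when the in6_dev_get() lookup fails, keeping the refcount balanced on the error path. Below is a toy refcount model of that pattern; struct device and the helpers are stand-ins, not the kernel API.

#include <stdio.h>

/* Toy device with a reference count, standing in for struct net_device. */
struct device {
    int refcnt;
    int has_idev;   /* whether the dependent lookup will succeed */
};

static void dev_hold(struct device *d) { d->refcnt++; }
static void dev_put(struct device *d)  { d->refcnt--; }

/* Take a reference, then try a dependent lookup; on failure, drop the
 * reference we just took -- the balance the patch restores with dev_put(). */
static int fill_dst(struct device *d)
{
    dev_hold(d);

    if (!d->has_idev) {
        dev_put(d);     /* without this, the refcount leaks on error */
        return -19;     /* -ENODEV */
    }
    return 0;
}

int main(void)
{
    struct device d = { .refcnt = 1, .has_idev = 0 };
    int err = fill_dst(&d);

    printf("err=%d refcnt=%d (still balanced)\n", err, d.refcnt);
    return 0;
}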
3457     diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
3458     index a4dcaf1..703c121 100644
3459     --- a/net/mac802154/mac802154.h
3460     +++ b/net/mac802154/mac802154.h
3461     @@ -90,7 +90,7 @@ struct mac802154_sub_if_data {
3462    
3463     #define MAC802154_MAX_XMIT_ATTEMPTS 3
3464    
3465     -#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
3466     +#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
3467    
3468     extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
3469     extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
3470     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3471     index 1d6793d..f83e172 100644
3472     --- a/net/packet/af_packet.c
3473     +++ b/net/packet/af_packet.c
3474     @@ -693,36 +693,33 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
3475    
3476     smp_rmb();
3477    
3478     - if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
3479     + /* We could have just memset this but we will lose the
3480     + * flexibility of making the priv area sticky
3481     + */
3482    
3483     - /* We could have just memset this but we will lose the
3484     - * flexibility of making the priv area sticky
3485     - */
3486     - BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
3487     - BLOCK_NUM_PKTS(pbd1) = 0;
3488     - BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3489     - getnstimeofday(&ts);
3490     - h1->ts_first_pkt.ts_sec = ts.tv_sec;
3491     - h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
3492     - pkc1->pkblk_start = (char *)pbd1;
3493     - pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3494     - BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3495     - BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
3496     - pbd1->version = pkc1->version;
3497     - pkc1->prev = pkc1->nxt_offset;
3498     - pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
3499     - prb_thaw_queue(pkc1);
3500     - _prb_refresh_rx_retire_blk_timer(pkc1);
3501     + BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
3502     + BLOCK_NUM_PKTS(pbd1) = 0;
3503     + BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3504    
3505     - smp_wmb();
3506     + getnstimeofday(&ts);
3507    
3508     - return;
3509     - }
3510     + h1->ts_first_pkt.ts_sec = ts.tv_sec;
3511     + h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
3512    
3513     - WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
3514     - pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
3515     - dump_stack();
3516     - BUG();
3517     + pkc1->pkblk_start = (char *)pbd1;
3518     + pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3519     +
3520     + BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
3521     + BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
3522     +
3523     + pbd1->version = pkc1->version;
3524     + pkc1->prev = pkc1->nxt_offset;
3525     + pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
3526     +
3527     + prb_thaw_queue(pkc1);
3528     + _prb_refresh_rx_retire_blk_timer(pkc1);
3529     +
3530     + smp_wmb();
3531     }
3532    
3533     /*
3534     @@ -813,10 +810,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
3535     prb_close_block(pkc, pbd, po, status);
3536     return;
3537     }
3538     -
3539     - WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
3540     - dump_stack();
3541     - BUG();
3542     }
3543    
3544     static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
3545     diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
3546     index e0f6de6..60d88b6 100644
3547     --- a/net/sched/act_ipt.c
3548     +++ b/net/sched/act_ipt.c
3549     @@ -8,7 +8,7 @@
3550     * as published by the Free Software Foundation; either version
3551     * 2 of the License, or (at your option) any later version.
3552     *
3553     - * Copyright: Jamal Hadi Salim (2002-4)
3554     + * Copyright: Jamal Hadi Salim (2002-13)
3555     */
3556    
3557     #include <linux/types.h>
3558     @@ -303,17 +303,44 @@ static struct tc_action_ops act_ipt_ops = {
3559     .walk = tcf_generic_walker
3560     };
3561    
3562     -MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
3563     +static struct tc_action_ops act_xt_ops = {
3564     + .kind = "xt",
3565     + .hinfo = &ipt_hash_info,
3566     + .type = TCA_ACT_IPT,
3567     + .capab = TCA_CAP_NONE,
3568     + .owner = THIS_MODULE,
3569     + .act = tcf_ipt,
3570     + .dump = tcf_ipt_dump,
3571     + .cleanup = tcf_ipt_cleanup,
3572     + .lookup = tcf_hash_search,
3573     + .init = tcf_ipt_init,
3574     + .walk = tcf_generic_walker
3575     +};
3576     +
3577     +MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
3578     MODULE_DESCRIPTION("Iptables target actions");
3579     MODULE_LICENSE("GPL");
3580     +MODULE_ALIAS("act_xt");
3581    
3582     static int __init ipt_init_module(void)
3583     {
3584     - return tcf_register_action(&act_ipt_ops);
3585     + int ret1, ret2;
3586     + ret1 = tcf_register_action(&act_xt_ops);
3587     + if (ret1 < 0)
3588     + printk("Failed to load xt action\n");
3589     + ret2 = tcf_register_action(&act_ipt_ops);
3590     + if (ret2 < 0)
3591     + printk("Failed to load ipt action\n");
3592     +
3593     + if (ret1 < 0 && ret2 < 0)
3594     + return ret1;
3595     + else
3596     + return 0;
3597     }
3598    
3599     static void __exit ipt_cleanup_module(void)
3600     {
3601     + tcf_unregister_action(&act_xt_ops);
3602     tcf_unregister_action(&act_ipt_ops);
3603     }
3604    
3605     diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3606     index 7f93e2a..2e330e8 100644
3607     --- a/net/vmw_vsock/af_vsock.c
3608     +++ b/net/vmw_vsock/af_vsock.c
3609     @@ -165,7 +165,7 @@ static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
3610     static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
3611     static DEFINE_SPINLOCK(vsock_table_lock);
3612    
3613     -static __init void vsock_init_tables(void)
3614     +static void vsock_init_tables(void)
3615     {
3616     int i;
3617    
3618     diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
3619     index 4aba764..c414cdd 100644
3620     --- a/sound/pci/hda/hda_codec.c
3621     +++ b/sound/pci/hda/hda_codec.c
3622     @@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
3623     struct hda_bus_unsolicited *unsol;
3624     unsigned int wp;
3625    
3626     + if (!bus || !bus->workq)
3627     + return 0;
3628     +
3629     trace_hda_unsol_event(bus, res, res_ex);
3630     unsol = bus->unsol;
3631     if (!unsol)
3632     @@ -1577,7 +1580,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
3633     "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
3634     nid, stream_tag, channel_id, format);
3635     p = get_hda_cvt_setup(codec, nid);
3636     - if (!p || p->active)
3637     + if (!p)
3638     return;
3639    
3640     if (codec->pcm_format_first)
3641     @@ -1624,7 +1627,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
3642    
3643     snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
3644     p = get_hda_cvt_setup(codec, nid);
3645     - if (p && p->active) {
3646     + if (p) {
3647     /* here we just clear the active flag when do_now isn't set;
3648     * actual clean-ups will be done later in
3649     * purify_inactive_streams() called from snd_hda_codec_prpapre()
3650     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3651     index 2a89d1ee..1e5a30f 100644
3652     --- a/sound/pci/hda/patch_conexant.c
3653     +++ b/sound/pci/hda/patch_conexant.c
3654     @@ -64,6 +64,7 @@ struct conexant_spec {
3655     /* extra EAPD pins */
3656     unsigned int num_eapds;
3657     hda_nid_t eapds[4];
3658     + bool dynamic_eapd;
3659    
3660     #ifdef ENABLE_CXT_STATIC_QUIRKS
3661     const struct snd_kcontrol_new *mixers[5];
3662     @@ -3152,7 +3153,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
3663     * thus it might control over all pins.
3664     */
3665     if (spec->num_eapds > 2)
3666     - spec->gen.own_eapd_ctl = 1;
3667     + spec->dynamic_eapd = 1;
3668     }
3669    
3670     static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
3671     @@ -3191,6 +3192,15 @@ static int cx_auto_build_controls(struct hda_codec *codec)
3672     return 0;
3673     }
3674    
3675     +static int cx_auto_init(struct hda_codec *codec)
3676     +{
3677     + struct conexant_spec *spec = codec->spec;
3678     + snd_hda_gen_init(codec);
3679     + if (!spec->dynamic_eapd)
3680     + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
3681     + return 0;
3682     +}
3683     +
3684     static void cx_auto_free(struct hda_codec *codec)
3685     {
3686     snd_hda_detach_beep_device(codec);
3687     @@ -3200,7 +3210,7 @@ static void cx_auto_free(struct hda_codec *codec)
3688     static const struct hda_codec_ops cx_auto_patch_ops = {
3689     .build_controls = cx_auto_build_controls,
3690     .build_pcms = snd_hda_gen_build_pcms,
3691     - .init = snd_hda_gen_init,
3692     + .init = cx_auto_init,
3693     .free = cx_auto_free,
3694     .unsol_event = snd_hda_jack_unsol_event,
3695     #ifdef CONFIG_PM
3696     @@ -3350,7 +3360,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
3697    
3698     cx_auto_parse_beep(codec);
3699     cx_auto_parse_eapd(codec);
3700     - if (spec->gen.own_eapd_ctl)
3701     + spec->gen.own_eapd_ctl = 1;
3702     + if (spec->dynamic_eapd)
3703     spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
3704    
3705     switch (codec->vendor_id) {
3706     diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
3707     index 41230ad..4a6f1da 100644
3708     --- a/sound/soc/codecs/da7213.c
3709     +++ b/sound/soc/codecs/da7213.c
3710     @@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
3711     DA7213_DMIC_DATA_SEL_SHIFT);
3712     break;
3713     }
3714     - switch (pdata->dmic_data_sel) {
3715     + switch (pdata->dmic_samplephase) {
3716     case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
3717     case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
3718     - dmic_cfg |= (pdata->dmic_data_sel <<
3719     + dmic_cfg |= (pdata->dmic_samplephase <<
3720     DA7213_DMIC_SAMPLEPHASE_SHIFT);
3721     break;
3722     }
3723     - switch (pdata->dmic_data_sel) {
3724     + switch (pdata->dmic_clk_rate) {
3725     case DA7213_DMIC_CLK_3_0MHZ:
3726     case DA7213_DMIC_CLK_1_5MHZ:
3727     - dmic_cfg |= (pdata->dmic_data_sel <<
3728     + dmic_cfg |= (pdata->dmic_clk_rate <<
3729     DA7213_DMIC_CLK_RATE_SHIFT);
3730     break;
3731     }
3732     diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
3733     index c9bd445..e5f96c9 100644
3734     --- a/sound/soc/codecs/wm8994.c
3735     +++ b/sound/soc/codecs/wm8994.c
3736     @@ -2841,6 +2841,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
3737     default:
3738     return 0;
3739     }
3740     + break;
3741     default:
3742     return 0;
3743     }
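The wm8994 hunk adds a break after the inner switch so that a successfully handled AIF3 case no longer falls through into the outer default and returns early. The short program below reproduces the control-flow pitfall with invented interface/rate values; it is a sketch, not the driver's actual logic.

#include <stdio.h>

/* Demonstrates the fallthrough the added 'break;' prevents: after the inner
 * switch finishes, control must not drop into the outer 'default:' label. */
static int configure(int iface, int rate)
{
    int configured = 0;

    switch (iface) {
    case 3:                     /* the AIF3 case in the patch */
        switch (rate) {
        case 8000:
        case 16000:
            configured = 1;     /* normal configuration path */
            break;
        default:
            return 0;           /* unsupported rate */
        }
        break;                  /* the fix: stop here, don't fall into 'default:' */
    default:
        return 0;               /* unrelated interfaces: nothing to do */
    }

    return configured;          /* only reached because of the added break */
}

int main(void)
{
    printf("iface 3 @ 16 kHz -> %d\n", configure(3, 16000));
    printf("iface 1          -> %d\n", configure(1, 16000));
    return 0;
}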