Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.12/0114-3.12.15-all-fixes.patch



Revision 2429
Tue May 13 11:02:30 2014 UTC by niro
File size: 246656 bytes
-linux-3.12.15
1 niro 2429 diff --git a/Makefile b/Makefile
2     index 5d38a5a79b3a..517391a3093e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 12
8     -SUBLEVEL = 14
9     +SUBLEVEL = 15
10     EXTRAVERSION =
11     NAME = One Giant Leap for Frogkind
12    
13     diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
14     index f33679d2d3ee..50e1d850ee2e 100644
15     --- a/arch/arm/mach-sa1100/include/mach/collie.h
16     +++ b/arch/arm/mach-sa1100/include/mach/collie.h
17     @@ -13,6 +13,8 @@
18     #ifndef __ASM_ARCH_COLLIE_H
19     #define __ASM_ARCH_COLLIE_H
20    
21     +#include "hardware.h" /* Gives GPIO_MAX */
22     +
23     extern void locomolcd_power(int on);
24    
25     #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
26     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
27     index 0b27b6574296..965c28ff7b3b 100644
28     --- a/arch/arm64/include/asm/pgtable.h
29     +++ b/arch/arm64/include/asm/pgtable.h
30     @@ -136,10 +136,10 @@ extern struct page *empty_zero_page;
31     /*
32     * The following only work if pte_present(). Undefined behaviour otherwise.
33     */
34     -#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
35     -#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
36     -#define pte_young(pte) (pte_val(pte) & PTE_AF)
37     -#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
38     +#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
39     +#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
40     +#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
41     +#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
42     #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
43     #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
44    
45     diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
46     index e0331414c7d6..86479bbf4714 100644
47     --- a/arch/mips/include/asm/mipsregs.h
48     +++ b/arch/mips/include/asm/mipsregs.h
49     @@ -14,6 +14,7 @@
50     #define _ASM_MIPSREGS_H
51    
52     #include <linux/linkage.h>
53     +#include <linux/types.h>
54     #include <asm/hazards.h>
55     #include <asm/war.h>
56    
57     diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
58     index 599545738af3..c2dcfaa51987 100644
59     --- a/arch/powerpc/include/asm/ppc_asm.h
60     +++ b/arch/powerpc/include/asm/ppc_asm.h
61     @@ -478,13 +478,6 @@ BEGIN_FTR_SECTION_NESTED(945) \
62     std ra,TASKTHREADPPR(rb); \
63     END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
64    
65     -#define RESTORE_PPR(ra, rb) \
66     -BEGIN_FTR_SECTION_NESTED(946) \
67     - ld ra,PACACURRENT(r13); \
68     - ld rb,TASKTHREADPPR(ra); \
69     - mtspr SPRN_PPR,rb; /* Restore PPR */ \
70     -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
71     -
72     #endif
73    
74     /*
75     diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
76     index c04cdf70d487..7be37170fda7 100644
77     --- a/arch/powerpc/kernel/entry_64.S
78     +++ b/arch/powerpc/kernel/entry_64.S
79     @@ -820,6 +820,12 @@ fast_exception_return:
80     andi. r0,r3,MSR_RI
81     beq- unrecov_restore
82    
83     + /* Load PPR from thread struct before we clear MSR:RI */
84     +BEGIN_FTR_SECTION
85     + ld r2,PACACURRENT(r13)
86     + ld r2,TASKTHREADPPR(r2)
87     +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
88     +
89     /*
90     * Clear RI before restoring r13. If we are returning to
91     * userspace and we take an exception after restoring r13,
92     @@ -840,8 +846,10 @@ fast_exception_return:
93     */
94     andi. r0,r3,MSR_PR
95     beq 1f
96     +BEGIN_FTR_SECTION
97     + mtspr SPRN_PPR,r2 /* Restore PPR */
98     +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
99     ACCOUNT_CPU_USER_EXIT(r2, r4)
100     - RESTORE_PPR(r2, r4)
101     REST_GPR(13, r1)
102     1:
103     mtspr SPRN_SRR1,r3
104     diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
105     index 96d2fdf3aa9e..aa75b2beba7d 100644
106     --- a/arch/powerpc/kernel/process.c
107     +++ b/arch/powerpc/kernel/process.c
108     @@ -928,6 +928,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
109     flush_altivec_to_thread(src);
110     flush_vsx_to_thread(src);
111     flush_spe_to_thread(src);
112     + /*
113     + * Flush TM state out so we can copy it. __switch_to_tm() does this
114     + * flush but it removes the checkpointed state from the current CPU and
115     + * transitions the CPU out of TM mode. Hence we need to call
116     + * tm_recheckpoint_new_task() (on the same task) to restore the
117     + * checkpointed state back and the TM mode.
118     + */
119     + __switch_to_tm(src);
120     + tm_recheckpoint_new_task(src);
121    
122     *dst = *src;
123    
124     diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
125     index b47a0e1ab001..c712ecec13ba 100644
126     --- a/arch/powerpc/kernel/reloc_64.S
127     +++ b/arch/powerpc/kernel/reloc_64.S
128     @@ -81,6 +81,7 @@ _GLOBAL(relocate)
129    
130     6: blr
131    
132     +.balign 8
133     p_dyn: .llong __dynamic_start - 0b
134     p_rela: .llong __rela_dyn_start - 0b
135     p_st: .llong _stext - 0b
136     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
137     index 7143793859fa..3e01afa21710 100644
138     --- a/arch/s390/Kconfig
139     +++ b/arch/s390/Kconfig
140     @@ -100,7 +100,7 @@ config S390
141     select GENERIC_CLOCKEVENTS
142     select GENERIC_CPU_DEVICES if !SMP
143     select GENERIC_SMP_IDLE_THREAD
144     - select GENERIC_TIME_VSYSCALL_OLD
145     + select GENERIC_TIME_VSYSCALL
146     select HAVE_ALIGNED_STRUCT_PAGE if SLUB
147     select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
148     select HAVE_ARCH_SECCOMP_FILTER
149     diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
150     index 87a22092b68f..6c0281f30d44 100644
151     --- a/arch/s390/appldata/appldata_base.c
152     +++ b/arch/s390/appldata/appldata_base.c
153     @@ -527,6 +527,7 @@ static int __init appldata_init(void)
154     {
155     int rc;
156    
157     + init_virt_timer(&appldata_timer);
158     appldata_timer.function = appldata_timer_function;
159     appldata_timer.data = (unsigned long) &appldata_work;
160    
161     diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
162     index a73eb2e1e918..bc9746a7d47c 100644
163     --- a/arch/s390/include/asm/vdso.h
164     +++ b/arch/s390/include/asm/vdso.h
165     @@ -26,8 +26,9 @@ struct vdso_data {
166     __u64 wtom_clock_nsec; /* 0x28 */
167     __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
168     __u32 tz_dsttime; /* Type of dst correction 0x34 */
169     - __u32 ectg_available;
170     - __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
171     + __u32 ectg_available; /* ECTG instruction present 0x38 */
172     + __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
173     + __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
174     };
175    
176     struct vdso_per_cpu_data {
177     diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
178     index 2416138ebd3e..496116cd65ec 100644
179     --- a/arch/s390/kernel/asm-offsets.c
180     +++ b/arch/s390/kernel/asm-offsets.c
181     @@ -65,7 +65,8 @@ int main(void)
182     DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
183     DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
184     DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
185     - DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
186     + DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
187     + DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
188     DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
189     DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
190     /* constants used by the vdso */
191     diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
192     index 064c3082ab33..dd95f1631621 100644
193     --- a/arch/s390/kernel/time.c
194     +++ b/arch/s390/kernel/time.c
195     @@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
196     set_clock_comparator(S390_lowcore.clock_comparator);
197     }
198    
199     -static int s390_next_ktime(ktime_t expires,
200     +static int s390_next_event(unsigned long delta,
201     struct clock_event_device *evt)
202     {
203     - struct timespec ts;
204     - u64 nsecs;
205     -
206     - ts.tv_sec = ts.tv_nsec = 0;
207     - monotonic_to_bootbased(&ts);
208     - nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
209     - do_div(nsecs, 125);
210     - S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
211     - /* Program the maximum value if we have an overflow (== year 2042) */
212     - if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
213     - S390_lowcore.clock_comparator = -1ULL;
214     + S390_lowcore.clock_comparator = get_tod_clock() + delta;
215     set_clock_comparator(S390_lowcore.clock_comparator);
216     return 0;
217     }
218     @@ -146,15 +136,14 @@ void init_cpu_timer(void)
219     cpu = smp_processor_id();
220     cd = &per_cpu(comparators, cpu);
221     cd->name = "comparator";
222     - cd->features = CLOCK_EVT_FEAT_ONESHOT |
223     - CLOCK_EVT_FEAT_KTIME;
224     + cd->features = CLOCK_EVT_FEAT_ONESHOT;
225     cd->mult = 16777;
226     cd->shift = 12;
227     cd->min_delta_ns = 1;
228     cd->max_delta_ns = LONG_MAX;
229     cd->rating = 400;
230     cd->cpumask = cpumask_of(cpu);
231     - cd->set_next_ktime = s390_next_ktime;
232     + cd->set_next_event = s390_next_event;
233     cd->set_mode = s390_set_mode;
234    
235     clockevents_register_device(cd);
236     @@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
237     return &clocksource_tod;
238     }
239    
240     -void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
241     - struct clocksource *clock, u32 mult)
242     +void update_vsyscall(struct timekeeper *tk)
243     {
244     - if (clock != &clocksource_tod)
245     + u64 nsecps;
246     +
247     + if (tk->clock != &clocksource_tod)
248     return;
249    
250     /* Make userspace gettimeofday spin until we're done. */
251     ++vdso_data->tb_update_count;
252     smp_wmb();
253     - vdso_data->xtime_tod_stamp = clock->cycle_last;
254     - vdso_data->xtime_clock_sec = wall_time->tv_sec;
255     - vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
256     - vdso_data->wtom_clock_sec = wtm->tv_sec;
257     - vdso_data->wtom_clock_nsec = wtm->tv_nsec;
258     - vdso_data->ntp_mult = mult;
259     + vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
260     + vdso_data->xtime_clock_sec = tk->xtime_sec;
261     + vdso_data->xtime_clock_nsec = tk->xtime_nsec;
262     + vdso_data->wtom_clock_sec =
263     + tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
264     + vdso_data->wtom_clock_nsec = tk->xtime_nsec +
265     + + (tk->wall_to_monotonic.tv_nsec << tk->shift);
266     + nsecps = (u64) NSEC_PER_SEC << tk->shift;
267     + while (vdso_data->wtom_clock_nsec >= nsecps) {
268     + vdso_data->wtom_clock_nsec -= nsecps;
269     + vdso_data->wtom_clock_sec++;
270     + }
271     + vdso_data->tk_mult = tk->mult;
272     + vdso_data->tk_shift = tk->shift;
273     smp_wmb();
274     ++vdso_data->tb_update_count;
275     }
276     diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
277     index b2224e0b974c..5be8e472f57d 100644
278     --- a/arch/s390/kernel/vdso32/clock_gettime.S
279     +++ b/arch/s390/kernel/vdso32/clock_gettime.S
280     @@ -38,25 +38,26 @@ __kernel_clock_gettime:
281     sl %r1,__VDSO_XTIME_STAMP+4(%r5)
282     brc 3,2f
283     ahi %r0,-1
284     -2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
285     +2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
286     lr %r2,%r0
287     - l %r0,__VDSO_NTP_MULT(%r5)
288     + l %r0,__VDSO_TK_MULT(%r5)
289     ltr %r1,%r1
290     mr %r0,%r0
291     jnm 3f
292     - a %r0,__VDSO_NTP_MULT(%r5)
293     + a %r0,__VDSO_TK_MULT(%r5)
294     3: alr %r0,%r2
295     - srdl %r0,12
296     - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
297     + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
298     al %r1,__VDSO_XTIME_NSEC+4(%r5)
299     brc 12,4f
300     ahi %r0,1
301     -4: l %r2,__VDSO_XTIME_SEC+4(%r5)
302     - al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
303     +4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
304     al %r1,__VDSO_WTOM_NSEC+4(%r5)
305     brc 12,5f
306     ahi %r0,1
307     -5: al %r2,__VDSO_WTOM_SEC+4(%r5)
308     +5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
309     + srdl %r0,0(%r2) /* >> tk->shift */
310     + l %r2,__VDSO_XTIME_SEC+4(%r5)
311     + al %r2,__VDSO_WTOM_SEC+4(%r5)
312     cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
313     jne 1b
314     basr %r5,0
315     @@ -86,20 +87,21 @@ __kernel_clock_gettime:
316     sl %r1,__VDSO_XTIME_STAMP+4(%r5)
317     brc 3,12f
318     ahi %r0,-1
319     -12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
320     +12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
321     lr %r2,%r0
322     - l %r0,__VDSO_NTP_MULT(%r5)
323     + l %r0,__VDSO_TK_MULT(%r5)
324     ltr %r1,%r1
325     mr %r0,%r0
326     jnm 13f
327     - a %r0,__VDSO_NTP_MULT(%r5)
328     + a %r0,__VDSO_TK_MULT(%r5)
329     13: alr %r0,%r2
330     - srdl %r0,12
331     - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
332     + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
333     al %r1,__VDSO_XTIME_NSEC+4(%r5)
334     brc 12,14f
335     ahi %r0,1
336     -14: l %r2,__VDSO_XTIME_SEC+4(%r5)
337     +14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
338     + srdl %r0,0(%r2) /* >> tk->shift */
339     + l %r2,__VDSO_XTIME_SEC+4(%r5)
340     cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
341     jne 11b
342     basr %r5,0
343     diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
344     index 2d3633175e3b..fd621a950f7c 100644
345     --- a/arch/s390/kernel/vdso32/gettimeofday.S
346     +++ b/arch/s390/kernel/vdso32/gettimeofday.S
347     @@ -35,15 +35,14 @@ __kernel_gettimeofday:
348     sl %r1,__VDSO_XTIME_STAMP+4(%r5)
349     brc 3,3f
350     ahi %r0,-1
351     -3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
352     +3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
353     st %r0,24(%r15)
354     - l %r0,__VDSO_NTP_MULT(%r5)
355     + l %r0,__VDSO_TK_MULT(%r5)
356     ltr %r1,%r1
357     mr %r0,%r0
358     jnm 4f
359     - a %r0,__VDSO_NTP_MULT(%r5)
360     + a %r0,__VDSO_TK_MULT(%r5)
361     4: al %r0,24(%r15)
362     - srdl %r0,12
363     al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
364     al %r1,__VDSO_XTIME_NSEC+4(%r5)
365     brc 12,5f
366     @@ -51,6 +50,8 @@ __kernel_gettimeofday:
367     5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
368     cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
369     jne 1b
370     + l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
371     + srdl %r0,0(%r4) /* >> tk->shift */
372     l %r4,24(%r15) /* get tv_sec from stack */
373     basr %r5,0
374     6: ltr %r0,%r0
375     diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
376     index d46c95ed5f19..0add1072ba30 100644
377     --- a/arch/s390/kernel/vdso64/clock_gettime.S
378     +++ b/arch/s390/kernel/vdso64/clock_gettime.S
379     @@ -34,14 +34,15 @@ __kernel_clock_gettime:
380     tmll %r4,0x0001 /* pending update ? loop */
381     jnz 0b
382     stck 48(%r15) /* Store TOD clock */
383     + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
384     + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
385     + alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
386     lg %r1,48(%r15)
387     sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
388     - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
389     - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
390     - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
391     - lg %r0,__VDSO_XTIME_SEC(%r5)
392     - alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */
393     - alg %r0,__VDSO_WTOM_SEC(%r5)
394     + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
395     + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
396     + alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
397     + srlg %r1,%r1,0(%r2) /* >> tk->shift */
398     clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
399     jne 0b
400     larl %r5,13f
401     @@ -62,12 +63,13 @@ __kernel_clock_gettime:
402     tmll %r4,0x0001 /* pending update ? loop */
403     jnz 5b
404     stck 48(%r15) /* Store TOD clock */
405     + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
406     lg %r1,48(%r15)
407     sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
408     - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
409     - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
410     - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
411     - lg %r0,__VDSO_XTIME_SEC(%r5)
412     + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
413     + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
414     + srlg %r1,%r1,0(%r2) /* >> tk->shift */
415     + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
416     clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
417     jne 5b
418     larl %r5,13f
419     diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
420     index 36ee674722ec..d0860d1d0ccc 100644
421     --- a/arch/s390/kernel/vdso64/gettimeofday.S
422     +++ b/arch/s390/kernel/vdso64/gettimeofday.S
423     @@ -31,12 +31,13 @@ __kernel_gettimeofday:
424     stck 48(%r15) /* Store TOD clock */
425     lg %r1,48(%r15)
426     sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
427     - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
428     - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
429     - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
430     - lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
431     + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
432     + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
433     + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
434     clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
435     jne 0b
436     + lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
437     + srlg %r1,%r1,0(%r5) /* >> tk->shift */
438     larl %r5,5f
439     2: clg %r1,0(%r5)
440     jl 3f
441     diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
442     index 2c37aadcbc35..32ce71375b21 100644
443     --- a/arch/x86/include/asm/kdebug.h
444     +++ b/arch/x86/include/asm/kdebug.h
445     @@ -21,7 +21,7 @@ enum die_val {
446     DIE_NMIUNKNOWN,
447     };
448    
449     -extern void printk_address(unsigned long address, int reliable);
450     +extern void printk_address(unsigned long address);
451     extern void die(const char *, struct pt_regs *,long);
452     extern int __must_check __die(const char *, struct pt_regs *, long);
453     extern void show_trace(struct task_struct *t, struct pt_regs *regs,
454     diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
455     index deb6421c9e69..d9c12d3022a7 100644
456     --- a/arch/x86/kernel/dumpstack.c
457     +++ b/arch/x86/kernel/dumpstack.c
458     @@ -25,12 +25,17 @@ unsigned int code_bytes = 64;
459     int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
460     static int die_counter;
461    
462     -void printk_address(unsigned long address, int reliable)
463     +static void printk_stack_address(unsigned long address, int reliable)
464     {
465     pr_cont(" [<%p>] %s%pB\n",
466     (void *)address, reliable ? "" : "? ", (void *)address);
467     }
468    
469     +void printk_address(unsigned long address)
470     +{
471     + pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
472     +}
473     +
474     #ifdef CONFIG_FUNCTION_GRAPH_TRACER
475     static void
476     print_ftrace_graph_addr(unsigned long addr, void *data,
477     @@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
478     {
479     touch_nmi_watchdog();
480     printk(data);
481     - printk_address(addr, reliable);
482     + printk_stack_address(addr, reliable);
483     }
484    
485     static const struct stacktrace_ops print_trace_ops = {
486     @@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
487     #else
488     /* Executive summary in case the oops scrolled away */
489     printk(KERN_ALERT "RIP ");
490     - printk_address(regs->ip, 1);
491     + printk_address(regs->ip);
492     printk(" RSP <%016lx>\n", regs->sp);
493     #endif
494     return 0;
495     diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
496     index 81ba27679f18..f36bd42d6f0c 100644
497     --- a/arch/x86/kernel/head_32.S
498     +++ b/arch/x86/kernel/head_32.S
499     @@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
500     /* This is global to keep gas from relaxing the jumps */
501     ENTRY(early_idt_handler)
502     cld
503     +
504     + cmpl $2,(%esp) # X86_TRAP_NMI
505     + je is_nmi # Ignore NMI
506     +
507     cmpl $2,%ss:early_recursion_flag
508     je hlt_loop
509     incl %ss:early_recursion_flag
510     @@ -594,8 +598,9 @@ ex_entry:
511     pop %edx
512     pop %ecx
513     pop %eax
514     - addl $8,%esp /* drop vector number and error code */
515     decl %ss:early_recursion_flag
516     +is_nmi:
517     + addl $8,%esp /* drop vector number and error code */
518     iret
519     ENDPROC(early_idt_handler)
520    
521     diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
522     index e1aabdb314c8..a468c0a65c42 100644
523     --- a/arch/x86/kernel/head_64.S
524     +++ b/arch/x86/kernel/head_64.S
525     @@ -343,6 +343,9 @@ early_idt_handlers:
526     ENTRY(early_idt_handler)
527     cld
528    
529     + cmpl $2,(%rsp) # X86_TRAP_NMI
530     + je is_nmi # Ignore NMI
531     +
532     cmpl $2,early_recursion_flag(%rip)
533     jz 1f
534     incl early_recursion_flag(%rip)
535     @@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
536     popq %rdx
537     popq %rcx
538     popq %rax
539     - addq $16,%rsp # drop vector number and error code
540     decl early_recursion_flag(%rip)
541     +is_nmi:
542     + addq $16,%rsp # drop vector number and error code
543     INTERRUPT_RETURN
544     ENDPROC(early_idt_handler)
545    
546     diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
547     index 5d576ab34403..21935afebe19 100644
548     --- a/arch/x86/kernel/i387.c
549     +++ b/arch/x86/kernel/i387.c
550     @@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
551    
552     void __kernel_fpu_end(void)
553     {
554     - if (use_eager_fpu())
555     - math_state_restore();
556     - else
557     + if (use_eager_fpu()) {
558     + /*
559     + * For eager fpu, most the time, tsk_used_math() is true.
560     + * Restore the user math as we are done with the kernel usage.
561     + * At few instances during thread exit, signal handling etc,
562     + * tsk_used_math() is false. Those few places will take proper
563     + * actions, so we don't need to restore the math here.
564     + */
565     + if (likely(tsk_used_math(current)))
566     + math_state_restore();
567     + } else {
568     stts();
569     + }
570     }
571     EXPORT_SYMBOL(__kernel_fpu_end);
572    
573     diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
574     index bb1dc51bab05..8e9fe8dfd37b 100644
575     --- a/arch/x86/kernel/process_64.c
576     +++ b/arch/x86/kernel/process_64.c
577     @@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all)
578     unsigned int ds, cs, es;
579    
580     printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
581     - printk_address(regs->ip, 1);
582     + printk_address(regs->ip);
583     printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
584     regs->sp, regs->flags);
585     printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
586     diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
587     index 04ee1e2e4c02..52dbf1e400dc 100644
588     --- a/arch/x86/kernel/quirks.c
589     +++ b/arch/x86/kernel/quirks.c
590     @@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
591     return;
592    
593     pci_read_config_dword(nb_ht, 0x60, &val);
594     - node = val & 7;
595     + node = pcibus_to_node(dev->bus) | (val & 7);
596     /*
597     * Some hardware may return an invalid node ID,
598     * so check it first:
599     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
600     index c0bc80391e40..612c717747dd 100644
601     --- a/arch/x86/kvm/svm.c
602     +++ b/arch/x86/kvm/svm.c
603     @@ -2993,10 +2993,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
604     u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
605     /* instruction emulation calls kvm_set_cr8() */
606     r = cr_interception(svm);
607     - if (irqchip_in_kernel(svm->vcpu.kvm)) {
608     - clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
609     + if (irqchip_in_kernel(svm->vcpu.kvm))
610     return r;
611     - }
612     if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
613     return r;
614     kvm_run->exit_reason = KVM_EXIT_SET_TPR;
615     @@ -3558,6 +3556,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
616     if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
617     return;
618    
619     + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
620     +
621     if (irr == -1)
622     return;
623    
624     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
625     index d8b1ff68dbb9..5b90bbcad9f6 100644
626     --- a/arch/x86/mm/fault.c
627     +++ b/arch/x86/mm/fault.c
628     @@ -596,7 +596,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
629    
630     printk(KERN_CONT " at %p\n", (void *) address);
631     printk(KERN_ALERT "IP:");
632     - printk_address(regs->ip, 1);
633     + printk_address(regs->ip);
634    
635     dump_pagetable(address);
636     }
637     diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
638     index 9515f18898b2..f37dec579712 100644
639     --- a/drivers/acpi/blacklist.c
640     +++ b/drivers/acpi/blacklist.c
641     @@ -297,6 +297,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
642     DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
643     },
644     },
645     + {
646     + .callback = dmi_disable_osi_win8,
647     + .ident = "ThinkPad Edge E530",
648     + .matches = {
649     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
650     + DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
651     + },
652     + },
653     + {
654     + .callback = dmi_disable_osi_win8,
655     + .ident = "ThinkPad Edge E530",
656     + .matches = {
657     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
658     + DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
659     + },
660     + },
661     + {
662     + .callback = dmi_disable_osi_win8,
663     + .ident = "Acer Aspire V5-573G",
664     + .matches = {
665     + DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
666     + DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
667     + },
668     + },
669     + {
670     + .callback = dmi_disable_osi_win8,
671     + .ident = "Acer Aspire V5-572G",
672     + .matches = {
673     + DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
674     + DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
675     + },
676     + },
677     + {
678     + .callback = dmi_disable_osi_win8,
679     + .ident = "ThinkPad T431s",
680     + .matches = {
681     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
682     + DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
683     + },
684     + },
685     + {
686     + .callback = dmi_disable_osi_win8,
687     + .ident = "ThinkPad T430",
688     + .matches = {
689     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
690     + DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
691     + },
692     + },
693    
694     /*
695     * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
696     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
697     index 15986f32009e..3cc0b92e3544 100644
698     --- a/drivers/acpi/ec.c
699     +++ b/drivers/acpi/ec.c
700     @@ -70,6 +70,8 @@ enum ec_command {
701     #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
702     #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
703     #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
704     +#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
705     + * when trying to clear the EC */
706    
707     enum {
708     EC_FLAGS_QUERY_PENDING, /* Query is pending */
709     @@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
710     static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
711     static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
712     static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
713     +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
714    
715     /* --------------------------------------------------------------------------
716     Transaction Management
717     @@ -468,6 +471,29 @@ acpi_handle ec_get_handle(void)
718    
719     EXPORT_SYMBOL(ec_get_handle);
720    
721     +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
722     +
723     +/*
724     + * Clears stale _Q events that might have accumulated in the EC.
725     + * Run with locked ec mutex.
726     + */
727     +static void acpi_ec_clear(struct acpi_ec *ec)
728     +{
729     + int i, status;
730     + u8 value = 0;
731     +
732     + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
733     + status = acpi_ec_query_unlocked(ec, &value);
734     + if (status || !value)
735     + break;
736     + }
737     +
738     + if (unlikely(i == ACPI_EC_CLEAR_MAX))
739     + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
740     + else
741     + pr_info("%d stale EC events cleared\n", i);
742     +}
743     +
744     void acpi_ec_block_transactions(void)
745     {
746     struct acpi_ec *ec = first_ec;
747     @@ -491,6 +517,10 @@ void acpi_ec_unblock_transactions(void)
748     mutex_lock(&ec->mutex);
749     /* Allow transactions to be carried out again */
750     clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
751     +
752     + if (EC_FLAGS_CLEAR_ON_RESUME)
753     + acpi_ec_clear(ec);
754     +
755     mutex_unlock(&ec->mutex);
756     }
757    
758     @@ -848,6 +878,13 @@ static int acpi_ec_add(struct acpi_device *device)
759    
760     /* EC is fully operational, allow queries */
761     clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
762     +
763     + /* Clear stale _Q events if hardware might require that */
764     + if (EC_FLAGS_CLEAR_ON_RESUME) {
765     + mutex_lock(&ec->mutex);
766     + acpi_ec_clear(ec);
767     + mutex_unlock(&ec->mutex);
768     + }
769     return ret;
770     }
771    
772     @@ -949,6 +986,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
773     return 0;
774     }
775    
776     +/*
777     + * On some hardware it is necessary to clear events accumulated by the EC during
778     + * sleep. These ECs stop reporting GPEs until they are manually polled, if too
779     + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
780     + *
781     + * https://bugzilla.kernel.org/show_bug.cgi?id=44161
782     + *
783     + * Ideally, the EC should also be instructed NOT to accumulate events during
784     + * sleep (which Windows seems to do somehow), but the interface to control this
785     + * behaviour is not known at this time.
786     + *
787     + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
788     + * however it is very likely that other Samsung models are affected.
789     + *
790     + * On systems which don't accumulate _Q events during sleep, this extra check
791     + * should be harmless.
792     + */
793     +static int ec_clear_on_resume(const struct dmi_system_id *id)
794     +{
795     + pr_debug("Detected system needing EC poll on resume.\n");
796     + EC_FLAGS_CLEAR_ON_RESUME = 1;
797     + return 0;
798     +}
799     +
800     static struct dmi_system_id ec_dmi_table[] __initdata = {
801     {
802     ec_skip_dsdt_scan, "Compal JFL92", {
803     @@ -992,6 +1053,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
804     ec_validate_ecdt, "ASUS hardware", {
805     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
806     DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
807     + {
808     + ec_clear_on_resume, "Samsung hardware", {
809     + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
810     {},
811     };
812    
813     diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
814     index b7201fc6f1e1..0bdacc5e26a3 100644
815     --- a/drivers/acpi/resource.c
816     +++ b/drivers/acpi/resource.c
817     @@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
818     switch (ares->type) {
819     case ACPI_RESOURCE_TYPE_MEMORY24:
820     memory24 = &ares->data.memory24;
821     + if (!memory24->address_length)
822     + return false;
823     acpi_dev_get_memresource(res, memory24->minimum,
824     memory24->address_length,
825     memory24->write_protect);
826     break;
827     case ACPI_RESOURCE_TYPE_MEMORY32:
828     memory32 = &ares->data.memory32;
829     + if (!memory32->address_length)
830     + return false;
831     acpi_dev_get_memresource(res, memory32->minimum,
832     memory32->address_length,
833     memory32->write_protect);
834     break;
835     case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
836     fixed_memory32 = &ares->data.fixed_memory32;
837     + if (!fixed_memory32->address_length)
838     + return false;
839     acpi_dev_get_memresource(res, fixed_memory32->address,
840     fixed_memory32->address_length,
841     fixed_memory32->write_protect);
842     @@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
843     switch (ares->type) {
844     case ACPI_RESOURCE_TYPE_IO:
845     io = &ares->data.io;
846     + if (!io->address_length)
847     + return false;
848     acpi_dev_get_ioresource(res, io->minimum,
849     io->address_length,
850     io->io_decode);
851     break;
852     case ACPI_RESOURCE_TYPE_FIXED_IO:
853     fixed_io = &ares->data.fixed_io;
854     + if (!fixed_io->address_length)
855     + return false;
856     acpi_dev_get_ioresource(res, fixed_io->address,
857     fixed_io->address_length,
858     ACPI_DECODE_10);
859     diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
860     index 14df30580e15..99e5158456d8 100644
861     --- a/drivers/acpi/sleep.c
862     +++ b/drivers/acpi/sleep.c
863     @@ -75,6 +75,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
864     return 0;
865     }
866    
867     +static bool acpi_sleep_state_supported(u8 sleep_state)
868     +{
869     + acpi_status status;
870     + u8 type_a, type_b;
871     +
872     + status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
873     + return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
874     + || (acpi_gbl_FADT.sleep_control.address
875     + && acpi_gbl_FADT.sleep_status.address));
876     +}
877     +
878     #ifdef CONFIG_ACPI_SLEEP
879     static u32 acpi_target_sleep_state = ACPI_STATE_S0;
880    
881     @@ -608,15 +619,9 @@ static void acpi_sleep_suspend_setup(void)
882     {
883     int i;
884    
885     - for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
886     - acpi_status status;
887     - u8 type_a, type_b;
888     -
889     - status = acpi_get_sleep_type_data(i, &type_a, &type_b);
890     - if (ACPI_SUCCESS(status)) {
891     + for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
892     + if (acpi_sleep_state_supported(i))
893     sleep_states[i] = 1;
894     - }
895     - }
896    
897     suspend_set_ops(old_suspend_ordering ?
898     &acpi_suspend_ops_old : &acpi_suspend_ops);
899     @@ -747,11 +752,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
900    
901     static void acpi_sleep_hibernate_setup(void)
902     {
903     - acpi_status status;
904     - u8 type_a, type_b;
905     -
906     - status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
907     - if (ACPI_FAILURE(status))
908     + if (!acpi_sleep_state_supported(ACPI_STATE_S4))
909     return;
910    
911     hibernation_set_ops(old_suspend_ordering ?
912     @@ -800,8 +801,6 @@ static void acpi_power_off(void)
913    
914     int __init acpi_sleep_init(void)
915     {
916     - acpi_status status;
917     - u8 type_a, type_b;
918     char supported[ACPI_S_STATE_COUNT * 3 + 1];
919     char *pos = supported;
920     int i;
921     @@ -816,8 +815,7 @@ int __init acpi_sleep_init(void)
922     acpi_sleep_suspend_setup();
923     acpi_sleep_hibernate_setup();
924    
925     - status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
926     - if (ACPI_SUCCESS(status)) {
927     + if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
928     sleep_states[ACPI_STATE_S5] = 1;
929     pm_power_off_prepare = acpi_power_off_prepare;
930     pm_power_off = acpi_power_off;
931     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
932     index f3c361b5c5e5..c5d056e974f1 100644
933     --- a/drivers/ata/libata-core.c
934     +++ b/drivers/ata/libata-core.c
935     @@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
936    
937     /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
938     { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
939     + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
940    
941     /* Blacklist entries taken from Silicon Image 3124/3132
942     Windows driver .inf file - also several Linux problem reports */
943     @@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
944    
945     /* devices that don't properly handle queued TRIM commands */
946     { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
947     - { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
948     + { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
949    
950     /*
951     * Some WD SATA-I drives spin up and down erratically when the link
952     diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
953     index de4aa409abe2..2c6d5e118ac1 100644
954     --- a/drivers/firewire/core-device.c
955     +++ b/drivers/firewire/core-device.c
956     @@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
957     old->config_rom_retries = 0;
958     fw_notice(card, "rediscovered device %s\n", dev_name(dev));
959    
960     - PREPARE_DELAYED_WORK(&old->work, fw_device_update);
961     + old->workfn = fw_device_update;
962     fw_schedule_device_work(old, 0);
963    
964     if (current_node == card->root_node)
965     @@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
966     if (atomic_cmpxchg(&device->state,
967     FW_DEVICE_INITIALIZING,
968     FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
969     - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
970     + device->workfn = fw_device_shutdown;
971     fw_schedule_device_work(device, SHUTDOWN_DELAY);
972     } else {
973     fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
974     @@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
975     dev_name(&device->device), fw_rcode_string(ret));
976     gone:
977     atomic_set(&device->state, FW_DEVICE_GONE);
978     - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
979     + device->workfn = fw_device_shutdown;
980     fw_schedule_device_work(device, SHUTDOWN_DELAY);
981     out:
982     if (node_id == card->root_node->node_id)
983     fw_schedule_bm_work(card, 0);
984     }
985    
986     +static void fw_device_workfn(struct work_struct *work)
987     +{
988     + struct fw_device *device = container_of(to_delayed_work(work),
989     + struct fw_device, work);
990     + device->workfn(work);
991     +}
992     +
993     void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
994     {
995     struct fw_device *device;
996     @@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
997     * power-up after getting plugged in. We schedule the
998     * first config rom scan half a second after bus reset.
999     */
1000     - INIT_DELAYED_WORK(&device->work, fw_device_init);
1001     + device->workfn = fw_device_init;
1002     + INIT_DELAYED_WORK(&device->work, fw_device_workfn);
1003     fw_schedule_device_work(device, INITIAL_DELAY);
1004     break;
1005    
1006     @@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1007     if (atomic_cmpxchg(&device->state,
1008     FW_DEVICE_RUNNING,
1009     FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
1010     - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
1011     + device->workfn = fw_device_refresh;
1012     fw_schedule_device_work(device,
1013     device->is_local ? 0 : INITIAL_DELAY);
1014     }
1015     @@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1016     smp_wmb(); /* update node_id before generation */
1017     device->generation = card->generation;
1018     if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
1019     - PREPARE_DELAYED_WORK(&device->work, fw_device_update);
1020     + device->workfn = fw_device_update;
1021     fw_schedule_device_work(device, 0);
1022     }
1023     break;
1024     @@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1025     device = node->data;
1026     if (atomic_xchg(&device->state,
1027     FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
1028     - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
1029     + device->workfn = fw_device_shutdown;
1030     fw_schedule_device_work(device,
1031     list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
1032     }
1033     diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
1034     index 6b895986dc22..4af0a7bad7f2 100644
1035     --- a/drivers/firewire/net.c
1036     +++ b/drivers/firewire/net.c
1037     @@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
1038     if (rcode == RCODE_COMPLETE) {
1039     fwnet_transmit_packet_done(ptask);
1040     } else {
1041     - fwnet_transmit_packet_failed(ptask);
1042     -
1043     if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
1044     dev_err(&ptask->dev->netdev->dev,
1045     "fwnet_write_complete failed: %x (skipped %d)\n",
1046     @@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
1047    
1048     errors_skipped = 0;
1049     last_rcode = rcode;
1050     - } else
1051     + } else {
1052     errors_skipped++;
1053     + }
1054     + fwnet_transmit_packet_failed(ptask);
1055     }
1056     }
1057    
1058     diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
1059     index 6aa8a86cb83b..ee805a57b72d 100644
1060     --- a/drivers/firewire/ohci.c
1061     +++ b/drivers/firewire/ohci.c
1062     @@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
1063     #define QUIRK_NO_MSI 0x10
1064     #define QUIRK_TI_SLLZ059 0x20
1065     #define QUIRK_IR_WAKE 0x40
1066     -#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
1067    
1068     /* In case of multiple matches in ohci_quirks[], only the first one is used. */
1069     static const struct {
1070     @@ -303,10 +302,7 @@ static const struct {
1071     QUIRK_BE_HEADERS},
1072    
1073     {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
1074     - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
1075     -
1076     - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
1077     - QUIRK_PHY_LCTRL_TIMEOUT},
1078     + QUIRK_NO_MSI},
1079    
1080     {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
1081     QUIRK_RESET_PACKET},
1082     @@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
1083     ", disable MSI = " __stringify(QUIRK_NO_MSI)
1084     ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
1085     ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
1086     - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
1087     ")");
1088    
1089     #define OHCI_PARAM_DEBUG_AT_AR 1
1090     @@ -2295,9 +2290,6 @@ static int ohci_enable(struct fw_card *card,
1091     * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
1092     * cannot actually use the phy at that time. These need tens of
1093     * millisecods pause between LPS write and first phy access too.
1094     - *
1095     - * But do not wait for 50msec on Agere/LSI cards. Their phy
1096     - * arbitration state machine may time out during such a long wait.
1097     */
1098    
1099     reg_write(ohci, OHCI1394_HCControlSet,
1100     @@ -2305,11 +2297,8 @@ static int ohci_enable(struct fw_card *card,
1101     OHCI1394_HCControl_postedWriteEnable);
1102     flush_writes(ohci);
1103    
1104     - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
1105     + for (lps = 0, i = 0; !lps && i < 3; i++) {
1106     msleep(50);
1107     -
1108     - for (lps = 0, i = 0; !lps && i < 150; i++) {
1109     - msleep(1);
1110     lps = reg_read(ohci, OHCI1394_HCControlSet) &
1111     OHCI1394_HCControl_LPS;
1112     }
1113     diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
1114     index 281029daf98c..7aef911fdc71 100644
1115     --- a/drivers/firewire/sbp2.c
1116     +++ b/drivers/firewire/sbp2.c
1117     @@ -146,6 +146,7 @@ struct sbp2_logical_unit {
1118     */
1119     int generation;
1120     int retries;
1121     + work_func_t workfn;
1122     struct delayed_work work;
1123     bool has_sdev;
1124     bool blocked;
1125     @@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
1126     /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
1127     sbp2_set_busy_timeout(lu);
1128    
1129     - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
1130     + lu->workfn = sbp2_reconnect;
1131     sbp2_agent_reset(lu);
1132    
1133     /* This was a re-login. */
1134     @@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
1135     * If a bus reset happened, sbp2_update will have requeued
1136     * lu->work already. Reset the work from reconnect to login.
1137     */
1138     - PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
1139     + lu->workfn = sbp2_login;
1140     }
1141    
1142     static void sbp2_reconnect(struct work_struct *work)
1143     @@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
1144     lu->retries++ >= 5) {
1145     dev_err(tgt_dev(tgt), "failed to reconnect\n");
1146     lu->retries = 0;
1147     - PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
1148     + lu->workfn = sbp2_login;
1149     }
1150     sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
1151    
1152     @@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
1153     sbp2_conditionally_unblock(lu);
1154     }
1155    
1156     +static void sbp2_lu_workfn(struct work_struct *work)
1157     +{
1158     + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
1159     + struct sbp2_logical_unit, work);
1160     + lu->workfn(work);
1161     +}
1162     +
1163     static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
1164     {
1165     struct sbp2_logical_unit *lu;
1166     @@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
1167     lu->blocked = false;
1168     ++tgt->dont_block;
1169     INIT_LIST_HEAD(&lu->orb_list);
1170     - INIT_DELAYED_WORK(&lu->work, sbp2_login);
1171     + lu->workfn = sbp2_login;
1172     + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
1173    
1174     list_add_tail(&lu->link, &tgt->lu_list);
1175     return 0;
1176     diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1177     index 2ad27880cd04..2bef0e4cfda8 100644
1178     --- a/drivers/gpu/drm/i915/i915_drv.c
1179     +++ b/drivers/gpu/drm/i915/i915_drv.c
1180     @@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
1181     void intel_detect_pch(struct drm_device *dev)
1182     {
1183     struct drm_i915_private *dev_priv = dev->dev_private;
1184     - struct pci_dev *pch;
1185     + struct pci_dev *pch = NULL;
1186    
1187     /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
1188     * (which really amounts to a PCH but no South Display).
1189     @@ -397,12 +397,9 @@ void intel_detect_pch(struct drm_device *dev)
1190     * all the ISA bridge devices and check for the first match, instead
1191     * of only checking the first one.
1192     */
1193     - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1194     - while (pch) {
1195     - struct pci_dev *curr = pch;
1196     + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
1197     if (pch->vendor == PCI_VENDOR_ID_INTEL) {
1198     - unsigned short id;
1199     - id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
1200     + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
1201     dev_priv->pch_id = id;
1202    
1203     if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
1204     @@ -428,18 +425,16 @@ void intel_detect_pch(struct drm_device *dev)
1205     DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
1206     WARN_ON(!IS_HASWELL(dev));
1207     WARN_ON(!IS_ULT(dev));
1208     - } else {
1209     - goto check_next;
1210     - }
1211     - pci_dev_put(pch);
1212     + } else
1213     + continue;
1214     +
1215     break;
1216     }
1217     -check_next:
1218     - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
1219     - pci_dev_put(curr);
1220     }
1221     if (!pch)
1222     - DRM_DEBUG_KMS("No PCH found?\n");
1223     + DRM_DEBUG_KMS("No PCH found.\n");
1224     +
1225     + pci_dev_put(pch);
1226     }
1227    
1228     bool i915_semaphore_is_enabled(struct drm_device *dev)
1229     diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1230     index 4148cc85bf7f..4d302f3dec89 100644
1231     --- a/drivers/gpu/drm/i915/intel_hdmi.c
1232     +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1233     @@ -834,7 +834,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
1234     {
1235     struct drm_device *dev = intel_hdmi_to_dev(hdmi);
1236    
1237     - if (IS_G4X(dev))
1238     + if (!hdmi->has_hdmi_sink || IS_G4X(dev))
1239     return 165000;
1240     else if (IS_HASWELL(dev))
1241     return 300000;
1242     @@ -887,8 +887,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1243     * outputs. We also need to check that the higher clock still fits
1244     * within limits.
1245     */
1246     - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
1247     - && HAS_PCH_SPLIT(dev)) {
1248     + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
1249     + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
1250     DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
1251     desired_bpp = 12*3;
1252    
1253     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1254     index 5e891b226acf..7bb7074a131f 100644
1255     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1256     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1257     @@ -1313,7 +1313,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1258     }
1259     if (is_dp)
1260     args.v5.ucLaneNum = dp_lane_count;
1261     - else if (radeon_encoder->pixel_clock > 165000)
1262     + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
1263     args.v5.ucLaneNum = 8;
1264     else
1265     args.v5.ucLaneNum = 4;
1266     diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1267     index 31f5f0e88328..25370ac56b4b 100644
1268     --- a/drivers/gpu/drm/radeon/cik.c
1269     +++ b/drivers/gpu/drm/radeon/cik.c
1270     @@ -3517,8 +3517,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
1271     {
1272     if (enable)
1273     WREG32(CP_MEC_CNTL, 0);
1274     - else
1275     + else {
1276     WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
1277     + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1278     + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1279     + }
1280     udelay(50);
1281     }
1282    
1283     @@ -6995,26 +6998,7 @@ static int cik_startup(struct radeon_device *rdev)
1284    
1285     cik_mc_program(rdev);
1286    
1287     - if (rdev->flags & RADEON_IS_IGP) {
1288     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1289     - !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
1290     - r = cik_init_microcode(rdev);
1291     - if (r) {
1292     - DRM_ERROR("Failed to load firmware!\n");
1293     - return r;
1294     - }
1295     - }
1296     - } else {
1297     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1298     - !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
1299     - !rdev->mc_fw) {
1300     - r = cik_init_microcode(rdev);
1301     - if (r) {
1302     - DRM_ERROR("Failed to load firmware!\n");
1303     - return r;
1304     - }
1305     - }
1306     -
1307     + if (!(rdev->flags & RADEON_IS_IGP)) {
1308     r = ci_mc_load_microcode(rdev);
1309     if (r) {
1310     DRM_ERROR("Failed to load MC firmware!\n");
1311     @@ -7327,6 +7311,27 @@ int cik_init(struct radeon_device *rdev)
1312     if (r)
1313     return r;
1314    
1315     + if (rdev->flags & RADEON_IS_IGP) {
1316     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1317     + !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
1318     + r = cik_init_microcode(rdev);
1319     + if (r) {
1320     + DRM_ERROR("Failed to load firmware!\n");
1321     + return r;
1322     + }
1323     + }
1324     + } else {
1325     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1326     + !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
1327     + !rdev->mc_fw) {
1328     + r = cik_init_microcode(rdev);
1329     + if (r) {
1330     + DRM_ERROR("Failed to load firmware!\n");
1331     + return r;
1332     + }
1333     + }
1334     + }
1335     +
1336     ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1337     ring->ring_obj = NULL;
1338     r600_ring_init(rdev, ring, 1024 * 1024);
1339     diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
1340     index aaf7ffce8b5b..d565f4076a23 100644
1341     --- a/drivers/gpu/drm/radeon/cik_sdma.c
1342     +++ b/drivers/gpu/drm/radeon/cik_sdma.c
1343     @@ -174,6 +174,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
1344     WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
1345     WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
1346     }
1347     + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1348     + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1349     }
1350    
1351     /**
1352     @@ -201,6 +203,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
1353     u32 me_cntl, reg_offset;
1354     int i;
1355    
1356     + if (enable == false) {
1357     + cik_sdma_gfx_stop(rdev);
1358     + cik_sdma_rlc_stop(rdev);
1359     + }
1360     +
1361     for (i = 0; i < 2; i++) {
1362     if (i == 0)
1363     reg_offset = SDMA0_REGISTER_OFFSET;
1364     @@ -328,10 +335,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
1365     if (!rdev->sdma_fw)
1366     return -EINVAL;
1367    
1368     - /* stop the gfx rings and rlc compute queues */
1369     - cik_sdma_gfx_stop(rdev);
1370     - cik_sdma_rlc_stop(rdev);
1371     -
1372     /* halt the MEs */
1373     cik_sdma_enable(rdev, false);
1374    
1375     @@ -400,9 +403,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
1376     */
1377     void cik_sdma_fini(struct radeon_device *rdev)
1378     {
1379     - /* stop the gfx rings and rlc compute queues */
1380     - cik_sdma_gfx_stop(rdev);
1381     - cik_sdma_rlc_stop(rdev);
1382     /* halt the MEs */
1383     cik_sdma_enable(rdev, false);
1384     radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1385     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1386     index 5f07d1bfbd76..c429bb9b17b6 100644
1387     --- a/drivers/gpu/drm/radeon/evergreen.c
1388     +++ b/drivers/gpu/drm/radeon/evergreen.c
1389     @@ -5061,26 +5061,11 @@ static int evergreen_startup(struct radeon_device *rdev)
1390     evergreen_mc_program(rdev);
1391    
1392     if (ASIC_IS_DCE5(rdev)) {
1393     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1394     - r = ni_init_microcode(rdev);
1395     - if (r) {
1396     - DRM_ERROR("Failed to load firmware!\n");
1397     - return r;
1398     - }
1399     - }
1400     r = ni_mc_load_microcode(rdev);
1401     if (r) {
1402     DRM_ERROR("Failed to load MC firmware!\n");
1403     return r;
1404     }
1405     - } else {
1406     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1407     - r = r600_init_microcode(rdev);
1408     - if (r) {
1409     - DRM_ERROR("Failed to load firmware!\n");
1410     - return r;
1411     - }
1412     - }
1413     }
1414    
1415     if (rdev->flags & RADEON_IS_AGP) {
1416     @@ -5308,6 +5293,24 @@ int evergreen_init(struct radeon_device *rdev)
1417     if (r)
1418     return r;
1419    
1420     + if (ASIC_IS_DCE5(rdev)) {
1421     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1422     + r = ni_init_microcode(rdev);
1423     + if (r) {
1424     + DRM_ERROR("Failed to load firmware!\n");
1425     + return r;
1426     + }
1427     + }
1428     + } else {
1429     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1430     + r = r600_init_microcode(rdev);
1431     + if (r) {
1432     + DRM_ERROR("Failed to load firmware!\n");
1433     + return r;
1434     + }
1435     + }
1436     + }
1437     +
1438     rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1439     r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1440    
1441     diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
1442     index 76ada8cfe902..3a03ba37d043 100644
1443     --- a/drivers/gpu/drm/radeon/evergreen_smc.h
1444     +++ b/drivers/gpu/drm/radeon/evergreen_smc.h
1445     @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
1446    
1447     #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
1448    
1449     -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
1450     +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
1451     #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
1452     #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
1453    
1454     diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1455     index b2dbd48f7f28..474343adf262 100644
1456     --- a/drivers/gpu/drm/radeon/ni.c
1457     +++ b/drivers/gpu/drm/radeon/ni.c
1458     @@ -1881,23 +1881,7 @@ static int cayman_startup(struct radeon_device *rdev)
1459    
1460     evergreen_mc_program(rdev);
1461    
1462     - if (rdev->flags & RADEON_IS_IGP) {
1463     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1464     - r = ni_init_microcode(rdev);
1465     - if (r) {
1466     - DRM_ERROR("Failed to load firmware!\n");
1467     - return r;
1468     - }
1469     - }
1470     - } else {
1471     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1472     - r = ni_init_microcode(rdev);
1473     - if (r) {
1474     - DRM_ERROR("Failed to load firmware!\n");
1475     - return r;
1476     - }
1477     - }
1478     -
1479     + if (!(rdev->flags & RADEON_IS_IGP)) {
1480     r = ni_mc_load_microcode(rdev);
1481     if (r) {
1482     DRM_ERROR("Failed to load MC firmware!\n");
1483     @@ -2148,6 +2132,24 @@ int cayman_init(struct radeon_device *rdev)
1484     if (r)
1485     return r;
1486    
1487     + if (rdev->flags & RADEON_IS_IGP) {
1488     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1489     + r = ni_init_microcode(rdev);
1490     + if (r) {
1491     + DRM_ERROR("Failed to load firmware!\n");
1492     + return r;
1493     + }
1494     + }
1495     + } else {
1496     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1497     + r = ni_init_microcode(rdev);
1498     + if (r) {
1499     + DRM_ERROR("Failed to load firmware!\n");
1500     + return r;
1501     + }
1502     + }
1503     + }
1504     +
1505     ring->ring_obj = NULL;
1506     r600_ring_init(rdev, ring, 1024 * 1024);
1507    
1508     diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1509     index 67da7e285cde..5af2729f2055 100644
1510     --- a/drivers/gpu/drm/radeon/r600.c
1511     +++ b/drivers/gpu/drm/radeon/r600.c
1512     @@ -2726,14 +2726,6 @@ static int r600_startup(struct radeon_device *rdev)
1513    
1514     r600_mc_program(rdev);
1515    
1516     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1517     - r = r600_init_microcode(rdev);
1518     - if (r) {
1519     - DRM_ERROR("Failed to load firmware!\n");
1520     - return r;
1521     - }
1522     - }
1523     -
1524     if (rdev->flags & RADEON_IS_AGP) {
1525     r600_agp_enable(rdev);
1526     } else {
1527     @@ -2921,6 +2913,14 @@ int r600_init(struct radeon_device *rdev)
1528     if (r)
1529     return r;
1530    
1531     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1532     + r = r600_init_microcode(rdev);
1533     + if (r) {
1534     + DRM_ERROR("Failed to load firmware!\n");
1535     + return r;
1536     + }
1537     + }
1538     +
1539     rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1540     r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1541    
1542     diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1543     index 71245d6f34a2..84323c943bfc 100644
1544     --- a/drivers/gpu/drm/radeon/radeon_ttm.c
1545     +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1546     @@ -712,6 +712,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
1547     DRM_ERROR("Failed initializing VRAM heap.\n");
1548     return r;
1549     }
1550     + /* Change the size here instead of the init above so only lpfn is affected */
1551     + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1552     +
1553     r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
1554     RADEON_GEM_DOMAIN_VRAM,
1555     NULL, &rdev->stollen_vga_memory);
1556     diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1557     index 99dd9d8fcf72..c4960ad71e5e 100644
1558     --- a/drivers/gpu/drm/radeon/rv770.c
1559     +++ b/drivers/gpu/drm/radeon/rv770.c
1560     @@ -1665,14 +1665,6 @@ static int rv770_startup(struct radeon_device *rdev)
1561    
1562     rv770_mc_program(rdev);
1563    
1564     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1565     - r = r600_init_microcode(rdev);
1566     - if (r) {
1567     - DRM_ERROR("Failed to load firmware!\n");
1568     - return r;
1569     - }
1570     - }
1571     -
1572     if (rdev->flags & RADEON_IS_AGP) {
1573     rv770_agp_enable(rdev);
1574     } else {
1575     @@ -1876,6 +1868,14 @@ int rv770_init(struct radeon_device *rdev)
1576     if (r)
1577     return r;
1578    
1579     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1580     + r = r600_init_microcode(rdev);
1581     + if (r) {
1582     + DRM_ERROR("Failed to load firmware!\n");
1583     + return r;
1584     + }
1585     + }
1586     +
1587     rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1588     r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1589    
1590     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1591     index 8277ee01a7b4..873e0a608948 100644
1592     --- a/drivers/gpu/drm/radeon/si.c
1593     +++ b/drivers/gpu/drm/radeon/si.c
1594     @@ -6387,15 +6387,6 @@ static int si_startup(struct radeon_device *rdev)
1595    
1596     si_mc_program(rdev);
1597    
1598     - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1599     - !rdev->rlc_fw || !rdev->mc_fw) {
1600     - r = si_init_microcode(rdev);
1601     - if (r) {
1602     - DRM_ERROR("Failed to load firmware!\n");
1603     - return r;
1604     - }
1605     - }
1606     -
1607     r = si_mc_load_microcode(rdev);
1608     if (r) {
1609     DRM_ERROR("Failed to load MC firmware!\n");
1610     @@ -6663,6 +6654,15 @@ int si_init(struct radeon_device *rdev)
1611     if (r)
1612     return r;
1613    
1614     + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1615     + !rdev->rlc_fw || !rdev->mc_fw) {
1616     + r = si_init_microcode(rdev);
1617     + if (r) {
1618     + DRM_ERROR("Failed to load firmware!\n");
1619     + return r;
1620     + }
1621     + }
1622     +
1623     ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1624     ring->ring_obj = NULL;
1625     r600_ring_init(rdev, ring, 1024 * 1024);
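
/*
 * A minimal sketch, not from the patch itself: the radeon hunks above all
 * move the "request microcode from userspace" step out of *_startup() --
 * which also runs on every resume -- into the one-time *_init() path,
 * leaving startup to program only the already-cached images. The shape of
 * that split is illustrated below; struct mydev, mydev_request_ucode() and
 * mydev_program_ucode() are hypothetical names, not radeon API.
 */
#include <linux/errno.h>

struct mydev {
        const void *ucode;      /* cached firmware image, NULL until fetched */
};

static int mydev_request_ucode(struct mydev *d)
{
        d->ucode = "";          /* stand-in for request_firmware() */
        return 0;
}

static int mydev_program_ucode(struct mydev *d)
{
        return 0;               /* stand-in for the MMIO upload */
}

static int mydev_init(struct mydev *d)
{
        /* one-time driver load: fetch the blob exactly once */
        if (!d->ucode)
                return mydev_request_ucode(d);
        return 0;
}

static int mydev_startup(struct mydev *d)
{
        /* also runs on resume: rely on the cached image only */
        if (!d->ucode)
                return -EINVAL;
        return mydev_program_ucode(d);
}
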
1626     diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1627     index 2332aa1bf93c..83895f2d16c6 100644
1628     --- a/drivers/gpu/drm/radeon/si_dpm.c
1629     +++ b/drivers/gpu/drm/radeon/si_dpm.c
1630     @@ -2396,7 +2396,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
1631     if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
1632     enable_sq_ramping = false;
1633    
1634     - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1635     + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1636     enable_sq_ramping = false;
1637    
1638     for (i = 0; i < state->performance_level_count; i++) {
1639     @@ -5409,7 +5409,7 @@ static void si_populate_mc_reg_addresses(struct radeon_device *rdev,
1640    
1641     for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
1642     if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
1643     - if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
1644     + if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
1645     break;
1646     mc_reg_table->address[i].s0 =
1647     cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
1648     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1649     index 729805322883..acd0fe0c80d2 100644
1650     --- a/drivers/gpu/drm/ttm/ttm_bo.c
1651     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
1652     @@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
1653    
1654     moved:
1655     if (bo->evicted) {
1656     - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
1657     - if (ret)
1658     - pr_err("Can not flush read caches\n");
1659     + if (bdev->driver->invalidate_caches) {
1660     + ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
1661     + if (ret)
1662     + pr_err("Can not flush read caches\n");
1663     + }
1664     bo->evicted = false;
1665     }
1666    
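
/*
 * A minimal sketch, not from the patch itself: the ttm_bo.c hunk above is
 * the standard guard for an optional driver hook -- call it only when the
 * driver provides one, and treat absence as success. Names below are
 * hypothetical.
 */
struct my_driver_ops {
        int (*invalidate_caches)(void *dev, unsigned int placement);
};

static int my_invalidate(const struct my_driver_ops *ops, void *dev,
                         unsigned int placement)
{
        if (!ops->invalidate_caches)
                return 0;       /* hook is optional; absence is not an error */
        return ops->invalidate_caches(dev, placement);
}
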
1667     diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
1668     index ff758eded96f..cd30d98ac510 100644
1669     --- a/drivers/i2c/busses/Kconfig
1670     +++ b/drivers/i2c/busses/Kconfig
1671     @@ -376,7 +376,7 @@ config I2C_CBUS_GPIO
1672    
1673     config I2C_CPM
1674     tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
1675     - depends on (CPM1 || CPM2) && OF_I2C
1676     + depends on CPM1 || CPM2
1677     help
1678     This supports the use of the I2C interface on Freescale
1679     processors with CPM1 or CPM2.
1680     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1681     index ea7051ee1493..ba93ef85652d 100644
1682     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1683     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1684     @@ -496,8 +496,8 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1685     isert_conn->state = ISER_CONN_INIT;
1686     INIT_LIST_HEAD(&isert_conn->conn_accept_node);
1687     init_completion(&isert_conn->conn_login_comp);
1688     - init_waitqueue_head(&isert_conn->conn_wait);
1689     - init_waitqueue_head(&isert_conn->conn_wait_comp_err);
1690     + init_completion(&isert_conn->conn_wait);
1691     + init_completion(&isert_conn->conn_wait_comp_err);
1692     kref_init(&isert_conn->conn_kref);
1693     kref_get(&isert_conn->conn_kref);
1694     mutex_init(&isert_conn->conn_mutex);
1695     @@ -669,11 +669,11 @@ isert_disconnect_work(struct work_struct *work)
1696    
1697     pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1698     mutex_lock(&isert_conn->conn_mutex);
1699     - isert_conn->state = ISER_CONN_DOWN;
1700     + if (isert_conn->state == ISER_CONN_UP)
1701     + isert_conn->state = ISER_CONN_TERMINATING;
1702    
1703     if (isert_conn->post_recv_buf_count == 0 &&
1704     atomic_read(&isert_conn->post_send_buf_count) == 0) {
1705     - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
1706     mutex_unlock(&isert_conn->conn_mutex);
1707     goto wake_up;
1708     }
1709     @@ -693,7 +693,7 @@ isert_disconnect_work(struct work_struct *work)
1710     mutex_unlock(&isert_conn->conn_mutex);
1711    
1712     wake_up:
1713     - wake_up(&isert_conn->conn_wait);
1714     + complete(&isert_conn->conn_wait);
1715     isert_put_conn(isert_conn);
1716     }
1717    
1718     @@ -1427,7 +1427,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1719     case ISCSI_OP_SCSI_CMD:
1720     spin_lock_bh(&conn->cmd_lock);
1721     if (!list_empty(&cmd->i_conn_node))
1722     - list_del(&cmd->i_conn_node);
1723     + list_del_init(&cmd->i_conn_node);
1724     spin_unlock_bh(&conn->cmd_lock);
1725    
1726     if (cmd->data_direction == DMA_TO_DEVICE)
1727     @@ -1439,7 +1439,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1728     case ISCSI_OP_SCSI_TMFUNC:
1729     spin_lock_bh(&conn->cmd_lock);
1730     if (!list_empty(&cmd->i_conn_node))
1731     - list_del(&cmd->i_conn_node);
1732     + list_del_init(&cmd->i_conn_node);
1733     spin_unlock_bh(&conn->cmd_lock);
1734    
1735     transport_generic_free_cmd(&cmd->se_cmd, 0);
1736     @@ -1449,7 +1449,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1737     case ISCSI_OP_TEXT:
1738     spin_lock_bh(&conn->cmd_lock);
1739     if (!list_empty(&cmd->i_conn_node))
1740     - list_del(&cmd->i_conn_node);
1741     + list_del_init(&cmd->i_conn_node);
1742     spin_unlock_bh(&conn->cmd_lock);
1743    
1744     /*
1745     @@ -1512,6 +1512,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1746     iscsit_stop_dataout_timer(cmd);
1747     device->unreg_rdma_mem(isert_cmd, isert_conn);
1748     cmd->write_data_done = wr->cur_rdma_length;
1749     + wr->send_wr_num = 0;
1750    
1751     pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1752     spin_lock_bh(&cmd->istate_lock);
1753     @@ -1552,7 +1553,7 @@ isert_do_control_comp(struct work_struct *work)
1754     pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1755     /*
1756     * Call atomic_dec(&isert_conn->post_send_buf_count)
1757     - * from isert_free_conn()
1758     + * from isert_wait_conn()
1759     */
1760     isert_conn->logout_posted = true;
1761     iscsit_logout_post_handler(cmd, cmd->conn);
1762     @@ -1576,6 +1577,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1763     struct ib_device *ib_dev)
1764     {
1765     struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1766     + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1767    
1768     if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1769     cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1770     @@ -1587,7 +1589,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1771     queue_work(isert_comp_wq, &isert_cmd->comp_work);
1772     return;
1773     }
1774     - atomic_dec(&isert_conn->post_send_buf_count);
1775     + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1776    
1777     cmd->i_state = ISTATE_SENT_STATUS;
1778     isert_completion_put(tx_desc, isert_cmd, ib_dev);
1779     @@ -1625,7 +1627,7 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1780     case ISER_IB_RDMA_READ:
1781     pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1782    
1783     - atomic_dec(&isert_conn->post_send_buf_count);
1784     + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1785     isert_completion_rdma_read(tx_desc, isert_cmd);
1786     break;
1787     default:
1788     @@ -1636,31 +1638,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
1789     }
1790    
1791     static void
1792     -isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1793     +isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1794     {
1795     struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1796     + struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1797     +
1798     + if (!isert_cmd)
1799     + isert_unmap_tx_desc(tx_desc, ib_dev);
1800     + else
1801     + isert_completion_put(tx_desc, isert_cmd, ib_dev);
1802     +}
1803     +
1804     +static void
1805     +isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1806     +{
1807     + struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1808     + struct iscsi_conn *conn = isert_conn->conn;
1809    
1810     - if (tx_desc) {
1811     - struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1812     + if (isert_conn->post_recv_buf_count)
1813     + return;
1814    
1815     - if (!isert_cmd)
1816     - isert_unmap_tx_desc(tx_desc, ib_dev);
1817     - else
1818     - isert_completion_put(tx_desc, isert_cmd, ib_dev);
1819     + if (conn->sess) {
1820     + target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1821     + target_wait_for_sess_cmds(conn->sess->se_sess);
1822     }
1823    
1824     - if (isert_conn->post_recv_buf_count == 0 &&
1825     - atomic_read(&isert_conn->post_send_buf_count) == 0) {
1826     - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1827     - pr_debug("Calling wake_up from isert_cq_comp_err\n");
1828     + while (atomic_read(&isert_conn->post_send_buf_count))
1829     + msleep(3000);
1830    
1831     - mutex_lock(&isert_conn->conn_mutex);
1832     - if (isert_conn->state != ISER_CONN_DOWN)
1833     - isert_conn->state = ISER_CONN_TERMINATING;
1834     - mutex_unlock(&isert_conn->conn_mutex);
1835     + mutex_lock(&isert_conn->conn_mutex);
1836     + isert_conn->state = ISER_CONN_DOWN;
1837     + mutex_unlock(&isert_conn->conn_mutex);
1838    
1839     - wake_up(&isert_conn->conn_wait_comp_err);
1840     - }
1841     + complete(&isert_conn->conn_wait_comp_err);
1842     }
1843    
1844     static void
1845     @@ -1685,8 +1695,11 @@ isert_cq_tx_work(struct work_struct *work)
1846     pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1847     pr_debug("TX wc.status: 0x%08x\n", wc.status);
1848     pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1849     - atomic_dec(&isert_conn->post_send_buf_count);
1850     - isert_cq_comp_err(tx_desc, isert_conn);
1851     +
1852     + if (wc.wr_id != ISER_FASTREG_LI_WRID) {
1853     + atomic_dec(&isert_conn->post_send_buf_count);
1854     + isert_cq_tx_comp_err(tx_desc, isert_conn);
1855     + }
1856     }
1857     }
1858    
1859     @@ -1729,7 +1742,7 @@ isert_cq_rx_work(struct work_struct *work)
1860     wc.vendor_err);
1861     }
1862     isert_conn->post_recv_buf_count--;
1863     - isert_cq_comp_err(NULL, isert_conn);
1864     + isert_cq_rx_comp_err(isert_conn);
1865     }
1866     }
1867    
1868     @@ -2151,6 +2164,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
1869    
1870     if (!fr_desc->valid) {
1871     memset(&inv_wr, 0, sizeof(inv_wr));
1872     + inv_wr.wr_id = ISER_FASTREG_LI_WRID;
1873     inv_wr.opcode = IB_WR_LOCAL_INV;
1874     inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
1875     wr = &inv_wr;
1876     @@ -2161,6 +2175,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
1877    
1878     /* Prepare FASTREG WR */
1879     memset(&fr_wr, 0, sizeof(fr_wr));
1880     + fr_wr.wr_id = ISER_FASTREG_LI_WRID;
1881     fr_wr.opcode = IB_WR_FAST_REG_MR;
1882     fr_wr.wr.fast_reg.iova_start =
1883     fr_desc->data_frpl->page_list[0] + page_off;
1884     @@ -2325,12 +2340,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1885     isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1886     isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
1887    
1888     - atomic_inc(&isert_conn->post_send_buf_count);
1889     + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1890    
1891     rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1892     if (rc) {
1893     pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
1894     - atomic_dec(&isert_conn->post_send_buf_count);
1895     + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1896     }
1897     pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
1898     isert_cmd);
1899     @@ -2358,12 +2373,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1900     return rc;
1901     }
1902    
1903     - atomic_inc(&isert_conn->post_send_buf_count);
1904     + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
1905    
1906     rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
1907     if (rc) {
1908     pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
1909     - atomic_dec(&isert_conn->post_send_buf_count);
1910     + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1911     }
1912     pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
1913     isert_cmd);
1914     @@ -2650,22 +2665,11 @@ isert_free_np(struct iscsi_np *np)
1915     kfree(isert_np);
1916     }
1917    
1918     -static int isert_check_state(struct isert_conn *isert_conn, int state)
1919     -{
1920     - int ret;
1921     -
1922     - mutex_lock(&isert_conn->conn_mutex);
1923     - ret = (isert_conn->state == state);
1924     - mutex_unlock(&isert_conn->conn_mutex);
1925     -
1926     - return ret;
1927     -}
1928     -
1929     -static void isert_free_conn(struct iscsi_conn *conn)
1930     +static void isert_wait_conn(struct iscsi_conn *conn)
1931     {
1932     struct isert_conn *isert_conn = conn->context;
1933    
1934     - pr_debug("isert_free_conn: Starting \n");
1935     + pr_debug("isert_wait_conn: Starting \n");
1936     /*
1937     * Decrement post_send_buf_count for special case when called
1938     * from isert_do_control_comp() -> iscsit_logout_post_handler()
1939     @@ -2675,38 +2679,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
1940     atomic_dec(&isert_conn->post_send_buf_count);
1941    
1942     if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
1943     - pr_debug("Calling rdma_disconnect from isert_free_conn\n");
1944     + pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
1945     rdma_disconnect(isert_conn->conn_cm_id);
1946     }
1947     /*
1948     * Only wait for conn_wait_comp_err if the isert_conn made it
1949     * into full feature phase..
1950     */
1951     - if (isert_conn->state == ISER_CONN_UP) {
1952     - pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
1953     - isert_conn->state);
1954     - mutex_unlock(&isert_conn->conn_mutex);
1955     -
1956     - wait_event(isert_conn->conn_wait_comp_err,
1957     - (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
1958     -
1959     - wait_event(isert_conn->conn_wait,
1960     - (isert_check_state(isert_conn, ISER_CONN_DOWN)));
1961     -
1962     - isert_put_conn(isert_conn);
1963     - return;
1964     - }
1965     if (isert_conn->state == ISER_CONN_INIT) {
1966     mutex_unlock(&isert_conn->conn_mutex);
1967     - isert_put_conn(isert_conn);
1968     return;
1969     }
1970     - pr_debug("isert_free_conn: wait_event conn_wait %d\n",
1971     - isert_conn->state);
1972     + if (isert_conn->state == ISER_CONN_UP)
1973     + isert_conn->state = ISER_CONN_TERMINATING;
1974     mutex_unlock(&isert_conn->conn_mutex);
1975    
1976     - wait_event(isert_conn->conn_wait,
1977     - (isert_check_state(isert_conn, ISER_CONN_DOWN)));
1978     + wait_for_completion(&isert_conn->conn_wait_comp_err);
1979     +
1980     + wait_for_completion(&isert_conn->conn_wait);
1981     +}
1982     +
1983     +static void isert_free_conn(struct iscsi_conn *conn)
1984     +{
1985     + struct isert_conn *isert_conn = conn->context;
1986    
1987     isert_put_conn(isert_conn);
1988     }
1989     @@ -2719,6 +2714,7 @@ static struct iscsit_transport iser_target_transport = {
1990     .iscsit_setup_np = isert_setup_np,
1991     .iscsit_accept_np = isert_accept_np,
1992     .iscsit_free_np = isert_free_np,
1993     + .iscsit_wait_conn = isert_wait_conn,
1994     .iscsit_free_conn = isert_free_conn,
1995     .iscsit_get_login_rx = isert_get_login_rx,
1996     .iscsit_put_login_tx = isert_put_login_tx,
1997     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1998     index 631f2090f0b8..52f4bf0d1a0f 100644
1999     --- a/drivers/infiniband/ulp/isert/ib_isert.h
2000     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
2001     @@ -6,6 +6,7 @@
2002    
2003     #define ISERT_RDMA_LISTEN_BACKLOG 10
2004     #define ISCSI_ISER_SG_TABLESIZE 256
2005     +#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
2006    
2007     enum isert_desc_type {
2008     ISCSI_TX_CONTROL,
2009     @@ -114,8 +115,8 @@ struct isert_conn {
2010     struct isert_device *conn_device;
2011     struct work_struct conn_logout_work;
2012     struct mutex conn_mutex;
2013     - wait_queue_head_t conn_wait;
2014     - wait_queue_head_t conn_wait_comp_err;
2015     + struct completion conn_wait;
2016     + struct completion conn_wait_comp_err;
2017     struct kref conn_kref;
2018     struct list_head conn_frwr_pool;
2019     int conn_frwr_pool_size;
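
/*
 * A minimal sketch, not from the patch itself: the ib_isert hunks above
 * replace wait_queue_head_t plus wait_event()-on-a-state-field with struct
 * completion. A completion latches its event, so a complete() issued before
 * the waiter blocks is never lost, and the removed isert_check_state()
 * condition helper becomes unnecessary. Names below (my_conn,
 * teardown_done) are hypothetical.
 */
#include <linux/completion.h>

struct my_conn {
        struct completion teardown_done;
};

static void my_conn_init(struct my_conn *c)
{
        init_completion(&c->teardown_done);
}

/* Signaling side: safe even if nobody is waiting yet. */
static void my_conn_signal_teardown(struct my_conn *c)
{
        complete(&c->teardown_done);
}

/* Waiting side: returns immediately if the event already fired. */
static void my_conn_wait_teardown(struct my_conn *c)
{
        wait_for_completion(&c->teardown_done);
}
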
2020     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2021     index ea3e4b4f7e58..6ab68e058a0a 100644
2022     --- a/drivers/md/dm-cache-target.c
2023     +++ b/drivers/md/dm-cache-target.c
2024     @@ -867,12 +867,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
2025     int r;
2026     struct dm_io_region o_region, c_region;
2027     struct cache *cache = mg->cache;
2028     + sector_t cblock = from_cblock(mg->cblock);
2029    
2030     o_region.bdev = cache->origin_dev->bdev;
2031     o_region.count = cache->sectors_per_block;
2032    
2033     c_region.bdev = cache->cache_dev->bdev;
2034     - c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
2035     + c_region.sector = cblock * cache->sectors_per_block;
2036     c_region.count = cache->sectors_per_block;
2037    
2038     if (mg->writeback || mg->demote) {
2039     @@ -2181,20 +2182,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2040     bool discarded_block;
2041     struct dm_bio_prison_cell *cell;
2042     struct policy_result lookup_result;
2043     - struct per_bio_data *pb;
2044     + struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2045    
2046     - if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
2047     + if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2048     /*
2049     * This can only occur if the io goes to a partial block at
2050     * the end of the origin device. We don't cache these.
2051     * Just remap to the origin and carry on.
2052     */
2053     - remap_to_origin_clear_discard(cache, bio, block);
2054     + remap_to_origin(cache, bio);
2055     return DM_MAPIO_REMAPPED;
2056     }
2057    
2058     - pb = init_per_bio_data(bio, pb_data_size);
2059     -
2060     if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2061     defer_bio(cache, bio);
2062     return DM_MAPIO_SUBMITTED;
2063     diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
2064     index afb419e514bf..579b58200bf2 100644
2065     --- a/drivers/md/persistent-data/dm-space-map-metadata.c
2066     +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
2067     @@ -91,6 +91,69 @@ struct block_op {
2068     dm_block_t block;
2069     };
2070    
2071     +struct bop_ring_buffer {
2072     + unsigned begin;
2073     + unsigned end;
2074     + struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
2075     +};
2076     +
2077     +static void brb_init(struct bop_ring_buffer *brb)
2078     +{
2079     + brb->begin = 0;
2080     + brb->end = 0;
2081     +}
2082     +
2083     +static bool brb_empty(struct bop_ring_buffer *brb)
2084     +{
2085     + return brb->begin == brb->end;
2086     +}
2087     +
2088     +static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
2089     +{
2090     + unsigned r = old + 1;
2091     + return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
2092     +}
2093     +
2094     +static int brb_push(struct bop_ring_buffer *brb,
2095     + enum block_op_type type, dm_block_t b)
2096     +{
2097     + struct block_op *bop;
2098     + unsigned next = brb_next(brb, brb->end);
2099     +
2100     + /*
2101     + * We don't allow the last bop to be filled, this way we can
2102     + * differentiate between full and empty.
2103     + */
2104     + if (next == brb->begin)
2105     + return -ENOMEM;
2106     +
2107     + bop = brb->bops + brb->end;
2108     + bop->type = type;
2109     + bop->block = b;
2110     +
2111     + brb->end = next;
2112     +
2113     + return 0;
2114     +}
2115     +
2116     +static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
2117     +{
2118     + struct block_op *bop;
2119     +
2120     + if (brb_empty(brb))
2121     + return -ENODATA;
2122     +
2123     + bop = brb->bops + brb->begin;
2124     + result->type = bop->type;
2125     + result->block = bop->block;
2126     +
2127     + brb->begin = brb_next(brb, brb->begin);
2128     +
2129     + return 0;
2130     +}
2131     +
2132     +/*----------------------------------------------------------------*/
2133     +
2134     struct sm_metadata {
2135     struct dm_space_map sm;
2136    
2137     @@ -101,25 +164,20 @@ struct sm_metadata {
2138    
2139     unsigned recursion_count;
2140     unsigned allocated_this_transaction;
2141     - unsigned nr_uncommitted;
2142     - struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
2143     + struct bop_ring_buffer uncommitted;
2144    
2145     struct threshold threshold;
2146     };
2147    
2148     static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
2149     {
2150     - struct block_op *op;
2151     + int r = brb_push(&smm->uncommitted, type, b);
2152    
2153     - if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
2154     + if (r) {
2155     DMERR("too many recursive allocations");
2156     return -ENOMEM;
2157     }
2158    
2159     - op = smm->uncommitted + smm->nr_uncommitted++;
2160     - op->type = type;
2161     - op->block = b;
2162     -
2163     return 0;
2164     }
2165    
2166     @@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
2167     return -ENOMEM;
2168     }
2169    
2170     - if (smm->recursion_count == 1 && smm->nr_uncommitted) {
2171     - while (smm->nr_uncommitted && !r) {
2172     - smm->nr_uncommitted--;
2173     - r = commit_bop(smm, smm->uncommitted +
2174     - smm->nr_uncommitted);
2175     + if (smm->recursion_count == 1) {
2176     + while (!brb_empty(&smm->uncommitted)) {
2177     + struct block_op bop;
2178     +
2179     + r = brb_pop(&smm->uncommitted, &bop);
2180     + if (r) {
2181     + DMERR("bug in bop ring buffer");
2182     + break;
2183     + }
2184     +
2185     + r = commit_bop(smm, &bop);
2186     if (r)
2187     break;
2188     }
2189     @@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
2190     static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2191     uint32_t *result)
2192     {
2193     - int r, i;
2194     + int r;
2195     + unsigned i;
2196     struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
2197     unsigned adjustment = 0;
2198    
2199     @@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2200     * We may have some uncommitted adjustments to add. This list
2201     * should always be really short.
2202     */
2203     - for (i = 0; i < smm->nr_uncommitted; i++) {
2204     - struct block_op *op = smm->uncommitted + i;
2205     + for (i = smm->uncommitted.begin;
2206     + i != smm->uncommitted.end;
2207     + i = brb_next(&smm->uncommitted, i)) {
2208     + struct block_op *op = smm->uncommitted.bops + i;
2209    
2210     if (op->block != b)
2211     continue;
2212     @@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
2213     static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
2214     dm_block_t b, int *result)
2215     {
2216     - int r, i, adjustment = 0;
2217     + int r, adjustment = 0;
2218     + unsigned i;
2219     struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
2220     uint32_t rc;
2221    
2222     @@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
2223     * We may have some uncommitted adjustments to add. This list
2224     * should always be really short.
2225     */
2226     - for (i = 0; i < smm->nr_uncommitted; i++) {
2227     - struct block_op *op = smm->uncommitted + i;
2228     + for (i = smm->uncommitted.begin;
2229     + i != smm->uncommitted.end;
2230     + i = brb_next(&smm->uncommitted, i)) {
2231     +
2232     + struct block_op *op = smm->uncommitted.bops + i;
2233    
2234     if (op->block != b)
2235     continue;
2236     @@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
2237     smm->begin = superblock + 1;
2238     smm->recursion_count = 0;
2239     smm->allocated_this_transaction = 0;
2240     - smm->nr_uncommitted = 0;
2241     + brb_init(&smm->uncommitted);
2242     threshold_init(&smm->threshold);
2243    
2244     memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
2245     @@ -713,7 +784,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
2246     smm->begin = 0;
2247     smm->recursion_count = 0;
2248     smm->allocated_this_transaction = 0;
2249     - smm->nr_uncommitted = 0;
2250     + brb_init(&smm->uncommitted);
2251     threshold_init(&smm->threshold);
2252    
2253     memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
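
/*
 * A minimal sketch, not from the patch itself: the bop_ring_buffer added
 * above tells full from empty without a counter by never filling the last
 * slot:
 *   empty  <=>  begin == end
 *   full   <=>  next(end) == begin
 * Standalone userspace illustration; SLOTS and the int payload are
 * arbitrary choices for the example.
 */
#include <stdio.h>

#define SLOTS 4                         /* usable capacity is SLOTS - 1 */

struct ring {
        unsigned int begin, end;
        int v[SLOTS];
};

static unsigned int ring_next(unsigned int i)
{
        return (i + 1 == SLOTS) ? 0 : i + 1;
}

static int ring_push(struct ring *r, int x)
{
        if (ring_next(r->end) == r->begin)
                return -1;              /* full: the last slot stays free */
        r->v[r->end] = x;
        r->end = ring_next(r->end);
        return 0;
}

static int ring_pop(struct ring *r, int *x)
{
        if (r->begin == r->end)
                return -1;              /* empty */
        *x = r->v[r->begin];
        r->begin = ring_next(r->begin);
        return 0;
}

int main(void)
{
        struct ring r = { 0, 0, { 0 } };
        int i, x;

        for (i = 1; i <= SLOTS; i++)
                if (ring_push(&r, i))
                        printf("push %d rejected: full\n", i);
        while (!ring_pop(&r, &x))
                printf("popped %d\n", x);       /* 1, 2, 3 */
        return 0;
}
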
2254     diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
2255     index c1c3b132fed5..e381142d636f 100644
2256     --- a/drivers/net/can/flexcan.c
2257     +++ b/drivers/net/can/flexcan.c
2258     @@ -144,6 +144,8 @@
2259    
2260     #define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
2261    
2262     +#define FLEXCAN_TIMEOUT_US (50)
2263     +
2264     /*
2265     * FLEXCAN hardware feature flags
2266     *
2267     @@ -259,6 +261,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
2268     }
2269     #endif
2270    
2271     +static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
2272     +{
2273     + if (!priv->reg_xceiver)
2274     + return 0;
2275     +
2276     + return regulator_enable(priv->reg_xceiver);
2277     +}
2278     +
2279     +static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
2280     +{
2281     + if (!priv->reg_xceiver)
2282     + return 0;
2283     +
2284     + return regulator_disable(priv->reg_xceiver);
2285     +}
2286     +
2287     static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
2288     u32 reg_esr)
2289     {
2290     @@ -266,26 +284,42 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
2291     (reg_esr & FLEXCAN_ESR_ERR_BUS);
2292     }
2293    
2294     -static inline void flexcan_chip_enable(struct flexcan_priv *priv)
2295     +static int flexcan_chip_enable(struct flexcan_priv *priv)
2296     {
2297     struct flexcan_regs __iomem *regs = priv->base;
2298     + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
2299     u32 reg;
2300    
2301     reg = flexcan_read(&regs->mcr);
2302     reg &= ~FLEXCAN_MCR_MDIS;
2303     flexcan_write(reg, &regs->mcr);
2304    
2305     - udelay(10);
2306     + while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2307     + usleep_range(10, 20);
2308     +
2309     + if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
2310     + return -ETIMEDOUT;
2311     +
2312     + return 0;
2313     }
2314    
2315     -static inline void flexcan_chip_disable(struct flexcan_priv *priv)
2316     +static int flexcan_chip_disable(struct flexcan_priv *priv)
2317     {
2318     struct flexcan_regs __iomem *regs = priv->base;
2319     + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
2320     u32 reg;
2321    
2322     reg = flexcan_read(&regs->mcr);
2323     reg |= FLEXCAN_MCR_MDIS;
2324     flexcan_write(reg, &regs->mcr);
2325     +
2326     + while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2327     + usleep_range(10, 20);
2328     +
2329     + if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
2330     + return -ETIMEDOUT;
2331     +
2332     + return 0;
2333     }
2334    
2335     static int flexcan_get_berr_counter(const struct net_device *dev,
2336     @@ -706,7 +740,9 @@ static int flexcan_chip_start(struct net_device *dev)
2337     u32 reg_mcr, reg_ctrl;
2338    
2339     /* enable module */
2340     - flexcan_chip_enable(priv);
2341     + err = flexcan_chip_enable(priv);
2342     + if (err)
2343     + return err;
2344    
2345     /* soft reset */
2346     flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
2347     @@ -785,11 +821,9 @@ static int flexcan_chip_start(struct net_device *dev)
2348     if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
2349     flexcan_write(0x0, &regs->rxfgmask);
2350    
2351     - if (priv->reg_xceiver) {
2352     - err = regulator_enable(priv->reg_xceiver);
2353     - if (err)
2354     - goto out;
2355     - }
2356     + err = flexcan_transceiver_enable(priv);
2357     + if (err)
2358     + goto out;
2359    
2360     /* synchronize with the can bus */
2361     reg_mcr = flexcan_read(&regs->mcr);
2362     @@ -824,16 +858,17 @@ static void flexcan_chip_stop(struct net_device *dev)
2363     struct flexcan_regs __iomem *regs = priv->base;
2364     u32 reg;
2365    
2366     - /* Disable all interrupts */
2367     - flexcan_write(0, &regs->imask1);
2368     -
2369     /* Disable + halt module */
2370     reg = flexcan_read(&regs->mcr);
2371     reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
2372     flexcan_write(reg, &regs->mcr);
2373    
2374     - if (priv->reg_xceiver)
2375     - regulator_disable(priv->reg_xceiver);
2376     + /* Disable all interrupts */
2377     + flexcan_write(0, &regs->imask1);
2378     + flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
2379     + &regs->ctrl);
2380     +
2381     + flexcan_transceiver_disable(priv);
2382     priv->can.state = CAN_STATE_STOPPED;
2383    
2384     return;
2385     @@ -863,7 +898,7 @@ static int flexcan_open(struct net_device *dev)
2386     /* start chip and queuing */
2387     err = flexcan_chip_start(dev);
2388     if (err)
2389     - goto out_close;
2390     + goto out_free_irq;
2391    
2392     can_led_event(dev, CAN_LED_EVENT_OPEN);
2393    
2394     @@ -872,6 +907,8 @@ static int flexcan_open(struct net_device *dev)
2395    
2396     return 0;
2397    
2398     + out_free_irq:
2399     + free_irq(dev->irq, dev);
2400     out_close:
2401     close_candev(dev);
2402     out_disable_per:
2403     @@ -942,12 +979,16 @@ static int register_flexcandev(struct net_device *dev)
2404     goto out_disable_ipg;
2405    
2406     /* select "bus clock", chip must be disabled */
2407     - flexcan_chip_disable(priv);
2408     + err = flexcan_chip_disable(priv);
2409     + if (err)
2410     + goto out_disable_per;
2411     reg = flexcan_read(&regs->ctrl);
2412     reg |= FLEXCAN_CTRL_CLK_SRC;
2413     flexcan_write(reg, &regs->ctrl);
2414    
2415     - flexcan_chip_enable(priv);
2416     + err = flexcan_chip_enable(priv);
2417     + if (err)
2418     + goto out_chip_disable;
2419    
2420     /* set freeze, halt and activate FIFO, restrict register access */
2421     reg = flexcan_read(&regs->mcr);
2422     @@ -964,14 +1005,15 @@ static int register_flexcandev(struct net_device *dev)
2423     if (!(reg & FLEXCAN_MCR_FEN)) {
2424     netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
2425     err = -ENODEV;
2426     - goto out_disable_per;
2427     + goto out_chip_disable;
2428     }
2429    
2430     err = register_candev(dev);
2431    
2432     - out_disable_per:
2433     /* disable core and turn off clocks */
2434     + out_chip_disable:
2435     flexcan_chip_disable(priv);
2436     + out_disable_per:
2437     clk_disable_unprepare(priv->clk_per);
2438     out_disable_ipg:
2439     clk_disable_unprepare(priv->clk_ipg);
2440     @@ -1101,9 +1143,10 @@ static int flexcan_probe(struct platform_device *pdev)
2441     static int flexcan_remove(struct platform_device *pdev)
2442     {
2443     struct net_device *dev = platform_get_drvdata(pdev);
2444     + struct flexcan_priv *priv = netdev_priv(dev);
2445    
2446     unregister_flexcandev(dev);
2447     -
2448     + netif_napi_del(&priv->napi);
2449     free_candev(dev);
2450    
2451     return 0;
2452     @@ -1114,8 +1157,11 @@ static int flexcan_suspend(struct device *device)
2453     {
2454     struct net_device *dev = dev_get_drvdata(device);
2455     struct flexcan_priv *priv = netdev_priv(dev);
2456     + int err;
2457    
2458     - flexcan_chip_disable(priv);
2459     + err = flexcan_chip_disable(priv);
2460     + if (err)
2461     + return err;
2462    
2463     if (netif_running(dev)) {
2464     netif_stop_queue(dev);
2465     @@ -1136,9 +1182,7 @@ static int flexcan_resume(struct device *device)
2466     netif_device_attach(dev);
2467     netif_start_queue(dev);
2468     }
2469     - flexcan_chip_enable(priv);
2470     -
2471     - return 0;
2472     + return flexcan_chip_enable(priv);
2473     }
2474     #endif /* CONFIG_PM_SLEEP */
2475    
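
/*
 * A minimal sketch, not from the patch itself: the flexcan hunks above turn
 * a blind udelay(10) into a bounded poll for the LPM_ACK handshake bit,
 * returning -ETIMEDOUT so callers can unwind. Generic shape of that
 * pattern; REG_ACK, the register argument and the ~50 us budget are
 * hypothetical stand-ins.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define REG_ACK         (1U << 20)      /* hypothetical ack bit */

static int wait_for_ack(void __iomem *reg)
{
        unsigned int timeout = 50 / 10; /* 50 us budget, ~10 us steps */

        while (timeout-- && !(readl(reg) & REG_ACK))
                usleep_range(10, 20);

        /* final re-read: the ack may have landed during the last sleep */
        if (!(readl(reg) & REG_ACK))
                return -ETIMEDOUT;

        return 0;
}
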
2476     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2477     index c0acf98d1ea5..14a50a11d72e 100644
2478     --- a/drivers/net/ethernet/broadcom/tg3.c
2479     +++ b/drivers/net/ethernet/broadcom/tg3.c
2480     @@ -6813,8 +6813,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
2481    
2482     work_mask |= opaque_key;
2483    
2484     - if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2485     - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2486     + if (desc->err_vlan & RXD_ERR_MASK) {
2487     drop_it:
2488     tg3_recycle_rx(tnapi, tpr, opaque_key,
2489     desc_idx, *post_ptr);
2490     diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
2491     index 70257808aa37..ac50e7c9c2b8 100644
2492     --- a/drivers/net/ethernet/broadcom/tg3.h
2493     +++ b/drivers/net/ethernet/broadcom/tg3.h
2494     @@ -2598,7 +2598,11 @@ struct tg3_rx_buffer_desc {
2495     #define RXD_ERR_TOO_SMALL 0x00400000
2496     #define RXD_ERR_NO_RESOURCES 0x00800000
2497     #define RXD_ERR_HUGE_FRAME 0x01000000
2498     -#define RXD_ERR_MASK 0xffff0000
2499     +
2500     +#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
2501     + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
2502     + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
2503     + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
2504    
2505     u32 reserved;
2506     u32 opaque;
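
/*
 * A minimal sketch, not from the patch itself: the tg3.h hunk above
 * replaces the blanket 0xffff0000 mask with an OR of the named error bits,
 * so status bits that merely share the upper halfword (like the odd-nibble
 * indication the old rx path had to special-case) stop matching. Tiny
 * standalone illustration with hypothetical bits.
 */
#include <stdio.h>

#define ERR_CRC                 0x00010000u
#define ERR_TOO_SMALL           0x00400000u
#define STAT_ODD_NIBBLE         0x00080000u     /* status, not an error */

#define ERR_MASK_BLANKET        0xffff0000u
#define ERR_MASK_EXPLICIT       (ERR_CRC | ERR_TOO_SMALL)

int main(void)
{
        unsigned int desc = STAT_ODD_NIBBLE;    /* benign frame */

        printf("blanket mask:  %s\n",
               (desc & ERR_MASK_BLANKET) ? "dropped" : "kept");
        printf("explicit mask: %s\n",
               (desc & ERR_MASK_EXPLICIT) ? "dropped" : "kept");
        return 0;
}
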
2507     diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
2508     index 3dd39dcfe36b..a12410381cb1 100644
2509     --- a/drivers/net/ethernet/sfc/ptp.c
2510     +++ b/drivers/net/ethernet/sfc/ptp.c
2511     @@ -1360,6 +1360,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
2512     struct efx_ptp_data *ptp = efx->ptp_data;
2513     int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
2514    
2515     + if (!ptp) {
2516     + if (net_ratelimit())
2517     + netif_warn(efx, drv, efx->net_dev,
2518     + "Received PTP event but PTP not set up\n");
2519     + return;
2520     + }
2521     +
2522     if (!ptp->enabled)
2523     return;
2524    
2525     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2526     index 7c8343a4f918..10636cbd3807 100644
2527     --- a/drivers/net/tun.c
2528     +++ b/drivers/net/tun.c
2529     @@ -1650,7 +1650,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2530     TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2531     NETIF_F_HW_VLAN_STAG_TX;
2532     dev->features = dev->hw_features;
2533     - dev->vlan_features = dev->features;
2534     + dev->vlan_features = dev->features &
2535     + ~(NETIF_F_HW_VLAN_CTAG_TX |
2536     + NETIF_F_HW_VLAN_STAG_TX);
2537    
2538     INIT_LIST_HEAD(&tun->disabled);
2539     err = tun_attach(tun, file, false);
2540     diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2541     index 5e2bac650bd8..3ecb2133dee6 100644
2542     --- a/drivers/net/usb/ax88179_178a.c
2543     +++ b/drivers/net/usb/ax88179_178a.c
2544     @@ -1031,20 +1031,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
2545     dev->mii.phy_id = 0x03;
2546     dev->mii.supports_gmii = 1;
2547    
2548     - if (usb_device_no_sg_constraint(dev->udev))
2549     - dev->can_dma_sg = 1;
2550     -
2551     dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2552     NETIF_F_RXCSUM;
2553    
2554     dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2555     NETIF_F_RXCSUM;
2556    
2557     - if (dev->can_dma_sg) {
2558     - dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
2559     - dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
2560     - }
2561     -
2562     /* Enable checksum offload */
2563     *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
2564     AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
2565     diff --git a/drivers/net/veth.c b/drivers/net/veth.c
2566     index eee1f19ef1e9..61c4044f644e 100644
2567     --- a/drivers/net/veth.c
2568     +++ b/drivers/net/veth.c
2569     @@ -269,7 +269,8 @@ static void veth_setup(struct net_device *dev)
2570     dev->ethtool_ops = &veth_ethtool_ops;
2571     dev->features |= NETIF_F_LLTX;
2572     dev->features |= VETH_FEATURES;
2573     - dev->vlan_features = dev->features;
2574     + dev->vlan_features = dev->features &
2575     + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
2576     dev->destructor = veth_dev_free;
2577    
2578     dev->hw_features = VETH_FEATURES;
2579     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2580     index 8065066a6230..0232156dade3 100644
2581     --- a/drivers/net/virtio_net.c
2582     +++ b/drivers/net/virtio_net.c
2583     @@ -1621,7 +1621,8 @@ static int virtnet_probe(struct virtio_device *vdev)
2584     /* If we can receive ANY GSO packets, we must allocate large ones. */
2585     if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2586     virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2587     - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
2588     + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2589     + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2590     vi->big_packets = true;
2591    
2592     if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2593     diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
2594     index 7e2788c488ed..55d89390b4bc 100644
2595     --- a/drivers/net/vmxnet3/vmxnet3_drv.c
2596     +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
2597     @@ -1760,11 +1760,20 @@ vmxnet3_netpoll(struct net_device *netdev)
2598     {
2599     struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2600    
2601     - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2602     - vmxnet3_disable_all_intrs(adapter);
2603     -
2604     - vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
2605     - vmxnet3_enable_all_intrs(adapter);
2606     + switch (adapter->intr.type) {
2607     +#ifdef CONFIG_PCI_MSI
2608     + case VMXNET3_IT_MSIX: {
2609     + int i;
2610     + for (i = 0; i < adapter->num_rx_queues; i++)
2611     + vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2612     + break;
2613     + }
2614     +#endif
2615     + case VMXNET3_IT_MSI:
2616     + default:
2617     + vmxnet3_intr(0, adapter->netdev);
2618     + break;
2619     + }
2620    
2621     }
2622     #endif /* CONFIG_NET_POLL_CONTROLLER */
2623     diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2624     index 092b9d412e7f..1078fbd7bda2 100644
2625     --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2626     +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
2627     @@ -56,7 +56,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
2628     {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
2629     {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2630     {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
2631     - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
2632     + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
2633     {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
2634     {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
2635     {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
2636     @@ -95,7 +95,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
2637     {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
2638     {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
2639     {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
2640     - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
2641     + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
2642     {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
2643     };
2644    
2645     diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
2646     index a1ab4ff46818..c2fa0e3490c7 100644
2647     --- a/drivers/net/wireless/ath/ath9k/recv.c
2648     +++ b/drivers/net/wireless/ath/ath9k/recv.c
2649     @@ -730,11 +730,18 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
2650     return NULL;
2651    
2652     /*
2653     - * mark descriptor as zero-length and set the 'more'
2654     - * flag to ensure that both buffers get discarded
2655     + * Re-check previous descriptor, in case it has been filled
2656     + * in the mean time.
2657     */
2658     - rs->rs_datalen = 0;
2659     - rs->rs_more = true;
2660     + ret = ath9k_hw_rxprocdesc(ah, ds, rs);
2661     + if (ret == -EINPROGRESS) {
2662     + /*
2663     + * mark descriptor as zero-length and set the 'more'
2664     + * flag to ensure that both buffers get discarded
2665     + */
2666     + rs->rs_datalen = 0;
2667     + rs->rs_more = true;
2668     + }
2669     }
2670    
2671     list_del(&bf->list);
2672     @@ -1093,32 +1100,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2673     struct ath_common *common = ath9k_hw_common(ah);
2674     struct ieee80211_hdr *hdr;
2675     bool discard_current = sc->rx.discard_next;
2676     - int ret = 0;
2677    
2678     /*
2679     * Discard corrupt descriptors which are marked in
2680     * ath_get_next_rx_buf().
2681     */
2682     - sc->rx.discard_next = rx_stats->rs_more;
2683     if (discard_current)
2684     - return -EINVAL;
2685     + goto corrupt;
2686     +
2687     + sc->rx.discard_next = false;
2688    
2689     /*
2690     * Discard zero-length packets.
2691     */
2692     if (!rx_stats->rs_datalen) {
2693     RX_STAT_INC(rx_len_err);
2694     - return -EINVAL;
2695     + goto corrupt;
2696     }
2697    
2698     - /*
2699     - * rs_status follows rs_datalen so if rs_datalen is too large
2700     - * we can take a hint that hardware corrupted it, so ignore
2701     - * those frames.
2702     - */
2703     + /*
2704     + * rs_status follows rs_datalen so if rs_datalen is too large
2705     + * we can take a hint that hardware corrupted it, so ignore
2706     + * those frames.
2707     + */
2708     if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
2709     RX_STAT_INC(rx_len_err);
2710     - return -EINVAL;
2711     + goto corrupt;
2712     }
2713    
2714     /* Only use status info from the last fragment */
2715     @@ -1132,10 +1139,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2716     * This is different from the other corrupt descriptor
2717     * condition handled above.
2718     */
2719     - if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
2720     - ret = -EINVAL;
2721     - goto exit;
2722     - }
2723     + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
2724     + goto corrupt;
2725    
2726     hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
2727    
2728     @@ -1151,18 +1156,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2729     if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
2730     RX_STAT_INC(rx_spectral);
2731    
2732     - ret = -EINVAL;
2733     - goto exit;
2734     + return -EINVAL;
2735     }
2736    
2737     /*
2738     * everything but the rate is checked here, the rate check is done
2739     * separately to avoid doing two lookups for a rate for each frame.
2740     */
2741     - if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
2742     - ret = -EINVAL;
2743     - goto exit;
2744     - }
2745     + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
2746     + return -EINVAL;
2747    
2748     rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
2749     if (rx_stats->is_mybeacon) {
2750     @@ -1173,15 +1175,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2751     /*
2752     * This shouldn't happen, but have a safety check anyway.
2753     */
2754     - if (WARN_ON(!ah->curchan)) {
2755     - ret = -EINVAL;
2756     - goto exit;
2757     - }
2758     + if (WARN_ON(!ah->curchan))
2759     + return -EINVAL;
2760    
2761     - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
2762     - ret =-EINVAL;
2763     - goto exit;
2764     - }
2765     + if (ath9k_process_rate(common, hw, rx_stats, rx_status))
2766     + return -EINVAL;
2767    
2768     ath9k_process_rssi(common, hw, rx_stats, rx_status);
2769    
2770     @@ -1196,9 +1194,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
2771     sc->rx.num_pkts++;
2772     #endif
2773    
2774     -exit:
2775     - sc->rx.discard_next = false;
2776     - return ret;
2777     + return 0;
2778     +
2779     +corrupt:
2780     + sc->rx.discard_next = rx_stats->rs_more;
2781     + return -EINVAL;
2782     }
2783    
2784     static void ath9k_rx_skb_postprocess(struct ath_common *common,
2785     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2786     index 7fe6b5923a9c..ba39178a94ab 100644
2787     --- a/drivers/net/wireless/ath/ath9k/xmit.c
2788     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2789     @@ -1457,14 +1457,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
2790     for (tidno = 0, tid = &an->tid[tidno];
2791     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2792    
2793     - if (!tid->sched)
2794     - continue;
2795     -
2796     ac = tid->ac;
2797     txq = ac->txq;
2798    
2799     ath_txq_lock(sc, txq);
2800    
2801     + if (!tid->sched) {
2802     + ath_txq_unlock(sc, txq);
2803     + continue;
2804     + }
2805     +
2806     buffered = ath_tid_has_buffered(tid);
2807    
2808     tid->sched = false;
2809     @@ -2199,14 +2201,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2810     txq->stopped = true;
2811     }
2812    
2813     + if (txctl->an)
2814     + tid = ath_get_skb_tid(sc, txctl->an, skb);
2815     +
2816     if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
2817     ath_txq_unlock(sc, txq);
2818     txq = sc->tx.uapsdq;
2819     ath_txq_lock(sc, txq);
2820     } else if (txctl->an &&
2821     ieee80211_is_data_present(hdr->frame_control)) {
2822     - tid = ath_get_skb_tid(sc, txctl->an, skb);
2823     -
2824     WARN_ON(tid->ac->txq != txctl->txq);
2825    
2826     if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
2827     diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2828     index cae4d3182e33..d6e6405a9b07 100644
2829     --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2830     +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
2831     @@ -704,6 +704,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2832     return ret;
2833     }
2834    
2835     +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
2836     +{
2837     + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
2838     + return false;
2839     + return true;
2840     +}
2841     +
2842     +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
2843     +{
2844     + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
2845     + return false;
2846     + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
2847     + return true;
2848     +
2849     + /* disabled by default */
2850     + return false;
2851     +}
2852     +
2853     static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2854     struct ieee80211_vif *vif,
2855     enum ieee80211_ampdu_mlme_action action,
2856     @@ -725,7 +743,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2857    
2858     switch (action) {
2859     case IEEE80211_AMPDU_RX_START:
2860     - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
2861     + if (!iwl_enable_rx_ampdu(priv->cfg))
2862     break;
2863     IWL_DEBUG_HT(priv, "start Rx\n");
2864     ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2865     @@ -737,7 +755,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2866     case IEEE80211_AMPDU_TX_START:
2867     if (!priv->trans->ops->txq_enable)
2868     break;
2869     - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
2870     + if (!iwl_enable_tx_ampdu(priv->cfg))
2871     break;
2872     IWL_DEBUG_HT(priv, "start Tx\n");
2873     ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
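The new iwl_enable_rx_ampdu()/iwl_enable_tx_ampdu() helpers decode the 11n_disable bitmap as a three-way policy: an explicit disable bit always wins, an explicit enable bit comes next, and otherwise a per-driver default applies (off for dvm above, on for mvm below). A standalone sketch of that decode order, with the bit positions mirrored from the patch:

    #include <stdbool.h>
    #include <stdio.h>

    #define HT_DISABLE_TXAGG (1u << 1)  /* mirrors IWL_DISABLE_HT_TXAGG */
    #define HT_ENABLE_TXAGG  (1u << 3)  /* mirrors IWL_ENABLE_HT_TXAGG */

    static bool tx_ampdu_enabled(unsigned int param, bool default_on)
    {
        if (param & HT_DISABLE_TXAGG)   /* explicit off always wins */
            return false;
        if (param & HT_ENABLE_TXAGG)    /* explicit on overrides default */
            return true;
        return default_on;              /* dvm: false, mvm: true */
    }

    int main(void)
    {
        printf("%d\n", tx_ampdu_enabled(HT_ENABLE_TXAGG, false));  /* 1 */
        printf("%d\n", tx_ampdu_enabled(HT_DISABLE_TXAGG |
                                        HT_ENABLE_TXAGG, true));   /* 0 */
        return 0;
    }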
2874     diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
2875     index c3c13ce96eb0..e800002d6158 100644
2876     --- a/drivers/net/wireless/iwlwifi/dvm/sta.c
2877     +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
2878     @@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
2879     sizeof(priv->tid_data[sta_id][tid]));
2880    
2881     priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
2882     + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
2883    
2884     priv->num_stations--;
2885    
2886     diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
2887     index 1fef5240e6ad..e219e761f48b 100644
2888     --- a/drivers/net/wireless/iwlwifi/dvm/tx.c
2889     +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
2890     @@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
2891     struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
2892     struct iwl_ht_agg *agg;
2893     struct sk_buff_head reclaimed_skbs;
2894     - struct ieee80211_tx_info *info;
2895     - struct ieee80211_hdr *hdr;
2896     struct sk_buff *skb;
2897     int sta_id;
2898     int tid;
2899     @@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
2900     freed = 0;
2901    
2902     skb_queue_walk(&reclaimed_skbs, skb) {
2903     - hdr = (struct ieee80211_hdr *)skb->data;
2904     + struct ieee80211_hdr *hdr = (void *)skb->data;
2905     + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2906    
2907     if (ieee80211_is_data_qos(hdr->frame_control))
2908     freed++;
2909     else
2910     WARN_ON_ONCE(1);
2911    
2912     - info = IEEE80211_SKB_CB(skb);
2913     iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
2914    
2915     + memset(&info->status, 0, sizeof(info->status));
2916     + /* Packet was transmitted successfully, failures come as single
2917     + * frames because before failing a frame the firmware transmits
2918     + * it without aggregation at least once.
2919     + */
2920     + info->flags |= IEEE80211_TX_STAT_ACK;
2921     +
2922     if (freed == 1) {
2923     /* this is the first skb we deliver in this batch */
2924     /* put the rate scaling data there */
2925     info = IEEE80211_SKB_CB(skb);
2926     memset(&info->status, 0, sizeof(info->status));
2927     - info->flags |= IEEE80211_TX_STAT_ACK;
2928     info->flags |= IEEE80211_TX_STAT_AMPDU;
2929     info->status.ampdu_ack_len = ba_resp->txed_2_done;
2930     info->status.ampdu_len = ba_resp->txed;
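The compressed-BA hunk moves the status reset and IEEE80211_TX_STAT_ACK into the reclaim walk itself, since any frame reclaimed via a block-ack was by definition transmitted successfully, while the AMPDU rate-scaling fields still go only on the first frame of the batch. A toy reduction of that loop shape; tx_info here is a two-field stand-in, not the mac80211 struct:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct tx_info {
        bool acked;
        bool ampdu;     /* rate-scaling data lives on the first frame */
    };

    static void reclaim(struct tx_info *frames, int n)
    {
        int freed = 0;

        for (int i = 0; i < n; i++) {
            struct tx_info *info = &frames[i];

            freed++;
            memset(info, 0, sizeof(*info));
            /* every reclaimed frame was ACKed by the BA */
            info->acked = true;

            if (freed == 1)
                info->ampdu = true;  /* first frame carries BA stats */
        }
    }

    int main(void)
    {
        struct tx_info f[3];
        reclaim(f, 3);
        printf("%d %d %d\n", f[0].acked, f[2].acked, f[2].ampdu);  /* 1 1 0 */
        return 0;
    }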
2931     diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
2932     index 99e1da3123c9..2cdbd940575e 100644
2933     --- a/drivers/net/wireless/iwlwifi/iwl-drv.c
2934     +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
2935     @@ -1210,7 +1210,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
2936     MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2937     module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
2938     MODULE_PARM_DESC(11n_disable,
2939     - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
2940     + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
2941     module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
2942     int, S_IRUGO);
2943     MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
2944     diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
2945     index a1f580c0c6c6..4c6cff4218cb 100644
2946     --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
2947     +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
2948     @@ -79,9 +79,12 @@ enum iwl_power_level {
2949     IWL_POWER_NUM
2950     };
2951    
2952     -#define IWL_DISABLE_HT_ALL BIT(0)
2953     -#define IWL_DISABLE_HT_TXAGG BIT(1)
2954     -#define IWL_DISABLE_HT_RXAGG BIT(2)
2955     +enum iwl_disable_11n {
2956     + IWL_DISABLE_HT_ALL = BIT(0),
2957     + IWL_DISABLE_HT_TXAGG = BIT(1),
2958     + IWL_DISABLE_HT_RXAGG = BIT(2),
2959     + IWL_ENABLE_HT_TXAGG = BIT(3),
2960     +};
2961    
2962     /**
2963     * struct iwl_mod_params
2964     @@ -90,7 +93,7 @@ enum iwl_power_level {
2965     *
2966     * @sw_crypto: using hardware encryption, default = 0
2967     * @disable_11n: disable 11n capabilities, default = 0,
2968     - * use IWL_DISABLE_HT_* constants
2969     + * use IWL_[DIS,EN]ABLE_HT_* constants
2970     * @amsdu_size_8K: enable 8K amsdu size, default = 0
2971     * @restart_fw: restart firmware, default = 1
2972     * @wd_disable: enable stuck queue check, default = 0
2973     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2974     index 9833cdf6177c..5f6fd44e72f1 100644
2975     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2976     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
2977     @@ -297,6 +297,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
2978     ieee80211_free_txskb(hw, skb);
2979     }
2980    
2981     +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
2982     +{
2983     + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
2984     + return false;
2985     + return true;
2986     +}
2987     +
2988     +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
2989     +{
2990     + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
2991     + return false;
2992     + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
2993     + return true;
2994     +
2995     + /* enabled by default */
2996     + return true;
2997     +}
2998     +
2999     static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3000     struct ieee80211_vif *vif,
3001     enum ieee80211_ampdu_mlme_action action,
3002     @@ -316,7 +334,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3003    
3004     switch (action) {
3005     case IEEE80211_AMPDU_RX_START:
3006     - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
3007     + if (!iwl_enable_rx_ampdu(mvm->cfg)) {
3008     ret = -EINVAL;
3009     break;
3010     }
3011     @@ -326,7 +344,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
3012     ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
3013     break;
3014     case IEEE80211_AMPDU_TX_START:
3015     - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
3016     + if (!iwl_enable_tx_ampdu(mvm->cfg)) {
3017     ret = -EINVAL;
3018     break;
3019     }
3020     diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
3021     index b0389279cc1e..c86663ebb493 100644
3022     --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
3023     +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
3024     @@ -152,7 +152,7 @@ enum iwl_power_scheme {
3025     IWL_POWER_SCHEME_LP
3026     };
3027    
3028     -#define IWL_CONN_MAX_LISTEN_INTERVAL 70
3029     +#define IWL_CONN_MAX_LISTEN_INTERVAL 10
3030     #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
3031     IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
3032     IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
3033     diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
3034     index e05440d90319..f41add9c8093 100644
3035     --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
3036     +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
3037     @@ -819,16 +819,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
3038     struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
3039     struct sk_buff_head reclaimed_skbs;
3040     struct iwl_mvm_tid_data *tid_data;
3041     - struct ieee80211_tx_info *info;
3042     struct ieee80211_sta *sta;
3043     struct iwl_mvm_sta *mvmsta;
3044     - struct ieee80211_hdr *hdr;
3045     struct sk_buff *skb;
3046     int sta_id, tid, freed;
3047     -
3048     /* "flow" corresponds to Tx queue */
3049     u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
3050     -
3051     /* "ssn" is start of block-ack Tx window, corresponds to index
3052     * (in Tx queue's circular buffer) of first TFD/frame in window */
3053     u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
3054     @@ -885,22 +881,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
3055     freed = 0;
3056    
3057     skb_queue_walk(&reclaimed_skbs, skb) {
3058     - hdr = (struct ieee80211_hdr *)skb->data;
3059     + struct ieee80211_hdr *hdr = (void *)skb->data;
3060     + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3061    
3062     if (ieee80211_is_data_qos(hdr->frame_control))
3063     freed++;
3064     else
3065     WARN_ON_ONCE(1);
3066    
3067     - info = IEEE80211_SKB_CB(skb);
3068     iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
3069    
3070     + memset(&info->status, 0, sizeof(info->status));
3071     + /* Packet was transmitted successfully, failures come as single
3072     + * frames because before failing a frame the firmware transmits
3073     + * it without aggregation at least once.
3074     + */
3075     + info->flags |= IEEE80211_TX_STAT_ACK;
3076     +
3077     if (freed == 1) {
3078     /* this is the first skb we deliver in this batch */
3079     /* put the rate scaling data there */
3080     - info = IEEE80211_SKB_CB(skb);
3081     - memset(&info->status, 0, sizeof(info->status));
3082     - info->flags |= IEEE80211_TX_STAT_ACK;
3083     info->flags |= IEEE80211_TX_STAT_AMPDU;
3084     info->status.ampdu_ack_len = ba_notif->txed_2_done;
3085     info->status.ampdu_len = ba_notif->txed;
3086     diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
3087     index 5e0eec4d71c7..5d9a8084665d 100644
3088     --- a/drivers/net/wireless/mwifiex/11ac.c
3089     +++ b/drivers/net/wireless/mwifiex/11ac.c
3090     @@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
3091     vht_cap->header.len =
3092     cpu_to_le16(sizeof(struct ieee80211_vht_cap));
3093     memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
3094     - (u8 *)bss_desc->bcn_vht_cap +
3095     - sizeof(struct ieee_types_header),
3096     + (u8 *)bss_desc->bcn_vht_cap,
3097     le16_to_cpu(vht_cap->header.len));
3098    
3099     mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
3100     diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
3101     index 0b803c05cab3..983c10c49658 100644
3102     --- a/drivers/net/wireless/mwifiex/11n.c
3103     +++ b/drivers/net/wireless/mwifiex/11n.c
3104     @@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
3105     ht_cap->header.len =
3106     cpu_to_le16(sizeof(struct ieee80211_ht_cap));
3107     memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
3108     - (u8 *) bss_desc->bcn_ht_cap +
3109     - sizeof(struct ieee_types_header),
3110     + (u8 *)bss_desc->bcn_ht_cap,
3111     le16_to_cpu(ht_cap->header.len));
3112    
3113     mwifiex_fill_cap_info(priv, radio_type, ht_cap);
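Both mwifiex hunks above fix the same bug: bcn_ht_cap and bcn_vht_cap already point past the IE header, so adding sizeof(struct ieee_types_header) again skipped the first payload bytes and read beyond the element. A standalone illustration of the off-by-one-header copy:

    #include <stdio.h>
    #include <string.h>

    struct ie_header { unsigned char id, len; };

    int main(void)
    {
        /* id=45, len=4, then four payload bytes */
        unsigned char element[] = { 45, 4, 0xAA, 0xBB, 0xCC, 0xDD };
        /* the cached pointer already skips the header ... */
        unsigned char *payload = element + sizeof(struct ie_header);
        unsigned char dst[4];

        /* wrong: skips the header a second time, copies the tail bytes */
        memcpy(dst, payload + sizeof(struct ie_header), 2);

        /* right: the pointer is already at the payload */
        memcpy(dst, payload, sizeof(dst));
        printf("%02x %02x\n", dst[0], dst[1]);  /* aa bb */
        return 0;
    }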
3114     diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
3115     index 52da8ee7599a..cb84edcd794b 100644
3116     --- a/drivers/net/wireless/mwifiex/pcie.c
3117     +++ b/drivers/net/wireless/mwifiex/pcie.c
3118     @@ -1212,6 +1212,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
3119     rd_index = card->rxbd_rdptr & reg->rx_mask;
3120     skb_data = card->rx_buf_list[rd_index];
3121    
3122     + /* If skb allocation failed earlier for this Rx packet,
3123     + * rx_buf_list[rd_index] would have been left with a NULL.
3124     + */
3125     + if (!skb_data)
3126     + return -ENOMEM;
3127     +
3128     MWIFIEX_SKB_PACB(skb_data, &buf_pa);
3129     pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
3130     PCI_DMA_FROMDEVICE);
3131     @@ -1526,6 +1532,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
3132     if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
3133     mwifiex_process_sleep_confirm_resp(adapter, skb->data,
3134     skb->len);
3135     + mwifiex_pcie_enable_host_int(adapter);
3136     + if (mwifiex_write_reg(adapter,
3137     + PCIE_CPU_INT_EVENT,
3138     + CPU_INTR_SLEEP_CFM_DONE)) {
3139     + dev_warn(adapter->dev,
3140     + "Write register failed\n");
3141     + return -1;
3142     + }
3143     while (reg->sleep_cookie && (count++ < 10) &&
3144     mwifiex_pcie_ok_to_access_hw(adapter))
3145     usleep_range(50, 60);
3146     @@ -1994,23 +2008,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
3147     adapter->int_status |= pcie_ireg;
3148     spin_unlock_irqrestore(&adapter->int_lock, flags);
3149    
3150     - if (pcie_ireg & HOST_INTR_CMD_DONE) {
3151     - if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
3152     - (adapter->ps_state == PS_STATE_SLEEP)) {
3153     - mwifiex_pcie_enable_host_int(adapter);
3154     - if (mwifiex_write_reg(adapter,
3155     - PCIE_CPU_INT_EVENT,
3156     - CPU_INTR_SLEEP_CFM_DONE)
3157     - ) {
3158     - dev_warn(adapter->dev,
3159     - "Write register failed\n");
3160     - return;
3161     -
3162     - }
3163     - }
3164     - } else if (!adapter->pps_uapsd_mode &&
3165     - adapter->ps_state == PS_STATE_SLEEP &&
3166     - mwifiex_pcie_ok_to_access_hw(adapter)) {
3167     + if (!adapter->pps_uapsd_mode &&
3168     + adapter->ps_state == PS_STATE_SLEEP &&
3169     + mwifiex_pcie_ok_to_access_hw(adapter)) {
3170     /* Potentially for PCIe we could get other
3171     * interrupts like shared. Don't change power
3172     * state until cookie is set */
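The first pcie.c hunk above guards against a ring slot left NULL by an earlier failed skb allocation, bailing out with -ENOMEM rather than unmapping a NULL buffer. The shape of that guard, reduced to a standalone sketch (rx_ring and process_slot are illustrative names):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    #define RING_SIZE 8

    static void *rx_ring[RING_SIZE];  /* slots may be NULL after a failed alloc */

    static int process_slot(unsigned int rd_index)
    {
        void *buf = rx_ring[rd_index % RING_SIZE];

        /* a failed earlier allocation leaves the slot NULL */
        if (!buf)
            return -ENOMEM;

        /* ... unmap the buffer and hand it up the stack ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", process_slot(3));  /* -12: slot was never filled */
        return 0;
    }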
3173     diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
3174     index ba48e64673d9..a17d4675ddc0 100644
3175     --- a/drivers/net/wireless/mwifiex/scan.c
3176     +++ b/drivers/net/wireless/mwifiex/scan.c
3177     @@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
3178     curr_bss->ht_info_offset);
3179    
3180     if (curr_bss->bcn_vht_cap)
3181     - curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
3182     - curr_bss->vht_cap_offset);
3183     + curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
3184     + curr_bss->vht_cap_offset);
3185    
3186     if (curr_bss->bcn_vht_oper)
3187     - curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
3188     - curr_bss->vht_info_offset);
3189     + curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
3190     + curr_bss->vht_info_offset);
3191    
3192     if (curr_bss->bcn_bss_co_2040)
3193     curr_bss->bcn_bss_co_2040 =
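The scan.c hunk fixes a copy-paste slip: the code tested bcn_vht_cap and bcn_vht_oper but then assigned bcn_ht_cap and bcn_ht_oper, so the VHT pointers kept aiming into the old beacon buffer. A sketch of the pointer-rebase idiom the function implements; struct bss here is a cut-down stand-in:

    #include <stddef.h>

    struct bss {
        unsigned char *beacon_buf;
        unsigned char *bcn_ht_cap;   /* cached pointers into beacon_buf */
        unsigned char *bcn_vht_cap;
        size_t ht_off, vht_off;
    };

    /* After copying the beacon, every cached pointer must be rebased
     * into the new buffer, and each test must update ITS OWN field. */
    static void rebase(struct bss *b, unsigned char *new_buf)
    {
        b->beacon_buf = new_buf;
        if (b->bcn_ht_cap)
            b->bcn_ht_cap = new_buf + b->ht_off;
        if (b->bcn_vht_cap)
            b->bcn_vht_cap = new_buf + b->vht_off;  /* not bcn_ht_cap! */
    }

    int main(void)
    {
        unsigned char old[16], fresh[16];
        struct bss b = { 0 };

        b.beacon_buf = old;
        b.bcn_vht_cap = old + 8;
        b.vht_off = 8;
        rebase(&b, fresh);
        return b.bcn_vht_cap == fresh + 8 ? 0 : 1;
    }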
3194     diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
3195     index 1c70b8d09227..9d0b0c442c95 100644
3196     --- a/drivers/net/wireless/mwifiex/usb.c
3197     +++ b/drivers/net/wireless/mwifiex/usb.c
3198     @@ -512,13 +512,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
3199     MWIFIEX_BSS_ROLE_ANY),
3200     MWIFIEX_ASYNC_CMD);
3201    
3202     -#ifdef CONFIG_PM
3203     - /* Resume handler may be called due to remote wakeup,
3204     - * force to exit suspend anyway
3205     - */
3206     - usb_disable_autosuspend(card->udev);
3207     -#endif /* CONFIG_PM */
3208     -
3209     return 0;
3210     }
3211    
3212     @@ -555,7 +548,6 @@ static struct usb_driver mwifiex_usb_driver = {
3213     .id_table = mwifiex_usb_table,
3214     .suspend = mwifiex_usb_suspend,
3215     .resume = mwifiex_usb_resume,
3216     - .supports_autosuspend = 1,
3217     };
3218    
3219     static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
3220     diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
3221     index 95fa3599b407..35f881585962 100644
3222     --- a/drivers/net/wireless/mwifiex/wmm.c
3223     +++ b/drivers/net/wireless/mwifiex/wmm.c
3224     @@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
3225     mwifiex_wmm_delete_all_ralist(priv);
3226     memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
3227    
3228     - if (priv->adapter->if_ops.clean_pcie_ring)
3229     + if (priv->adapter->if_ops.clean_pcie_ring &&
3230     + !priv->adapter->surprise_removed)
3231     priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
3232     spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
3233     }
3234     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3235     index 863bc4bb4806..9fc3f1f4557b 100644
3236     --- a/drivers/pci/pci.c
3237     +++ b/drivers/pci/pci.c
3238     @@ -1131,6 +1131,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
3239     return err;
3240     pci_fixup_device(pci_fixup_enable, dev);
3241    
3242     + if (dev->msi_enabled || dev->msix_enabled)
3243     + return 0;
3244     +
3245     pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
3246     if (pin) {
3247     pci_read_config_word(dev, PCI_COMMAND, &cmd);
3248     @@ -1166,10 +1169,8 @@ static void pci_enable_bridge(struct pci_dev *dev)
3249     pci_enable_bridge(dev->bus->self);
3250    
3251     if (pci_is_enabled(dev)) {
3252     - if (!dev->is_busmaster) {
3253     - dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
3254     + if (!dev->is_busmaster)
3255     pci_set_master(dev);
3256     - }
3257     return;
3258     }
3259    
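The do_pci_enable_device() hunk returns early when MSI or MSI-X is already enabled, so the legacy INTx pin is never configured for a device that uses message-signaled interrupts. Reduced to a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct pci_dev_state {
        bool msi_enabled, msix_enabled;
        bool intx_configured;
    };

    static int enable_device(struct pci_dev_state *dev)
    {
        /* ... BARs and power state handled before this point ... */

        /* with message-signaled interrupts active, leave INTx alone */
        if (dev->msi_enabled || dev->msix_enabled)
            return 0;

        dev->intx_configured = true;  /* legacy IRQ line setup */
        return 0;
    }

    int main(void)
    {
        struct pci_dev_state d = { .msi_enabled = true };
        enable_device(&d);
        printf("intx=%d\n", d.intx_configured);  /* intx=0 */
        return 0;
    }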
3260     diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
3261     index 6ebf3067bde4..b2dcde123e56 100644
3262     --- a/drivers/pinctrl/pinctrl-sunxi.c
3263     +++ b/drivers/pinctrl/pinctrl-sunxi.c
3264     @@ -14,6 +14,7 @@
3265     #include <linux/clk.h>
3266     #include <linux/gpio.h>
3267     #include <linux/irqdomain.h>
3268     +#include <linux/irqchip/chained_irq.h>
3269     #include <linux/module.h>
3270     #include <linux/of.h>
3271     #include <linux/of_address.h>
3272     @@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
3273    
3274     static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
3275     {
3276     + struct irq_chip *chip = irq_get_chip(irq);
3277     struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
3278     const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
3279    
3280     @@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
3281     if (reg) {
3282     int irqoffset;
3283    
3284     + chained_irq_enter(chip, desc);
3285     for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
3286     int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
3287     generic_handle_irq(pin_irq);
3288     }
3289     + chained_irq_exit(chip, desc);
3290     }
3291     }
3292    
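The sunxi hunk brackets the demux loop with chained_irq_enter()/chained_irq_exit() so the parent interrupt is acked and masked according to the parent chip's flow handler. A kernel-style fragment of that handler shape; this is not standalone code, and my_pinctrl, IRQ_STATUS_REG and NR_CHILD_IRQS are illustrative names:

    /* Kernel-style fragment (builds only in a kernel context). */
    static void demux_handler(unsigned int irq, struct irq_desc *desc)
    {
        struct irq_chip *chip = irq_get_chip(irq);
        struct my_pinctrl *pctl = irq_get_handler_data(irq);
        unsigned long pending = readl(pctl->membase + IRQ_STATUS_REG);
        int offset;

        if (!pending)
            return;

        chained_irq_enter(chip, desc);    /* ack/mask the parent */
        for_each_set_bit(offset, &pending, NR_CHILD_IRQS)
            generic_handle_irq(irq_find_mapping(pctl->domain, offset));
        chained_irq_exit(chip, desc);     /* eoi/unmask the parent */
    }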
3293     diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
3294     index 167f3d00c916..66977ebf13b3 100644
3295     --- a/drivers/pnp/pnpacpi/rsparser.c
3296     +++ b/drivers/pnp/pnpacpi/rsparser.c
3297     @@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
3298     struct resource r = {0};
3299     int i, flags;
3300    
3301     - if (acpi_dev_resource_memory(res, &r)
3302     - || acpi_dev_resource_io(res, &r)
3303     - || acpi_dev_resource_address_space(res, &r)
3304     + if (acpi_dev_resource_address_space(res, &r)
3305     || acpi_dev_resource_ext_address_space(res, &r)) {
3306     pnp_add_resource(dev, &r);
3307     return AE_OK;
3308     @@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
3309     }
3310    
3311     switch (res->type) {
3312     + case ACPI_RESOURCE_TYPE_MEMORY24:
3313     + case ACPI_RESOURCE_TYPE_MEMORY32:
3314     + case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
3315     + if (acpi_dev_resource_memory(res, &r))
3316     + pnp_add_resource(dev, &r);
3317     + break;
3318     + case ACPI_RESOURCE_TYPE_IO:
3319     + case ACPI_RESOURCE_TYPE_FIXED_IO:
3320     + if (acpi_dev_resource_io(res, &r))
3321     + pnp_add_resource(dev, &r);
3322     + break;
3323     case ACPI_RESOURCE_TYPE_DMA:
3324     dma = &res->data.dma;
3325     if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
3326     diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
3327     index b4b0d83f9ef6..7061ac0ad428 100644
3328     --- a/drivers/rapidio/devices/tsi721.h
3329     +++ b/drivers/rapidio/devices/tsi721.h
3330     @@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
3331     struct list_head free_list;
3332     dma_cookie_t completed_cookie;
3333     struct tasklet_struct tasklet;
3334     + bool active;
3335     };
3336    
3337     #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
3338     diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
3339     index 502663f5f7c6..91245f5dbe81 100644
3340     --- a/drivers/rapidio/devices/tsi721_dma.c
3341     +++ b/drivers/rapidio/devices/tsi721_dma.c
3342     @@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
3343     {
3344     /* Disable BDMA channel interrupts */
3345     iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
3346     -
3347     - tasklet_schedule(&bdma_chan->tasklet);
3348     + if (bdma_chan->active)
3349     + tasklet_schedule(&bdma_chan->tasklet);
3350     }
3351    
3352     #ifdef CONFIG_PCI_MSI
3353     @@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
3354     }
3355     #endif /* CONFIG_PCI_MSI */
3356    
3357     - tasklet_enable(&bdma_chan->tasklet);
3358     + bdma_chan->active = true;
3359     tsi721_bdma_interrupt_enable(bdma_chan, 1);
3360    
3361     return bdma_chan->bd_num - 1;
3362     @@ -576,9 +576,7 @@ err_out:
3363     static void tsi721_free_chan_resources(struct dma_chan *dchan)
3364     {
3365     struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
3366     -#ifdef CONFIG_PCI_MSI
3367     struct tsi721_device *priv = to_tsi721(dchan->device);
3368     -#endif
3369     LIST_HEAD(list);
3370    
3371     dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
3372     @@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
3373     BUG_ON(!list_empty(&bdma_chan->active_list));
3374     BUG_ON(!list_empty(&bdma_chan->queue));
3375    
3376     - tasklet_disable(&bdma_chan->tasklet);
3377     + tsi721_bdma_interrupt_enable(bdma_chan, 0);
3378     + bdma_chan->active = false;
3379     +
3380     +#ifdef CONFIG_PCI_MSI
3381     + if (priv->flags & TSI721_USING_MSIX) {
3382     + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
3383     + bdma_chan->id].vector);
3384     + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
3385     + bdma_chan->id].vector);
3386     + } else
3387     +#endif
3388     + synchronize_irq(priv->pdev->irq);
3389     +
3390     + tasklet_kill(&bdma_chan->tasklet);
3391    
3392     spin_lock_bh(&bdma_chan->lock);
3393     list_splice_init(&bdma_chan->free_list, &list);
3394     spin_unlock_bh(&bdma_chan->lock);
3395    
3396     - tsi721_bdma_interrupt_enable(bdma_chan, 0);
3397     -
3398     #ifdef CONFIG_PCI_MSI
3399     if (priv->flags & TSI721_USING_MSIX) {
3400     free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
3401     @@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
3402     bdma_chan->dchan.cookie = 1;
3403     bdma_chan->dchan.chan_id = i;
3404     bdma_chan->id = i;
3405     + bdma_chan->active = false;
3406    
3407     spin_lock_init(&bdma_chan->lock);
3408    
3409     @@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
3410    
3411     tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
3412     (unsigned long)bdma_chan);
3413     - tasklet_disable(&bdma_chan->tasklet);
3414     list_add_tail(&bdma_chan->dchan.device_node,
3415     &mport->dma.channels);
3416     }
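The tsi721 change retires tasklet_disable()/tasklet_enable() in favour of an active flag checked in the interrupt path, then tears the channel down in a safe order: stop the hardware source, clear the flag, synchronize_irq() to wait out a handler in flight, and tasklet_kill() to flush a queued run. A kernel-style fragment of that ordering; not standalone, and hw_irq_disable() is a hypothetical stand-in for the device's interrupt-disable write:

    /* Kernel-style fragment (builds only in a kernel context). */
    struct chan {
        struct tasklet_struct tasklet;
        bool active;
        int irq;
    };

    static void chan_isr_bottom(struct chan *c)
    {
        /* IRQ path: only schedule while the channel is live */
        if (c->active)
            tasklet_schedule(&c->tasklet);
    }

    static void chan_teardown(struct chan *c)
    {
        hw_irq_disable(c);          /* stop new hardware interrupts */
        c->active = false;          /* stop the ISR from rescheduling */
        synchronize_irq(c->irq);    /* wait out a handler in flight */
        tasklet_kill(&c->tasklet);  /* flush an already-queued run */
    }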
3417     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
3418     index a01b8b3b70ca..d97fbf4eb65b 100644
3419     --- a/drivers/regulator/core.c
3420     +++ b/drivers/regulator/core.c
3421     @@ -923,6 +923,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
3422     return 0;
3423     }
3424    
3425     +static int _regulator_do_enable(struct regulator_dev *rdev);
3426     +
3427     /**
3428     * set_machine_constraints - sets regulator constraints
3429     * @rdev: regulator source
3430     @@ -979,10 +981,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
3431     /* If the constraints say the regulator should be on at this point
3432     * and we have control then make sure it is enabled.
3433     */
3434     - if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
3435     - ops->enable) {
3436     - ret = ops->enable(rdev);
3437     - if (ret < 0) {
3438     + if (rdev->constraints->always_on || rdev->constraints->boot_on) {
3439     + ret = _regulator_do_enable(rdev);
3440     + if (ret < 0 && ret != -EINVAL) {
3441     rdev_err(rdev, "failed to enable\n");
3442     goto out;
3443     }
3444     @@ -3571,9 +3572,8 @@ int regulator_suspend_finish(void)
3445     struct regulator_ops *ops = rdev->desc->ops;
3446    
3447     mutex_lock(&rdev->mutex);
3448     - if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
3449     - ops->enable) {
3450     - error = ops->enable(rdev);
3451     + if (rdev->use_count > 0 || rdev->constraints->always_on) {
3452     + error = _regulator_do_enable(rdev);
3453     if (error)
3454     ret = error;
3455     } else {
3456     diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
3457     index f93cc32eb818..71e974738014 100644
3458     --- a/drivers/s390/char/fs3270.c
3459     +++ b/drivers/s390/char/fs3270.c
3460     @@ -564,6 +564,7 @@ static void __exit
3461     fs3270_exit(void)
3462     {
3463     raw3270_unregister_notifier(&fs3270_notifier);
3464     + device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
3465     __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
3466     }
3467    
3468     diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
3469     index 4911310a38f5..22a9bb1abae1 100644
3470     --- a/drivers/scsi/isci/host.h
3471     +++ b/drivers/scsi/isci/host.h
3472     @@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
3473     }
3474    
3475     #define for_each_isci_host(id, ihost, pdev) \
3476     - for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
3477     - id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
3478     - ihost = to_pci_info(pdev)->hosts[++id])
3479     + for (id = 0; id < SCI_MAX_CONTROLLERS && \
3480     + (ihost = to_pci_info(pdev)->hosts[id]); id++)
3481    
3482     static inline void wait_for_start(struct isci_host *ihost)
3483     {
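The old for_each_isci_host evaluated hosts[++id] in the step expression before the bound was checked, reading one slot past the array on the final pass. The rewritten macro tests the index before the dereference; here is a standalone reduction of that iterator shape:

    #include <stdio.h>

    #define MAX_HOSTS 2

    /* Iterate valid, non-NULL slots; the bound is tested before the
     * dereference, so 'id' never indexes past the array. */
    #define for_each_host(id, h, arr)                        \
        for ((id) = 0;                                       \
             (id) < MAX_HOSTS && ((h) = (arr)[(id)]);        \
             (id)++)

    int main(void)
    {
        int a = 1, b = 2;
        int *hosts[MAX_HOSTS] = { &a, &b };
        int id, *h;

        for_each_host(id, h, hosts)
            printf("host %d -> %d\n", id, *h);
        return 0;
    }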
3484     diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
3485     index 85c77f6b802b..ac879745ef80 100644
3486     --- a/drivers/scsi/isci/port_config.c
3487     +++ b/drivers/scsi/isci/port_config.c
3488     @@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
3489     SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
3490     } else {
3491     /* the phy is already the part of the port */
3492     - u32 port_state = iport->sm.current_state_id;
3493     -
3494     - /* if the PORT'S state is resetting then the link up is from
3495     - * port hard reset in this case, we need to tell the port
3496     - * that link up is recieved
3497     - */
3498     - BUG_ON(port_state != SCI_PORT_RESETTING);
3499     port_agent->phy_ready_mask |= 1 << phy_index;
3500     sci_port_link_up(iport, iphy);
3501     }
3502     diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
3503     index 0d30ca849e8f..5d6fda72d659 100644
3504     --- a/drivers/scsi/isci/task.c
3505     +++ b/drivers/scsi/isci/task.c
3506     @@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
3507     /* XXX: need to cleanup any ireqs targeting this
3508     * domain_device
3509     */
3510     - ret = TMF_RESP_FUNC_COMPLETE;
3511     + ret = -ENODEV;
3512     goto out;
3513     }
3514    
3515     diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3516     index 93db74ef3461..43acfce3a435 100644
3517     --- a/drivers/scsi/qla2xxx/qla_def.h
3518     +++ b/drivers/scsi/qla2xxx/qla_def.h
3519     @@ -2993,8 +2993,7 @@ struct qla_hw_data {
3520     IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
3521     IS_QLA8044(ha))
3522     #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3523     -#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
3524     - IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
3525     +#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
3526     #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3527     #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3528     #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
3529     diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3530     index ff9c86b1a0d8..e32fccd6580c 100644
3531     --- a/drivers/scsi/qla2xxx/qla_isr.c
3532     +++ b/drivers/scsi/qla2xxx/qla_isr.c
3533     @@ -2829,6 +2829,7 @@ static int
3534     qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3535     {
3536     #define MIN_MSIX_COUNT 2
3537     +#define ATIO_VECTOR 2
3538     int i, ret;
3539     struct msix_entry *entries;
3540     struct qla_msix_entry *qentry;
3541     @@ -2885,34 +2886,47 @@ msix_failed:
3542     }
3543    
3544     /* Enable MSI-X vectors for the base queue */
3545     - for (i = 0; i < ha->msix_count; i++) {
3546     + for (i = 0; i < 2; i++) {
3547     qentry = &ha->msix_entries[i];
3548     - if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3549     - ret = request_irq(qentry->vector,
3550     - qla83xx_msix_entries[i].handler,
3551     - 0, qla83xx_msix_entries[i].name, rsp);
3552     - } else if (IS_P3P_TYPE(ha)) {
3553     + if (IS_P3P_TYPE(ha))
3554     ret = request_irq(qentry->vector,
3555     qla82xx_msix_entries[i].handler,
3556     0, qla82xx_msix_entries[i].name, rsp);
3557     - } else {
3558     + else
3559     ret = request_irq(qentry->vector,
3560     msix_entries[i].handler,
3561     0, msix_entries[i].name, rsp);
3562     - }
3563     - if (ret) {
3564     - ql_log(ql_log_fatal, vha, 0x00cb,
3565     - "MSI-X: unable to register handler -- %x/%d.\n",
3566     - qentry->vector, ret);
3567     - qla24xx_disable_msix(ha);
3568     - ha->mqenable = 0;
3569     - goto msix_out;
3570     - }
3571     + if (ret)
3572     + goto msix_register_fail;
3573     qentry->have_irq = 1;
3574     qentry->rsp = rsp;
3575     rsp->msix = qentry;
3576     }
3577    
3578     + /*
3579     + * If target mode is enabled, also request the vector for the ATIO
3580     + * queue.
3581     + */
3582     + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3583     + qentry = &ha->msix_entries[ATIO_VECTOR];
3584     + ret = request_irq(qentry->vector,
3585     + qla83xx_msix_entries[ATIO_VECTOR].handler,
3586     + 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
3587     + qentry->have_irq = 1;
3588     + qentry->rsp = rsp;
3589     + rsp->msix = qentry;
3590     + }
3591     +
3592     +msix_register_fail:
3593     + if (ret) {
3594     + ql_log(ql_log_fatal, vha, 0x00cb,
3595     + "MSI-X: unable to register handler -- %x/%d.\n",
3596     + qentry->vector, ret);
3597     + qla24xx_disable_msix(ha);
3598     + ha->mqenable = 0;
3599     + goto msix_out;
3600     + }
3601     +
3602     /* Enable MSI-X vector for response queue update for queue 0 */
3603     if (IS_QLA83XX(ha)) {
3604     if (ha->msixbase && ha->mqiobase &&
3605     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3606     index 17d740427240..9969fa1ef7c4 100644
3607     --- a/drivers/scsi/storvsc_drv.c
3608     +++ b/drivers/scsi/storvsc_drv.c
3609     @@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
3610     {
3611     struct stor_mem_pools *memp = sdevice->hostdata;
3612    
3613     + if (!memp)
3614     + return;
3615     +
3616     mempool_destroy(memp->request_mempool);
3617     kmem_cache_destroy(memp->request_pool);
3618     kfree(memp);
3619     diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
3620     index 37bad952ab38..05dd69212e32 100644
3621     --- a/drivers/spi/spi-ath79.c
3622     +++ b/drivers/spi/spi-ath79.c
3623     @@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
3624    
3625     flags = GPIOF_DIR_OUT;
3626     if (spi->mode & SPI_CS_HIGH)
3627     - flags |= GPIOF_INIT_HIGH;
3628     - else
3629     flags |= GPIOF_INIT_LOW;
3630     + else
3631     + flags |= GPIOF_INIT_HIGH;
3632    
3633     status = gpio_request_one(cdata->gpio, flags,
3634     dev_name(&spi->dev));
3635     diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
3636     index cc5b75d10c38..524d112d5369 100644
3637     --- a/drivers/spi/spi-coldfire-qspi.c
3638     +++ b/drivers/spi/spi-coldfire-qspi.c
3639     @@ -539,7 +539,8 @@ static int mcfqspi_resume(struct device *dev)
3640     #ifdef CONFIG_PM_RUNTIME
3641     static int mcfqspi_runtime_suspend(struct device *dev)
3642     {
3643     - struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
3644     + struct spi_master *master = dev_get_drvdata(dev);
3645     + struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
3646    
3647     clk_disable(mcfqspi->clk);
3648    
3649     @@ -548,7 +549,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
3650    
3651     static int mcfqspi_runtime_resume(struct device *dev)
3652     {
3653     - struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
3654     + struct spi_master *master = dev_get_drvdata(dev);
3655     + struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
3656    
3657     clk_enable(mcfqspi->clk);
3658    
3659     diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
3660     index 4e44575bd87a..f1322343d789 100644
3661     --- a/drivers/spi/spi-fsl-dspi.c
3662     +++ b/drivers/spi/spi-fsl-dspi.c
3663     @@ -421,7 +421,6 @@ static int dspi_suspend(struct device *dev)
3664    
3665     static int dspi_resume(struct device *dev)
3666     {
3667     -
3668     struct spi_master *master = dev_get_drvdata(dev);
3669     struct fsl_dspi *dspi = spi_master_get_devdata(master);
3670    
3671     @@ -505,7 +504,7 @@ static int dspi_probe(struct platform_device *pdev)
3672     clk_prepare_enable(dspi->clk);
3673    
3674     init_waitqueue_head(&dspi->waitq);
3675     - platform_set_drvdata(pdev, dspi);
3676     + platform_set_drvdata(pdev, master);
3677    
3678     ret = spi_bitbang_start(&dspi->bitbang);
3679     if (ret != 0) {
3680     @@ -527,7 +526,8 @@ out_master_put:
3681    
3682     static int dspi_remove(struct platform_device *pdev)
3683     {
3684     - struct fsl_dspi *dspi = platform_get_drvdata(pdev);
3685     + struct spi_master *master = platform_get_drvdata(pdev);
3686     + struct fsl_dspi *dspi = spi_master_get_devdata(master);
3687    
3688     /* Disconnect from the SPI framework */
3689     spi_bitbang_stop(&dspi->bitbang);
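The dspi fix makes every consumer of drvdata agree on one stored type: probe stores the spi_master, and the PM and remove paths recover the private state through spi_master_get_devdata(). The coldfire-qspi hunk above corrects the same mismatch. A toy sketch of the two-step convention; master and privdat are stand-ins, not the SPI core types:

    #include <stdio.h>

    /* Toy stand-ins: drvdata holds ONE agreed-on type everywhere. */
    struct master  { void *devdata; };
    struct privdat { int clk_on; };

    static void *drvdata;  /* what platform_set_drvdata() would store */

    static struct privdat *get_priv(void)
    {
        /* every consumer does the same two-step lookup */
        struct master *m = drvdata;
        return m->devdata;
    }

    int main(void)
    {
        struct privdat p = { 1 };
        struct master m = { &p };

        drvdata = &m;  /* probe: store the master, not the priv */
        printf("clk_on=%d\n", get_priv()->clk_on);
        return 0;
    }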
3690     diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
3691     index 15323d8bd9cf..941069517423 100644
3692     --- a/drivers/spi/spi-imx.c
3693     +++ b/drivers/spi/spi-imx.c
3694     @@ -892,8 +892,8 @@ static int spi_imx_remove(struct platform_device *pdev)
3695     spi_bitbang_stop(&spi_imx->bitbang);
3696    
3697     writel(0, spi_imx->base + MXC_CSPICTRL);
3698     - clk_disable_unprepare(spi_imx->clk_ipg);
3699     - clk_disable_unprepare(spi_imx->clk_per);
3700     + clk_unprepare(spi_imx->clk_ipg);
3701     + clk_unprepare(spi_imx->clk_per);
3702     spi_master_put(master);
3703    
3704     return 0;
3705     diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
3706     index d7ac040e0dc1..d02088f7dc33 100644
3707     --- a/drivers/staging/zram/zram_drv.c
3708     +++ b/drivers/staging/zram/zram_drv.c
3709     @@ -621,6 +621,8 @@ static ssize_t disksize_store(struct device *dev,
3710    
3711     disksize = PAGE_ALIGN(disksize);
3712     meta = zram_meta_alloc(disksize);
3713     + if (!meta)
3714     + return -ENOMEM;
3715     down_write(&zram->init_lock);
3716     if (zram->init_done) {
3717     up_write(&zram->init_lock);
3718     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3719     index e12f2aab3c87..b5e574659785 100644
3720     --- a/drivers/target/iscsi/iscsi_target.c
3721     +++ b/drivers/target/iscsi/iscsi_target.c
3722     @@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
3723     spin_unlock_bh(&conn->cmd_lock);
3724    
3725     list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
3726     - list_del(&cmd->i_conn_node);
3727     + list_del_init(&cmd->i_conn_node);
3728     iscsit_free_cmd(cmd, false);
3729     }
3730     }
3731     @@ -3704,7 +3704,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
3732     break;
3733     case ISTATE_REMOVE:
3734     spin_lock_bh(&conn->cmd_lock);
3735     - list_del(&cmd->i_conn_node);
3736     + list_del_init(&cmd->i_conn_node);
3737     spin_unlock_bh(&conn->cmd_lock);
3738    
3739     iscsit_free_cmd(cmd, false);
3740     @@ -4149,7 +4149,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
3741     spin_lock_bh(&conn->cmd_lock);
3742     list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
3743    
3744     - list_del(&cmd->i_conn_node);
3745     + list_del_init(&cmd->i_conn_node);
3746     spin_unlock_bh(&conn->cmd_lock);
3747    
3748     iscsit_increment_maxcmdsn(cmd, sess);
3749     @@ -4194,6 +4194,10 @@ int iscsit_close_connection(
3750     iscsit_stop_timers_for_cmds(conn);
3751     iscsit_stop_nopin_response_timer(conn);
3752     iscsit_stop_nopin_timer(conn);
3753     +
3754     + if (conn->conn_transport->iscsit_wait_conn)
3755     + conn->conn_transport->iscsit_wait_conn(conn);
3756     +
3757     iscsit_free_queue_reqs_for_conn(conn);
3758    
3759     /*
3760     diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
3761     index 33be1fb1df32..4ca8fd2a70db 100644
3762     --- a/drivers/target/iscsi/iscsi_target_erl2.c
3763     +++ b/drivers/target/iscsi/iscsi_target_erl2.c
3764     @@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
3765     list_for_each_entry_safe(cmd, cmd_tmp,
3766     &cr->conn_recovery_cmd_list, i_conn_node) {
3767    
3768     - list_del(&cmd->i_conn_node);
3769     + list_del_init(&cmd->i_conn_node);
3770     cmd->conn = NULL;
3771     spin_unlock(&cr->conn_recovery_cmd_lock);
3772     iscsit_free_cmd(cmd, true);
3773     @@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
3774     list_for_each_entry_safe(cmd, cmd_tmp,
3775     &cr->conn_recovery_cmd_list, i_conn_node) {
3776    
3777     - list_del(&cmd->i_conn_node);
3778     + list_del_init(&cmd->i_conn_node);
3779     cmd->conn = NULL;
3780     spin_unlock(&cr->conn_recovery_cmd_lock);
3781     iscsit_free_cmd(cmd, true);
3782     @@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
3783     }
3784     cr = cmd->cr;
3785    
3786     - list_del(&cmd->i_conn_node);
3787     + list_del_init(&cmd->i_conn_node);
3788     return --cr->cmd_count;
3789     }
3790    
3791     @@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
3792     if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
3793     continue;
3794    
3795     - list_del(&cmd->i_conn_node);
3796     + list_del_init(&cmd->i_conn_node);
3797    
3798     spin_unlock_bh(&conn->cmd_lock);
3799     iscsit_free_cmd(cmd, true);
3800     @@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3801     /*
3802     * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
3803     * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
3804     - * list_del(&cmd->i_conn_node); to release the command to the
3805     + * list_del_init(&cmd->i_conn_node); to release the command to the
3806     * session pool and remove it from the connection's list.
3807     *
3808     * Also stop the DataOUT timer, which will be restarted after
3809     @@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3810     " CID: %hu\n", cmd->iscsi_opcode,
3811     cmd->init_task_tag, cmd->cmd_sn, conn->cid);
3812    
3813     - list_del(&cmd->i_conn_node);
3814     + list_del_init(&cmd->i_conn_node);
3815     spin_unlock_bh(&conn->cmd_lock);
3816     iscsit_free_cmd(cmd, true);
3817     spin_lock_bh(&conn->cmd_lock);
3818     @@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3819     */
3820     if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
3821     iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
3822     - list_del(&cmd->i_conn_node);
3823     + list_del_init(&cmd->i_conn_node);
3824     spin_unlock_bh(&conn->cmd_lock);
3825     iscsit_free_cmd(cmd, true);
3826     spin_lock_bh(&conn->cmd_lock);
3827     @@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
3828    
3829     cmd->sess = conn->sess;
3830    
3831     - list_del(&cmd->i_conn_node);
3832     + list_del_init(&cmd->i_conn_node);
3833     spin_unlock_bh(&conn->cmd_lock);
3834    
3835     iscsit_free_all_datain_reqs(cmd);
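The wholesale list_del() to list_del_init() conversion across these iSCSI hunks matters because the command can be examined again after removal: list_del_init() re-points the node at itself, so a later list_empty() on the entry is well defined instead of chasing poisoned pointers. A standalone miniature of the difference:

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void init(struct list_head *h) { h->next = h->prev = h; }

    static bool empty(const struct list_head *h) { return h->next == h; }

    /* list_del_init(): unlink AND re-point the entry at itself, so a
     * later list_empty(&entry) is well defined and a re-add is safe. */
    static void del_init(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        init(e);
    }

    int main(void)
    {
        struct list_head head, node;

        init(&head);
        node.next = head.next;      /* add node after head */
        node.prev = &head;
        head.next = &node;
        node.next->prev = &node;

        del_init(&node);
        printf("head empty=%d node empty=%d\n", empty(&head), empty(&node));
        return 0;                   /* prints: head empty=1 node empty=1 */
    }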
3836     diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
3837     index 4faeb47fa5e1..3cf77c0b76b4 100644
3838     --- a/drivers/target/iscsi/iscsi_target_tpg.c
3839     +++ b/drivers/target/iscsi/iscsi_target_tpg.c
3840     @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
3841     list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3842    
3843     spin_lock(&tpg->tpg_state_lock);
3844     - if (tpg->tpg_state == TPG_STATE_FREE) {
3845     + if (tpg->tpg_state != TPG_STATE_ACTIVE) {
3846     spin_unlock(&tpg->tpg_state_lock);
3847     continue;
3848     }
3849     diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
3850     index 7722cb9d5a80..72573661a14a 100644
3851     --- a/drivers/thermal/x86_pkg_temp_thermal.c
3852     +++ b/drivers/thermal/x86_pkg_temp_thermal.c
3853     @@ -68,6 +68,10 @@ struct phy_dev_entry {
3854     struct thermal_zone_device *tzone;
3855     };
3856    
3857     +static const struct thermal_zone_params pkg_temp_tz_params = {
3858     + .no_hwmon = true,
3859     +};
3860     +
3861     /* List maintaining number of package instances */
3862     static LIST_HEAD(phy_dev_list);
3863     static DEFINE_MUTEX(phy_dev_list_mutex);
3864     @@ -446,7 +450,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
3865     thres_count,
3866     (thres_count == MAX_NUMBER_OF_TRIPS) ?
3867     0x03 : 0x01,
3868     - phy_dev_entry, &tzone_ops, NULL, 0, 0);
3869     + phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
3870     if (IS_ERR(phy_dev_entry->tzone)) {
3871     err = PTR_ERR(phy_dev_entry->tzone);
3872     goto err_ret_free;
3873     diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3874     index 548d1996590f..652438325197 100644
3875     --- a/drivers/usb/core/config.c
3876     +++ b/drivers/usb/core/config.c
3877     @@ -718,6 +718,10 @@ int usb_get_configuration(struct usb_device *dev)
3878     result = -ENOMEM;
3879     goto err;
3880     }
3881     +
3882     + if (dev->quirks & USB_QUIRK_DELAY_INIT)
3883     + msleep(100);
3884     +
3885     result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
3886     bigbuffer, length);
3887     if (result < 0) {
3888     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3889     index 01fe36273f3b..1053eb651b2f 100644
3890     --- a/drivers/usb/core/quirks.c
3891     +++ b/drivers/usb/core/quirks.c
3892     @@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3893     /* Microsoft LifeCam-VX700 v2.0 */
3894     { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
3895    
3896     + /* Logitech HD Pro Webcams C920 and C930e */
3897     + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
3898     + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
3899     +
3900     /* Logitech Quickcam Fusion */
3901     { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
3902    
3903     diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
3904     index 854c2ec7b699..3e86bf4371b3 100644
3905     --- a/drivers/usb/host/ehci-pci.c
3906     +++ b/drivers/usb/host/ehci-pci.c
3907     @@ -58,8 +58,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
3908     {
3909     struct ehci_hcd *ehci = hcd_to_ehci(hcd);
3910     struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
3911     - struct pci_dev *p_smbus;
3912     - u8 rev;
3913     u32 temp;
3914     int retval;
3915    
3916     @@ -175,22 +173,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
3917     /* SB600 and old version of SB700 have a bug in EHCI controller,
3918     * which causes usb devices lose response in some cases.
3919     */
3920     - if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
3921     - p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
3922     - PCI_DEVICE_ID_ATI_SBX00_SMBUS,
3923     - NULL);
3924     - if (!p_smbus)
3925     - break;
3926     - rev = p_smbus->revision;
3927     - if ((pdev->device == 0x4386) || (rev == 0x3a)
3928     - || (rev == 0x3b)) {
3929     - u8 tmp;
3930     - ehci_info(ehci, "applying AMD SB600/SB700 USB "
3931     - "freeze workaround\n");
3932     - pci_read_config_byte(pdev, 0x53, &tmp);
3933     - pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
3934     - }
3935     - pci_dev_put(p_smbus);
3936     + if ((pdev->device == 0x4386 || pdev->device == 0x4396) &&
3937     + usb_amd_hang_symptom_quirk()) {
3938     + u8 tmp;
3939     + ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n");
3940     + pci_read_config_byte(pdev, 0x53, &tmp);
3941     + pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
3942     }
3943     break;
3944     case PCI_VENDOR_ID_NETMOS:
3945     diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
3946     index ec337c2bd5e0..659cde1ed1ea 100644
3947     --- a/drivers/usb/host/ohci-pci.c
3948     +++ b/drivers/usb/host/ohci-pci.c
3949     @@ -150,28 +150,16 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
3950     static int ohci_quirk_amd700(struct usb_hcd *hcd)
3951     {
3952     struct ohci_hcd *ohci = hcd_to_ohci(hcd);
3953     - struct pci_dev *amd_smbus_dev;
3954     - u8 rev;
3955    
3956     if (usb_amd_find_chipset_info())
3957     ohci->flags |= OHCI_QUIRK_AMD_PLL;
3958    
3959     - amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
3960     - PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
3961     - if (!amd_smbus_dev)
3962     - return 0;
3963     -
3964     - rev = amd_smbus_dev->revision;
3965     -
3966     /* SB800 needs pre-fetch fix */
3967     - if ((rev >= 0x40) && (rev <= 0x4f)) {
3968     + if (usb_amd_prefetch_quirk()) {
3969     ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
3970     ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
3971     }
3972    
3973     - pci_dev_put(amd_smbus_dev);
3974     - amd_smbus_dev = NULL;
3975     -
3976     return 0;
3977     }
3978    
3979     diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
3980     index 08ef2829a7e2..463156d03140 100644
3981     --- a/drivers/usb/host/pci-quirks.c
3982     +++ b/drivers/usb/host/pci-quirks.c
3983     @@ -79,11 +79,30 @@
3984     #define USB_INTEL_USB3_PSSEN 0xD8
3985     #define USB_INTEL_USB3PRM 0xDC
3986    
3987     +/*
3988     + * amd_chipset_gen values represent AMD different chipset generations
3989     + */
3990     +enum amd_chipset_gen {
3991     + NOT_AMD_CHIPSET = 0,
3992     + AMD_CHIPSET_SB600,
3993     + AMD_CHIPSET_SB700,
3994     + AMD_CHIPSET_SB800,
3995     + AMD_CHIPSET_HUDSON2,
3996     + AMD_CHIPSET_BOLTON,
3997     + AMD_CHIPSET_YANGTZE,
3998     + AMD_CHIPSET_UNKNOWN,
3999     +};
4000     +
4001     +struct amd_chipset_type {
4002     + enum amd_chipset_gen gen;
4003     + u8 rev;
4004     +};
4005     +
4006     static struct amd_chipset_info {
4007     struct pci_dev *nb_dev;
4008     struct pci_dev *smbus_dev;
4009     int nb_type;
4010     - int sb_type;
4011     + struct amd_chipset_type sb_type;
4012     int isoc_reqs;
4013     int probe_count;
4014     int probe_result;
4015     @@ -91,6 +110,51 @@ static struct amd_chipset_info {
4016    
4017     static DEFINE_SPINLOCK(amd_lock);
4018    
4019     +/*
4020     + * amd_chipset_sb_type_init - initialize amd chipset southbridge type
4021     + *
4022     + * AMD FCH/SB generation and revision is identified by SMBus controller
4023     + * vendor, device and revision IDs.
4024     + *
4025     + * Returns: 1 if it is an AMD chipset, 0 otherwise.
4026     + */
4027     +int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
4028     +{
4029     + u8 rev = 0;
4030     + pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;
4031     +
4032     + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
4033     + PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
4034     + if (pinfo->smbus_dev) {
4035     + rev = pinfo->smbus_dev->revision;
4036     + if (rev >= 0x10 && rev <= 0x1f)
4037     + pinfo->sb_type.gen = AMD_CHIPSET_SB600;
4038     + else if (rev >= 0x30 && rev <= 0x3f)
4039     + pinfo->sb_type.gen = AMD_CHIPSET_SB700;
4040     + else if (rev >= 0x40 && rev <= 0x4f)
4041     + pinfo->sb_type.gen = AMD_CHIPSET_SB800;
4042     + } else {
4043     + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
4044     + PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
4045     +
4046     + if (!pinfo->smbus_dev) {
4047     + pinfo->sb_type.gen = NOT_AMD_CHIPSET;
4048     + return 0;
4049     + }
4050     +
4051     + rev = pinfo->smbus_dev->revision;
4052     + if (rev >= 0x11 && rev <= 0x14)
4053     + pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
4054     + else if (rev >= 0x15 && rev <= 0x18)
4055     + pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
4056     + else if (rev >= 0x39 && rev <= 0x3a)
4057     + pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
4058     + }
4059     +
4060     + pinfo->sb_type.rev = rev;
4061     + return 1;
4062     +}
4063     +
4064     void sb800_prefetch(struct device *dev, int on)
4065     {
4066     u16 misc;
4067     @@ -106,7 +170,6 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
4068    
4069     int usb_amd_find_chipset_info(void)
4070     {
4071     - u8 rev = 0;
4072     unsigned long flags;
4073     struct amd_chipset_info info;
4074     int ret;
4075     @@ -122,27 +185,17 @@ int usb_amd_find_chipset_info(void)
4076     memset(&info, 0, sizeof(info));
4077     spin_unlock_irqrestore(&amd_lock, flags);
4078    
4079     - info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
4080     - if (info.smbus_dev) {
4081     - rev = info.smbus_dev->revision;
4082     - if (rev >= 0x40)
4083     - info.sb_type = 1;
4084     - else if (rev >= 0x30 && rev <= 0x3b)
4085     - info.sb_type = 3;
4086     - } else {
4087     - info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
4088     - 0x780b, NULL);
4089     - if (!info.smbus_dev) {
4090     - ret = 0;
4091     - goto commit;
4092     - }
4093     -
4094     - rev = info.smbus_dev->revision;
4095     - if (rev >= 0x11 && rev <= 0x18)
4096     - info.sb_type = 2;
4097     + if (!amd_chipset_sb_type_init(&info)) {
4098     + ret = 0;
4099     + goto commit;
4100     }
4101    
4102     - if (info.sb_type == 0) {
4103     + /* The following chipset generations don't need the AMD PLL quirk */
4104     + if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
4105     + info.sb_type.gen == AMD_CHIPSET_SB600 ||
4106     + info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
4107     + (info.sb_type.gen == AMD_CHIPSET_SB700 &&
4108     + info.sb_type.rev > 0x3b)) {
4109     if (info.smbus_dev) {
4110     pci_dev_put(info.smbus_dev);
4111     info.smbus_dev = NULL;
4112     @@ -197,6 +250,27 @@ commit:
4113     }
4114     EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
4115    
4116     +bool usb_amd_hang_symptom_quirk(void)
4117     +{
4118     + u8 rev;
4119     +
4120     + usb_amd_find_chipset_info();
4121     + rev = amd_chipset.sb_type.rev;
4122     + /* SB600 and old version of SB700 have hang symptom bug */
4123     + return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
4124     + (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
4125     + rev >= 0x3a && rev <= 0x3b);
4126     +}
4127     +EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);
4128     +
4129     +bool usb_amd_prefetch_quirk(void)
4130     +{
4131     + usb_amd_find_chipset_info();
4132     + /* SB800 needs pre-fetch fix */
4133     + return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
4134     +}
4135     +EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
4136     +
4137     /*
4138     * The hardware normally enables the A-link power management feature, which
4139     * lets the system lower the power consumption in idle states.
4140     @@ -229,7 +303,9 @@ static void usb_amd_quirk_pll(int disable)
4141     }
4142     }
4143    
4144     - if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
4145     + if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
4146     + amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
4147     + amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
4148     outb_p(AB_REG_BAR_LOW, 0xcd6);
4149     addr_low = inb_p(0xcd7);
4150     outb_p(AB_REG_BAR_HIGH, 0xcd6);
4151     @@ -240,7 +316,8 @@ static void usb_amd_quirk_pll(int disable)
4152     outl_p(0x40, AB_DATA(addr));
4153     outl_p(0x34, AB_INDX(addr));
4154     val = inl_p(AB_DATA(addr));
4155     - } else if (amd_chipset.sb_type == 3) {
4156     + } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
4157     + amd_chipset.sb_type.rev <= 0x3b) {
4158     pci_read_config_dword(amd_chipset.smbus_dev,
4159     AB_REG_BAR_SB700, &addr);
4160     outl(AX_INDXC, AB_INDX(addr));
4161     @@ -353,7 +430,7 @@ void usb_amd_dev_put(void)
4162     amd_chipset.nb_dev = NULL;
4163     amd_chipset.smbus_dev = NULL;
4164     amd_chipset.nb_type = 0;
4165     - amd_chipset.sb_type = 0;
4166     + memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
4167     amd_chipset.isoc_reqs = 0;
4168     amd_chipset.probe_result = 0;
4169    
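The pci-quirks rework replaces the magic sb_type values 1/2/3 with a named generation enum plus revision, detected once in amd_chipset_sb_type_init() and consumed by small predicates such as usb_amd_hang_symptom_quirk(). A standalone sketch of the detect-once/predicate pattern, with the revision ranges taken from the patch:

    #include <stdbool.h>
    #include <stdio.h>

    enum sb_gen { NOT_AMD, SB600, SB700, SB800 };

    struct sb_type { enum sb_gen gen; unsigned char rev; };

    /* One detection routine fills this; predicates only read it. */
    static struct sb_type detect(unsigned char smbus_rev)
    {
        struct sb_type t = { NOT_AMD, smbus_rev };

        if (smbus_rev >= 0x10 && smbus_rev <= 0x1f)
            t.gen = SB600;
        else if (smbus_rev >= 0x30 && smbus_rev <= 0x3f)
            t.gen = SB700;
        else if (smbus_rev >= 0x40 && smbus_rev <= 0x4f)
            t.gen = SB800;
        return t;
    }

    static bool hang_symptom_quirk(struct sb_type t)
    {
        /* SB600, and SB700 revs 0x3a-0x3b, exhibit the USB freeze */
        return t.gen == SB600 ||
               (t.gen == SB700 && t.rev >= 0x3a && t.rev <= 0x3b);
    }

    int main(void)
    {
        printf("%d\n", hang_symptom_quirk(detect(0x3a)));  /* 1 */
        return 0;
    }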
4170     diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
4171     index ed6700d00fe6..638e88f7a28b 100644
4172     --- a/drivers/usb/host/pci-quirks.h
4173     +++ b/drivers/usb/host/pci-quirks.h
4174     @@ -5,6 +5,8 @@
4175     void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
4176     int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
4177     int usb_amd_find_chipset_info(void);
4178     +bool usb_amd_hang_symptom_quirk(void);
4179     +bool usb_amd_prefetch_quirk(void);
4180     void usb_amd_dev_put(void);
4181     void usb_amd_quirk_pll_disable(void);
4182     void usb_amd_quirk_pll_enable(void);
4183     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4184     index 82fb34183a7f..f178f762b543 100644
4185     --- a/drivers/usb/host/xhci.c
4186     +++ b/drivers/usb/host/xhci.c
4187     @@ -4730,6 +4730,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4188     /* Accept arbitrarily long scatter-gather lists */
4189     hcd->self.sg_tablesize = ~0;
4190    
4191     + /* Support building packets from discontinuous buffers */
4192     + hcd->self.no_sg_constraint = 1;
4193     +
4194     /* XHCI controllers don't stop the ep queue on short packets :| */
4195     hcd->self.no_stop_on_short = 1;
4196    
4197     @@ -4754,14 +4757,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4198     /* xHCI private pointer was set in xhci_pci_probe for the second
4199     * registered roothub.
4200     */
4201     - xhci = hcd_to_xhci(hcd);
4202     - /*
4203     - * Support arbitrarily aligned sg-list entries on hosts without
4204     - * TD fragment rules (which are currently unsupported).
4205     - */
4206     - if (xhci->hci_version < 0x100)
4207     - hcd->self.no_sg_constraint = 1;
4208     -
4209     return 0;
4210     }
4211    
4212     @@ -4788,9 +4783,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4213     if (xhci->hci_version > 0x96)
4214     xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4215    
4216     - if (xhci->hci_version < 0x100)
4217     - hcd->self.no_sg_constraint = 1;
4218     -
4219     /* Make sure the HC is halted. */
4220     retval = xhci_halt(xhci);
4221     if (retval)
4222     diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
4223     index 7f9ff75d0db2..fcb950031246 100644
4224     --- a/drivers/video/efifb.c
4225     +++ b/drivers/video/efifb.c
4226     @@ -108,8 +108,8 @@ static int efifb_setup(char *options)
4227     if (!*this_opt) continue;
4228    
4229     for (i = 0; i < M_UNKNOWN; i++) {
4230     - if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
4231     - efifb_dmi_list[i].base != 0) {
4232     + if (efifb_dmi_list[i].base != 0 &&
4233     + !strcmp(this_opt, efifb_dmi_list[i].optname)) {
4234     screen_info.lfb_base = efifb_dmi_list[i].base;
4235     screen_info.lfb_linelength = efifb_dmi_list[i].stride;
4236     screen_info.lfb_width = efifb_dmi_list[i].width;
4237     diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
4238     index 76273c1d26a6..b5ee393e2e8d 100644
4239     --- a/fs/bio-integrity.c
4240     +++ b/fs/bio-integrity.c
4241     @@ -316,7 +316,7 @@ static void bio_integrity_generate(struct bio *bio)
4242     bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
4243     bix.sector_size = bi->sector_size;
4244    
4245     - bio_for_each_segment(bv, bio, i) {
4246     + bio_for_each_segment_all(bv, bio, i) {
4247     void *kaddr = kmap_atomic(bv->bv_page);
4248     bix.data_buf = kaddr + bv->bv_offset;
4249     bix.data_size = bv->bv_len;
4250     diff --git a/fs/eventpoll.c b/fs/eventpoll.c
4251     index 810c28fb8c3c..d76c9744c774 100644
4252     --- a/fs/eventpoll.c
4253     +++ b/fs/eventpoll.c
4254     @@ -41,6 +41,7 @@
4255     #include <linux/proc_fs.h>
4256     #include <linux/seq_file.h>
4257     #include <linux/compat.h>
4258     +#include <linux/rculist.h>
4259    
4260     /*
4261     * LOCKING:
4262     @@ -133,8 +134,12 @@ struct nested_calls {
4263     * of these on a server and we do not want this to take another cache line.
4264     */
4265     struct epitem {
4266     - /* RB tree node used to link this structure to the eventpoll RB tree */
4267     - struct rb_node rbn;
4268     + union {
4269     + /* RB tree node links this structure to the eventpoll RB tree */
4270     + struct rb_node rbn;
4271     + /* Used to free the struct epitem */
4272     + struct rcu_head rcu;
4273     + };
4274    
4275     /* List header used to link this structure to the eventpoll ready list */
4276     struct list_head rdllink;
4277     @@ -580,14 +585,14 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
4278     * @sproc: Pointer to the scan callback.
4279     * @priv: Private opaque data passed to the @sproc callback.
4280     * @depth: The current depth of recursive f_op->poll calls.
4281     + * @ep_locked: caller already holds ep->mtx
4282     *
4283     * Returns: The same integer error code returned by the @sproc callback.
4284     */
4285     static int ep_scan_ready_list(struct eventpoll *ep,
4286     int (*sproc)(struct eventpoll *,
4287     struct list_head *, void *),
4288     - void *priv,
4289     - int depth)
4290     + void *priv, int depth, bool ep_locked)
4291     {
4292     int error, pwake = 0;
4293     unsigned long flags;
4294     @@ -598,7 +603,9 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4295     * We need to lock this because we could be hit by
4296     * eventpoll_release_file() and epoll_ctl().
4297     */
4298     - mutex_lock_nested(&ep->mtx, depth);
4299     +
4300     + if (!ep_locked)
4301     + mutex_lock_nested(&ep->mtx, depth);
4302    
4303     /*
4304     * Steal the ready list, and re-init the original one to the
4305     @@ -662,7 +669,8 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4306     }
4307     spin_unlock_irqrestore(&ep->lock, flags);
4308    
4309     - mutex_unlock(&ep->mtx);
4310     + if (!ep_locked)
4311     + mutex_unlock(&ep->mtx);
4312    
4313     /* We have to call this outside the lock */
4314     if (pwake)
4315     @@ -671,6 +679,12 @@ static int ep_scan_ready_list(struct eventpoll *ep,
4316     return error;
4317     }
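The new ep_locked parameter threads the caller's lock state through the helper so that ep->mtx is acquired at most once along any path. A generic, self-contained sketch of this conditional-locking shape, with illustrative names only:

    #include <linux/mutex.h>

    static int example_scan(struct mutex *mtx, bool already_locked)
    {
    	int err = 0;

    	if (!already_locked)
    		mutex_lock(mtx);

    	/* ... work that must run under mtx ... */

    	if (!already_locked)
    		mutex_unlock(mtx);
    	return err;
    }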
4318    
4319     +static void epi_rcu_free(struct rcu_head *head)
4320     +{
4321     + struct epitem *epi = container_of(head, struct epitem, rcu);
4322     + kmem_cache_free(epi_cache, epi);
4323     +}
4324     +
4325     /*
4326     * Removes a "struct epitem" from the eventpoll RB tree and deallocates
4327     * all the associated resources. Must be called with "mtx" held.
4328     @@ -692,8 +706,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
4329    
4330     /* Remove the current item from the list of epoll hooks */
4331     spin_lock(&file->f_lock);
4332     - if (ep_is_linked(&epi->fllink))
4333     - list_del_init(&epi->fllink);
4334     + list_del_rcu(&epi->fllink);
4335     spin_unlock(&file->f_lock);
4336    
4337     rb_erase(&epi->rbn, &ep->rbr);
4338     @@ -704,9 +717,14 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
4339     spin_unlock_irqrestore(&ep->lock, flags);
4340    
4341     wakeup_source_unregister(ep_wakeup_source(epi));
4342     -
4343     - /* At this point it is safe to free the eventpoll item */
4344     - kmem_cache_free(epi_cache, epi);
4345     + /*
4346     + * At this point it is safe to free the eventpoll item. Use the union
4347     + * field epi->rcu, since we are trying to minimize the size of
4348     + * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
4349     + * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
4350     + * use of the rbn field.
4351     + */
4352     + call_rcu(&epi->rcu, epi_rcu_free);
4353    
4354     atomic_long_dec(&ep->user->epoll_watches);
4355    
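The call_rcu() conversion above is an instance of the standard RCU deferred-free idiom: embed an rcu_head in the object, unpublish it first (here via list_del_rcu()), and free through container_of() only after a grace period, so lockless readers such as reverse_path_check_proc() never touch freed memory. A self-contained sketch of the idiom; struct item and its helpers are illustrative, not from this patch:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct item {
    	int payload;
    	struct rcu_head rcu;	/* shares space, like epitem's union */
    };

    static void item_rcu_free(struct rcu_head *head)
    {
    	/* Runs only after all pre-existing RCU readers have finished */
    	kfree(container_of(head, struct item, rcu));
    }

    static void item_release(struct item *it)
    {
    	/* Unpublish first (e.g. list_del_rcu()), then defer the free */
    	call_rcu(&it->rcu, item_rcu_free);
    }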
4356     @@ -807,15 +825,34 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
4357     return 0;
4358     }
4359    
4360     +static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
4361     + poll_table *pt);
4362     +
4363     +struct readyevents_arg {
4364     + struct eventpoll *ep;
4365     + bool locked;
4366     +};
4367     +
4368     static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
4369     {
4370     - return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
4371     + struct readyevents_arg *arg = priv;
4372     +
4373     + return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
4374     + call_nests + 1, arg->locked);
4375     }
4376    
4377     static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
4378     {
4379     int pollflags;
4380     struct eventpoll *ep = file->private_data;
4381     + struct readyevents_arg arg;
4382     +
4383     + /*
4384     + * During ep_insert() we already hold the ep->mtx for the tfile.
4385     + * Prevent re-acquisition.
4386     + */
4387     + arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
4388     + arg.ep = ep;
4389    
4390     /* Insert inside our poll wait queue */
4391     poll_wait(file, &ep->poll_wait, wait);
4392     @@ -827,7 +864,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
4393     * could re-enter here.
4394     */
4395     pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
4396     - ep_poll_readyevents_proc, ep, ep, current);
4397     + ep_poll_readyevents_proc, &arg, ep, current);
4398    
4399     return pollflags != -1 ? pollflags : 0;
4400     }
4401     @@ -872,7 +909,6 @@ static const struct file_operations eventpoll_fops = {
4402     */
4403     void eventpoll_release_file(struct file *file)
4404     {
4405     - struct list_head *lsthead = &file->f_ep_links;
4406     struct eventpoll *ep;
4407     struct epitem *epi;
4408    
4409     @@ -890,17 +926,12 @@ void eventpoll_release_file(struct file *file)
4410     * Besides, ep_remove() acquires the lock, so we can't hold it here.
4411     */
4412     mutex_lock(&epmutex);
4413     -
4414     - while (!list_empty(lsthead)) {
4415     - epi = list_first_entry(lsthead, struct epitem, fllink);
4416     -
4417     + list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
4418     ep = epi->ep;
4419     - list_del_init(&epi->fllink);
4420     mutex_lock_nested(&ep->mtx, 0);
4421     ep_remove(ep, epi);
4422     mutex_unlock(&ep->mtx);
4423     }
4424     -
4425     mutex_unlock(&epmutex);
4426     }
4427    
4428     @@ -1138,7 +1169,9 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
4429     struct file *child_file;
4430     struct epitem *epi;
4431    
4432     - list_for_each_entry(epi, &file->f_ep_links, fllink) {
4433     + /* CTL_DEL can remove links here, but that can't increase our count */
4434     + rcu_read_lock();
4435     + list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
4436     child_file = epi->ep->file;
4437     if (is_file_epoll(child_file)) {
4438     if (list_empty(&child_file->f_ep_links)) {
4439     @@ -1160,6 +1193,7 @@ static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
4440     "file is not an ep!\n");
4441     }
4442     }
4443     + rcu_read_unlock();
4444     return error;
4445     }
4446    
4447     @@ -1231,7 +1265,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
4448     * Must be called with "mtx" held.
4449     */
4450     static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4451     - struct file *tfile, int fd)
4452     + struct file *tfile, int fd, int full_check)
4453     {
4454     int error, revents, pwake = 0;
4455     unsigned long flags;
4456     @@ -1286,7 +1320,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4457    
4458     /* Add the current item to the list of active epoll hook for this file */
4459     spin_lock(&tfile->f_lock);
4460     - list_add_tail(&epi->fllink, &tfile->f_ep_links);
4461     + list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
4462     spin_unlock(&tfile->f_lock);
4463    
4464     /*
4465     @@ -1297,7 +1331,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4466    
4467     /* now check if we've created too many backpaths */
4468     error = -EINVAL;
4469     - if (reverse_path_check())
4470     + if (full_check && reverse_path_check())
4471     goto error_remove_epi;
4472    
4473     /* We have to drop the new item inside our item list to keep track of it */
4474     @@ -1327,8 +1361,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
4475    
4476     error_remove_epi:
4477     spin_lock(&tfile->f_lock);
4478     - if (ep_is_linked(&epi->fllink))
4479     - list_del_init(&epi->fllink);
4480     + list_del_rcu(&epi->fllink);
4481     spin_unlock(&tfile->f_lock);
4482    
4483     rb_erase(&epi->rbn, &ep->rbr);
4484     @@ -1521,7 +1554,7 @@ static int ep_send_events(struct eventpoll *ep,
4485     esed.maxevents = maxevents;
4486     esed.events = events;
4487    
4488     - return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
4489     + return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
4490     }
4491    
4492     static inline struct timespec ep_set_mstimeout(long ms)
4493     @@ -1791,11 +1824,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4494     struct epoll_event __user *, event)
4495     {
4496     int error;
4497     - int did_lock_epmutex = 0;
4498     + int full_check = 0;
4499     struct fd f, tf;
4500     struct eventpoll *ep;
4501     struct epitem *epi;
4502     struct epoll_event epds;
4503     + struct eventpoll *tep = NULL;
4504    
4505     error = -EFAULT;
4506     if (ep_op_has_event(op) &&
4507     @@ -1844,27 +1878,37 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4508     * and hang them on the tfile_check_list, so we can check that we
4509     * haven't created too many possible wakeup paths.
4510     *
4511     - * We need to hold the epmutex across both ep_insert and ep_remove
4512     - * b/c we want to make sure we are looking at a coherent view of
4513     - * epoll network.
4514     + * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
4515     + * the epoll file descriptor is attaching directly to a wakeup source,
4516     + * unless the epoll file descriptor is nested. The purpose of taking the
4517     + * 'epmutex' on add is to prevent complex topologies such as loops and
4518     + * deep wakeup paths from forming in parallel through multiple
4519     + * EPOLL_CTL_ADD operations.
4520     */
4521     - if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
4522     - mutex_lock(&epmutex);
4523     - did_lock_epmutex = 1;
4524     - }
4525     + mutex_lock_nested(&ep->mtx, 0);
4526     if (op == EPOLL_CTL_ADD) {
4527     - if (is_file_epoll(tf.file)) {
4528     - error = -ELOOP;
4529     - if (ep_loop_check(ep, tf.file) != 0) {
4530     - clear_tfile_check_list();
4531     - goto error_tgt_fput;
4532     + if (!list_empty(&f.file->f_ep_links) ||
4533     + is_file_epoll(tf.file)) {
4534     + full_check = 1;
4535     + mutex_unlock(&ep->mtx);
4536     + mutex_lock(&epmutex);
4537     + if (is_file_epoll(tf.file)) {
4538     + error = -ELOOP;
4539     + if (ep_loop_check(ep, tf.file) != 0) {
4540     + clear_tfile_check_list();
4541     + goto error_tgt_fput;
4542     + }
4543     + } else
4544     + list_add(&tf.file->f_tfile_llink,
4545     + &tfile_check_list);
4546     + mutex_lock_nested(&ep->mtx, 0);
4547     + if (is_file_epoll(tf.file)) {
4548     + tep = tf.file->private_data;
4549     + mutex_lock_nested(&tep->mtx, 1);
4550     }
4551     - } else
4552     - list_add(&tf.file->f_tfile_llink, &tfile_check_list);
4553     + }
4554     }
4555    
4556     - mutex_lock_nested(&ep->mtx, 0);
4557     -
4558     /*
4559     * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
4560     * above, we can be sure to be able to use the item looked up by
4561     @@ -1877,10 +1921,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4562     case EPOLL_CTL_ADD:
4563     if (!epi) {
4564     epds.events |= POLLERR | POLLHUP;
4565     - error = ep_insert(ep, &epds, tf.file, fd);
4566     + error = ep_insert(ep, &epds, tf.file, fd, full_check);
4567     } else
4568     error = -EEXIST;
4569     - clear_tfile_check_list();
4570     + if (full_check)
4571     + clear_tfile_check_list();
4572     break;
4573     case EPOLL_CTL_DEL:
4574     if (epi)
4575     @@ -1896,10 +1941,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
4576     error = -ENOENT;
4577     break;
4578     }
4579     + if (tep != NULL)
4580     + mutex_unlock(&tep->mtx);
4581     mutex_unlock(&ep->mtx);
4582    
4583     error_tgt_fput:
4584     - if (did_lock_epmutex)
4585     + if (full_check)
4586     mutex_unlock(&epmutex);
4587    
4588     fdput(tf);
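The fast path introduced above skips the global epmutex for a plain EPOLL_CTL_ADD; only nested cases fall back to the full loop and depth checks. A userspace sketch of the slow case, assuming nothing beyond the standard epoll API: adding one epoll fd to another makes is_file_epoll(tf.file) true, so full_check is taken.

    #include <sys/epoll.h>

    int example_nested_add(void)
    {
    	int outer = epoll_create1(0);
    	int inner = epoll_create1(0);
    	struct epoll_event ev = { .events = EPOLLIN };

    	ev.data.fd = inner;
    	/* The target is itself an epoll fd, so this ADD runs the full
    	 * ep_loop_check()/reverse_path_check() machinery under epmutex. */
    	return epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
    }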
4589     diff --git a/fs/namei.c b/fs/namei.c
4590     index 23ac50f4ee40..187cacf1c83c 100644
4591     --- a/fs/namei.c
4592     +++ b/fs/namei.c
4593     @@ -3924,6 +3924,7 @@ retry:
4594     out_dput:
4595     done_path_create(&new_path, new_dentry);
4596     if (retry_estale(error, how)) {
4597     + path_put(&old_path);
4598     how |= LOOKUP_REVAL;
4599     goto retry;
4600     }
4601     diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
4602     index ef792f29f831..5d8ccecf5f5c 100644
4603     --- a/fs/nfs/delegation.c
4604     +++ b/fs/nfs/delegation.c
4605     @@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
4606    
4607     rcu_read_lock();
4608     delegation = rcu_dereference(NFS_I(inode)->delegation);
4609     + if (delegation == NULL)
4610     + goto out_enoent;
4611    
4612     - if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
4613     - rcu_read_unlock();
4614     - return -ENOENT;
4615     - }
4616     + if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
4617     + goto out_enoent;
4618     nfs_mark_return_delegation(server, delegation);
4619     rcu_read_unlock();
4620    
4621     nfs_delegation_run_state_manager(clp);
4622     return 0;
4623     +out_enoent:
4624     + rcu_read_unlock();
4625     + return -ENOENT;
4626     }
4627    
4628     static struct inode *
4629     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4630     index d3d7766f55e3..a53651743d4d 100644
4631     --- a/fs/nfs/nfs4proc.c
4632     +++ b/fs/nfs/nfs4proc.c
4633     @@ -3972,8 +3972,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4634     {
4635     nfs4_stateid current_stateid;
4636    
4637     - if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
4638     - return false;
4639     + /* If the current stateid represents a lost lock, then exit */
4640     + if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4641     + return true;
4642     return nfs4_stateid_match(stateid, &current_stateid);
4643     }
4644    
4645     diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
4646     index d71903c6068b..f07941160515 100644
4647     --- a/fs/ocfs2/file.c
4648     +++ b/fs/ocfs2/file.c
4649     @@ -2371,8 +2371,8 @@ out_dio:
4650    
4651     if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
4652     ((file->f_flags & O_DIRECT) && !direct_io)) {
4653     - ret = filemap_fdatawrite_range(file->f_mapping, pos,
4654     - pos + count - 1);
4655     + ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
4656     + *ppos + count - 1);
4657     if (ret < 0)
4658     written = ret;
4659    
4660     @@ -2385,8 +2385,8 @@ out_dio:
4661     }
4662    
4663     if (!ret)
4664     - ret = filemap_fdatawait_range(file->f_mapping, pos,
4665     - pos + count - 1);
4666     + ret = filemap_fdatawait_range(file->f_mapping, *ppos,
4667     + *ppos + count - 1);
4668     }
4669    
4670     /*
4671     diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
4672     index aaa50611ec66..d7b5108789e2 100644
4673     --- a/fs/ocfs2/quota_global.c
4674     +++ b/fs/ocfs2/quota_global.c
4675     @@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
4676     */
4677     if (status < 0)
4678     mlog_errno(status);
4679     + /*
4680     + * Clear dq_off so that we search for the structure in the quota file
4681     + * next time we acquire it. The structure might be deleted and reallocated
4682     + * elsewhere by another node while our dquot structure is on the freelist.
4683     + */
4684     + dquot->dq_off = 0;
4685     clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
4686     out_trans:
4687     ocfs2_commit_trans(osb, handle);
4688     @@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
4689     status = ocfs2_lock_global_qf(info, 1);
4690     if (status < 0)
4691     goto out;
4692     - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
4693     - status = ocfs2_qinfo_lock(info, 0);
4694     - if (status < 0)
4695     - goto out_dq;
4696     - status = qtree_read_dquot(&info->dqi_gi, dquot);
4697     - ocfs2_qinfo_unlock(info, 0);
4698     - if (status < 0)
4699     - goto out_dq;
4700     - }
4701     - set_bit(DQ_READ_B, &dquot->dq_flags);
4702     + status = ocfs2_qinfo_lock(info, 0);
4703     + if (status < 0)
4704     + goto out_dq;
4705     + /*
4706     + * We always want to read the dquot structure from disk because we don't
4707     + * know what happened to it while it was on the freelist.
4708     + */
4709     + status = qtree_read_dquot(&info->dqi_gi, dquot);
4710     + ocfs2_qinfo_unlock(info, 0);
4711     + if (status < 0)
4712     + goto out_dq;
4713    
4714     OCFS2_DQUOT(dquot)->dq_use_count++;
4715     OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
4716     diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
4717     index 2e4344be3b96..2001862bf2b1 100644
4718     --- a/fs/ocfs2/quota_local.c
4719     +++ b/fs/ocfs2/quota_local.c
4720     @@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
4721     ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
4722    
4723     out:
4724     - /* Clear the read bit so that next time someone uses this
4725     - * dquot he reads fresh info from disk and allocates local
4726     - * dquot structure */
4727     - clear_bit(DQ_READ_B, &dquot->dq_flags);
4728     return status;
4729     }
4730    
4731     diff --git a/fs/proc/base.c b/fs/proc/base.c
4732     index 1485e38daaa3..c35eaa404933 100644
4733     --- a/fs/proc/base.c
4734     +++ b/fs/proc/base.c
4735     @@ -1813,6 +1813,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
4736     if (rc)
4737     goto out_mmput;
4738    
4739     + rc = -ENOENT;
4740     down_read(&mm->mmap_sem);
4741     vma = find_exact_vma(mm, vm_start, vm_end);
4742     if (vma && vma->vm_file) {
4743     diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
4744     index 7c2e030e72f1..a12f6ed91c84 100644
4745     --- a/include/linux/blktrace_api.h
4746     +++ b/include/linux/blktrace_api.h
4747     @@ -5,6 +5,7 @@
4748     #include <linux/relay.h>
4749     #include <linux/compat.h>
4750     #include <uapi/linux/blktrace_api.h>
4751     +#include <linux/list.h>
4752    
4753     #if defined(CONFIG_BLK_DEV_IO_TRACE)
4754    
4755     @@ -23,6 +24,7 @@ struct blk_trace {
4756     struct dentry *dir;
4757     struct dentry *dropped_file;
4758     struct dentry *msg_file;
4759     + struct list_head running_list;
4760     atomic_t dropped;
4761     };
4762    
4763     diff --git a/include/linux/firewire.h b/include/linux/firewire.h
4764     index 5d7782e42b8f..c3683bdf28fe 100644
4765     --- a/include/linux/firewire.h
4766     +++ b/include/linux/firewire.h
4767     @@ -200,6 +200,7 @@ struct fw_device {
4768     unsigned irmc:1;
4769     unsigned bc_implemented:2;
4770    
4771     + work_func_t workfn;
4772     struct delayed_work work;
4773     struct fw_attribute_group attribute_group;
4774     };
4775     diff --git a/include/linux/mm.h b/include/linux/mm.h
4776     index fed08c0c543b..648bcb007eba 100644
4777     --- a/include/linux/mm.h
4778     +++ b/include/linux/mm.h
4779     @@ -161,7 +161,7 @@ extern unsigned int kobjsize(const void *objp);
4780     * Special vmas that are non-mergable, non-mlock()able.
4781     * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
4782     */
4783     -#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
4784     +#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
4785    
4786     /*
4787     * mapping from the currently active vm_flags protection bits (the
4788     diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
4789     index ebeab360d851..0ecc46e7af3d 100644
4790     --- a/include/linux/tracepoint.h
4791     +++ b/include/linux/tracepoint.h
4792     @@ -60,6 +60,12 @@ struct tp_module {
4793     unsigned int num_tracepoints;
4794     struct tracepoint * const *tracepoints_ptrs;
4795     };
4796     +bool trace_module_has_bad_taint(struct module *mod);
4797     +#else
4798     +static inline bool trace_module_has_bad_taint(struct module *mod)
4799     +{
4800     + return false;
4801     +}
4802     #endif /* CONFIG_MODULES */
4803    
4804     struct tracepoint_iter {
4805     diff --git a/include/net/tcp.h b/include/net/tcp.h
4806     index b1aa324c5e65..51dcc6faa561 100644
4807     --- a/include/net/tcp.h
4808     +++ b/include/net/tcp.h
4809     @@ -482,6 +482,24 @@ extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4810     extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
4811     struct ip_options *opt);
4812     #ifdef CONFIG_SYN_COOKIES
4813     +#include <linux/ktime.h>
4814     +
4815     +/* Syncookies use a monotonic timer which increments every 64 seconds.
4816     + * This counter is used both as a hash input and partially encoded into
4817     + * the cookie value. A cookie is only validated further if the delta
4818     + * between the current counter value and the encoded one is less than this,
4819     + * i.e. a sent cookie is valid for at most 128 seconds (or less if
4820     + * the counter advances immediately after a cookie is generated).
4821     + */
4822     +#define MAX_SYNCOOKIE_AGE 2
4823     +
4824     +static inline u32 tcp_cookie_time(void)
4825     +{
4826     + struct timespec now;
4827     + getnstimeofday(&now);
4828     + return now.tv_sec >> 6; /* 64 seconds granularity */
4829     +}
4830     +
4831     extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
4832     const struct tcphdr *th, u16 *mssp);
4833     extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
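A worked illustration of the ageing rule documented above: a cookie carries the counter value at generation time, and validation accepts it only while the counter has advanced by fewer than MAX_SYNCOOKIE_AGE ticks of 64 seconds, i.e. at most about 128 seconds. The helper below is hypothetical; only tcp_cookie_time() and MAX_SYNCOOKIE_AGE come from this patch:

    #include <net/tcp.h>

    static inline bool example_cookie_fresh(u32 cookie_count)
    {
    	/* Unsigned subtraction also behaves sanely across counter wrap */
    	u32 age = tcp_cookie_time() - cookie_count;

    	return age < MAX_SYNCOOKIE_AGE;
    }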
4834     @@ -1303,7 +1321,8 @@ struct tcp_fastopen_request {
4835     /* Fast Open cookie. Size 0 means a cookie request */
4836     struct tcp_fastopen_cookie cookie;
4837     struct msghdr *data; /* data in MSG_FASTOPEN */
4838     - u16 copied; /* queued in tcp_connect() */
4839     + size_t size;
4840     + int copied; /* queued in tcp_connect() */
4841     };
4842     void tcp_free_fastopen_req(struct tcp_sock *tp);
4843    
4844     diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
4845     index a12589c4ee92..361bd0f04018 100644
4846     --- a/include/target/iscsi/iscsi_transport.h
4847     +++ b/include/target/iscsi/iscsi_transport.h
4848     @@ -12,6 +12,7 @@ struct iscsit_transport {
4849     int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
4850     int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
4851     void (*iscsit_free_np)(struct iscsi_np *);
4852     + void (*iscsit_wait_conn)(struct iscsi_conn *);
4853     void (*iscsit_free_conn)(struct iscsi_conn *);
4854     int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
4855     int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
4856     diff --git a/ipc/msg.c b/ipc/msg.c
4857     index 558aa91186b6..52770bfde2a5 100644
4858     --- a/ipc/msg.c
4859     +++ b/ipc/msg.c
4860     @@ -885,6 +885,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
4861     return -EINVAL;
4862    
4863     if (msgflg & MSG_COPY) {
4864     + if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
4865     + return -EINVAL;
4866     copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
4867     if (IS_ERR(copy))
4868     return PTR_ERR(copy);
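The new check rejects flag combinations that MSG_COPY cannot honour: the copy path never blocks, so IPC_NOWAIT is mandatory, and MSG_EXCEPT is meaningless because msgtyp is reinterpreted as a queue index. A userspace sketch of the only accepted combination, assuming a kernel built with CONFIG_CHECKPOINT_RESTORE; the struct and function names are illustrative:

    #define _GNU_SOURCE
    #include <sys/msg.h>

    #ifndef MSG_COPY
    #define MSG_COPY 040000		/* value from <linux/msg.h> */
    #endif

    struct example_buf { long mtype; char mtext[128]; };

    long example_peek(int msqid, long index)
    {
    	struct example_buf buf;

    	/* Under MSG_COPY, msgtyp selects the nth queued message, and the
    	 * message is copied out without being removed from the queue. */
    	return msgrcv(msqid, &buf, sizeof(buf.mtext), index,
    		      MSG_COPY | IPC_NOWAIT);
    }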
4869     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4870     index 4772034b4b17..5ae9f950e024 100644
4871     --- a/kernel/cpuset.c
4872     +++ b/kernel/cpuset.c
4873     @@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
4874     * Temporarily set the task's mems_allowed to the target nodes of migration,
4875     * so that the migration code can allocate pages on these nodes.
4876     *
4877     - * Call holding cpuset_mutex, so current's cpuset won't change
4878     - * during this call, as manage_mutex holds off any cpuset_attach()
4879     - * calls. Therefore we don't need to take task_lock around the
4880     - * call to guarantee_online_mems(), as we know no one is changing
4881     - * our task's cpuset.
4882     - *
4883     * While the mm_struct we are migrating is typically from some
4884     * other task, the task_struct mems_allowed that we are hacking
4885     * is for our current task, which must allocate new pages for that
4886     @@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
4887    
4888     do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
4889    
4890     + rcu_read_lock();
4891     mems_cs = effective_nodemask_cpuset(task_cs(tsk));
4892     guarantee_online_mems(mems_cs, &tsk->mems_allowed);
4893     + rcu_read_unlock();
4894     }
4895    
4896     /*
4897     @@ -2511,9 +2507,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
4898    
4899     task_lock(current);
4900     cs = nearest_hardwall_ancestor(task_cs(current));
4901     + allowed = node_isset(node, cs->mems_allowed);
4902     task_unlock(current);
4903    
4904     - allowed = node_isset(node, cs->mems_allowed);
4905     mutex_unlock(&callback_mutex);
4906     return allowed;
4907     }
4908     diff --git a/kernel/futex.c b/kernel/futex.c
4909     index 221a58fc62f7..231754863a87 100644
4910     --- a/kernel/futex.c
4911     +++ b/kernel/futex.c
4912     @@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
4913     return -EINVAL;
4914     address -= key->both.offset;
4915    
4916     + if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
4917     + return -EFAULT;
4918     +
4919     /*
4920     * PROCESS_PRIVATE futexes are fast.
4921     * As the mm cannot disappear under us and the 'key' only needs
4922     @@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
4923     * but access_ok() should be faster than find_vma()
4924     */
4925     if (!fshared) {
4926     - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
4927     - return -EFAULT;
4928     key->private.mm = mm;
4929     key->private.address = address;
4930     get_futex_key_refs(key);
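Hoisting the access_ok() check above the private-futex fast path means shared futexes are sanity-checked too, and using the caller-supplied rw honours read-only callers instead of always demanding VERIFY_WRITE. A minimal sketch of the resulting pattern with a hypothetical caller; access_ok(type, addr, size) is the real 3.12-era interface:

    #include <linux/uaccess.h>

    static int example_check_futex_word(u32 __user *uaddr, int rw)
    {
    	/* rw is VERIFY_READ for pure waiters, VERIFY_WRITE for
    	 * operations that store to the futex word. */
    	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
    		return -EFAULT;
    	return 0;
    }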
4931     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4932     index 3e59f951d42f..4c84746a840b 100644
4933     --- a/kernel/irq/manage.c
4934     +++ b/kernel/irq/manage.c
4935     @@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
4936    
4937     static void wake_threads_waitq(struct irq_desc *desc)
4938     {
4939     - if (atomic_dec_and_test(&desc->threads_active) &&
4940     - waitqueue_active(&desc->wait_for_threads))
4941     + if (atomic_dec_and_test(&desc->threads_active))
4942     wake_up(&desc->wait_for_threads);
4943     }
4944    
4945     diff --git a/kernel/rcutree.h b/kernel/rcutree.h
4946     index 5f97eab602cd..52be957c9fe2 100644
4947     --- a/kernel/rcutree.h
4948     +++ b/kernel/rcutree.h
4949     @@ -104,6 +104,8 @@ struct rcu_dynticks {
4950     /* idle-period nonlazy_posted snapshot. */
4951     unsigned long last_accelerate;
4952     /* Last jiffy CBs were accelerated. */
4953     + unsigned long last_advance_all;
4954     + /* Last jiffy CBs were all advanced. */
4955     int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
4956     #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
4957     };
4958     diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
4959     index 130c97b027f2..511e6b47c594 100644
4960     --- a/kernel/rcutree_plugin.h
4961     +++ b/kernel/rcutree_plugin.h
4962     @@ -1627,20 +1627,26 @@ module_param(rcu_idle_gp_delay, int, 0644);
4963     static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
4964     module_param(rcu_idle_lazy_gp_delay, int, 0644);
4965    
4966     -extern int tick_nohz_enabled;
4967     +extern int tick_nohz_active;
4968    
4969     /*
4970     - * Try to advance callbacks for all flavors of RCU on the current CPU.
4971     - * Afterwards, if there are any callbacks ready for immediate invocation,
4972     - * return true.
4973     + * Try to advance callbacks for all flavors of RCU on the current CPU, but
4974     + * only if it has been a while since the last time we did so. Afterwards,
4975     + * if there are any callbacks ready for immediate invocation, return true.
4976     */
4977     static bool rcu_try_advance_all_cbs(void)
4978     {
4979     bool cbs_ready = false;
4980     struct rcu_data *rdp;
4981     + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
4982     struct rcu_node *rnp;
4983     struct rcu_state *rsp;
4984    
4985     + /* Exit early if we advanced recently. */
4986     + if (jiffies == rdtp->last_advance_all)
4987     + return 0;
4988     + rdtp->last_advance_all = jiffies;
4989     +
4990     for_each_rcu_flavor(rsp) {
4991     rdp = this_cpu_ptr(rsp->rda);
4992     rnp = rdp->mynode;
4993     @@ -1718,7 +1724,7 @@ static void rcu_prepare_for_idle(int cpu)
4994     int tne;
4995    
4996     /* Handle nohz enablement switches conservatively. */
4997     - tne = ACCESS_ONCE(tick_nohz_enabled);
4998     + tne = ACCESS_ONCE(tick_nohz_active);
4999     if (tne != rdtp->tick_nohz_enabled_snap) {
5000     if (rcu_cpu_has_callbacks(cpu, NULL))
5001     invoke_rcu_core(); /* force nohz to see update. */
5002     @@ -1739,6 +1745,8 @@ static void rcu_prepare_for_idle(int cpu)
5003     */
5004     if (rdtp->all_lazy &&
5005     rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
5006     + rdtp->all_lazy = false;
5007     + rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
5008     invoke_rcu_core();
5009     return;
5010     }
5011     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5012     index ceae65e69a64..a494ace683e3 100644
5013     --- a/kernel/sched/core.c
5014     +++ b/kernel/sched/core.c
5015     @@ -5119,10 +5119,13 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5016     DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5017     DEFINE_PER_CPU(int, sd_llc_size);
5018     DEFINE_PER_CPU(int, sd_llc_id);
5019     +DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5020     +DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5021    
5022     static void update_top_cache_domain(int cpu)
5023     {
5024     struct sched_domain *sd;
5025     + struct sched_domain *busy_sd = NULL;
5026     int id = cpu;
5027     int size = 1;
5028    
5029     @@ -5130,11 +5133,16 @@ static void update_top_cache_domain(int cpu)
5030     if (sd) {
5031     id = cpumask_first(sched_domain_span(sd));
5032     size = cpumask_weight(sched_domain_span(sd));
5033     + busy_sd = sd->parent; /* sd_busy */
5034     }
5035     + rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5036    
5037     rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5038     per_cpu(sd_llc_size, cpu) = size;
5039     per_cpu(sd_llc_id, cpu) = id;
5040     +
5041     + sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5042     + rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5043     }
5044    
5045     /*
5046     @@ -5325,6 +5333,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5047     * die on a /0 trap.
5048     */
5049     sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
5050     + sg->sgp->power_orig = sg->sgp->power;
5051    
5052     /*
5053     * Make sure the first group of this domain contains the
5054     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5055     index 411732334906..790e2fc808da 100644
5056     --- a/kernel/sched/fair.c
5057     +++ b/kernel/sched/fair.c
5058     @@ -5598,16 +5598,16 @@ static inline void nohz_balance_exit_idle(int cpu)
5059     static inline void set_cpu_sd_state_busy(void)
5060     {
5061     struct sched_domain *sd;
5062     + int cpu = smp_processor_id();
5063    
5064     rcu_read_lock();
5065     - sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5066     + sd = rcu_dereference(per_cpu(sd_busy, cpu));
5067    
5068     if (!sd || !sd->nohz_idle)
5069     goto unlock;
5070     sd->nohz_idle = 0;
5071    
5072     - for (; sd; sd = sd->parent)
5073     - atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5074     + atomic_inc(&sd->groups->sgp->nr_busy_cpus);
5075     unlock:
5076     rcu_read_unlock();
5077     }
5078     @@ -5615,16 +5615,16 @@ unlock:
5079     void set_cpu_sd_state_idle(void)
5080     {
5081     struct sched_domain *sd;
5082     + int cpu = smp_processor_id();
5083    
5084     rcu_read_lock();
5085     - sd = rcu_dereference_check_sched_domain(this_rq()->sd);
5086     + sd = rcu_dereference(per_cpu(sd_busy, cpu));
5087    
5088     if (!sd || sd->nohz_idle)
5089     goto unlock;
5090     sd->nohz_idle = 1;
5091    
5092     - for (; sd; sd = sd->parent)
5093     - atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5094     + atomic_dec(&sd->groups->sgp->nr_busy_cpus);
5095     unlock:
5096     rcu_read_unlock();
5097     }
5098     @@ -5807,6 +5807,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
5099     {
5100     unsigned long now = jiffies;
5101     struct sched_domain *sd;
5102     + struct sched_group_power *sgp;
5103     + int nr_busy;
5104    
5105     if (unlikely(idle_cpu(cpu)))
5106     return 0;
5107     @@ -5832,22 +5834,22 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
5108     goto need_kick;
5109    
5110     rcu_read_lock();
5111     - for_each_domain(cpu, sd) {
5112     - struct sched_group *sg = sd->groups;
5113     - struct sched_group_power *sgp = sg->sgp;
5114     - int nr_busy = atomic_read(&sgp->nr_busy_cpus);
5115     + sd = rcu_dereference(per_cpu(sd_busy, cpu));
5116    
5117     - if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
5118     - goto need_kick_unlock;
5119     + if (sd) {
5120     + sgp = sd->groups->sgp;
5121     + nr_busy = atomic_read(&sgp->nr_busy_cpus);
5122    
5123     - if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5124     - && (cpumask_first_and(nohz.idle_cpus_mask,
5125     - sched_domain_span(sd)) < cpu))
5126     + if (nr_busy > 1)
5127     goto need_kick_unlock;
5128     -
5129     - if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5130     - break;
5131     }
5132     +
5133     + sd = rcu_dereference(per_cpu(sd_asym, cpu));
5134     +
5135     + if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
5136     + sched_domain_span(sd)) < cpu))
5137     + goto need_kick_unlock;
5138     +
5139     rcu_read_unlock();
5140     return 0;
5141    
5142     @@ -6013,15 +6015,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
5143     struct cfs_rq *cfs_rq = cfs_rq_of(se);
5144    
5145     /*
5146     - * Ensure the task's vruntime is normalized, so that when its
5147     + * Ensure the task's vruntime is normalized, so that when it's
5148     * switched back to the fair class the enqueue_entity(.flags=0) will
5149     * do the right thing.
5150     *
5151     - * If it was on_rq, then the dequeue_entity(.flags=0) will already
5152     - * have normalized the vruntime, if it was !on_rq, then only when
5153     + * If it's on_rq, then the dequeue_entity(.flags=0) will already
5154     + * have normalized the vruntime, if it's !on_rq, then only when
5155     * the task is sleeping will it still have non-normalized vruntime.
5156     */
5157     - if (!se->on_rq && p->state != TASK_RUNNING) {
5158     + if (!p->on_rq && p->state != TASK_RUNNING) {
5159     /*
5160     * Fix up our vruntime so that the current sleep doesn't
5161     * cause 'unlimited' sleep bonus.
5162     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
5163     index 417b1b3fd7e9..ff04e1a06412 100644
5164     --- a/kernel/sched/rt.c
5165     +++ b/kernel/sched/rt.c
5166     @@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
5167     * if we should look at the mask. It would be a shame
5168     * if we looked at the mask, but the mask was not
5169     * updated yet.
5170     + *
5171     + * Matched by the barrier in pull_rt_task().
5172     */
5173     - wmb();
5174     + smp_wmb();
5175     atomic_inc(&rq->rd->rto_count);
5176     }
5177    
5178     @@ -1227,8 +1229,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
5179     */
5180     if (curr && unlikely(rt_task(curr)) &&
5181     (curr->nr_cpus_allowed < 2 ||
5182     - curr->prio <= p->prio) &&
5183     - (p->nr_cpus_allowed > 1)) {
5184     + curr->prio <= p->prio)) {
5185     int target = find_lowest_rq(p);
5186    
5187     if (target != -1)
5188     @@ -1644,6 +1645,12 @@ static int pull_rt_task(struct rq *this_rq)
5189     if (likely(!rt_overloaded(this_rq)))
5190     return 0;
5191    
5192     + /*
5193     + * Match the barrier from rt_set_overload(); this guarantees that if we
5194     + * see overloaded we must also see the rto_mask bit.
5195     + */
5196     + smp_rmb();
5197     +
5198     for_each_cpu(cpu, this_rq->rd->rto_mask) {
5199     if (this_cpu == cpu)
5200     continue;
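The smp_wmb()/smp_rmb() pair above is the classic publish/observe ordering: the writer must make the rto_mask bit visible before bumping rto_count, and the reader must order its check of the count before scanning the mask. A generic, self-contained sketch of the same pairing; all names are illustrative:

    #include <asm/barrier.h>

    static int example_data;
    static int example_flag;

    static void example_publish(void)
    {
    	example_data = 1;	/* cpumask_set_cpu(..., rto_mask)  */
    	smp_wmb();		/* pairs with smp_rmb() below      */
    	example_flag = 1;	/* atomic_inc(&rq->rd->rto_count)  */
    }

    static int example_consume(void)
    {
    	if (!example_flag)	/* rt_overloaded()                 */
    		return 0;
    	smp_rmb();		/* flag seen => data must be seen  */
    	return example_data;	/* for_each_cpu(..., rto_mask)     */
    }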
5201     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
5202     index a6208afd80e7..4f310592b1ba 100644
5203     --- a/kernel/sched/sched.h
5204     +++ b/kernel/sched/sched.h
5205     @@ -596,6 +596,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
5206     DECLARE_PER_CPU(struct sched_domain *, sd_llc);
5207     DECLARE_PER_CPU(int, sd_llc_size);
5208     DECLARE_PER_CPU(int, sd_llc_id);
5209     +DECLARE_PER_CPU(struct sched_domain *, sd_busy);
5210     +DECLARE_PER_CPU(struct sched_domain *, sd_asym);
5211    
5212     struct sched_group_power {
5213     atomic_t ref;
5214     diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
5215     index 3612fc77f834..ea20f7d1ac2c 100644
5216     --- a/kernel/time/tick-sched.c
5217     +++ b/kernel/time/tick-sched.c
5218     @@ -361,8 +361,8 @@ void __init tick_nohz_init(void)
5219     /*
5220     * NO HZ enabled ?
5221     */
5222     -int tick_nohz_enabled __read_mostly = 1;
5223     -
5224     +static int tick_nohz_enabled __read_mostly = 1;
5225     +int tick_nohz_active __read_mostly;
5226     /*
5227     * Enable / Disable tickless mode
5228     */
5229     @@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
5230     struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
5231     ktime_t now, idle;
5232    
5233     - if (!tick_nohz_enabled)
5234     + if (!tick_nohz_active)
5235     return -1;
5236    
5237     now = ktime_get();
5238     @@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
5239     struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
5240     ktime_t now, iowait;
5241    
5242     - if (!tick_nohz_enabled)
5243     + if (!tick_nohz_active)
5244     return -1;
5245    
5246     now = ktime_get();
5247     @@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
5248     return false;
5249     }
5250    
5251     - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
5252     + if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
5253     + ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
5254     return false;
5255     + }
5256    
5257     if (need_resched())
5258     return false;
5259     @@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void)
5260     local_irq_disable();
5261    
5262     ts = &__get_cpu_var(tick_cpu_sched);
5263     - /*
5264     - * set ts->inidle unconditionally. even if the system did not
5265     - * switch to nohz mode the cpu frequency governers rely on the
5266     - * update of the idle time accounting in tick_nohz_start_idle().
5267     - */
5268     ts->inidle = 1;
5269     __tick_nohz_idle_enter(ts);
5270    
5271     @@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void)
5272     struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
5273     ktime_t next;
5274    
5275     - if (!tick_nohz_enabled)
5276     + if (!tick_nohz_active)
5277     return;
5278    
5279     local_irq_disable();
5280     @@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void)
5281     local_irq_enable();
5282     return;
5283     }
5284     -
5285     + tick_nohz_active = 1;
5286     ts->nohz_mode = NOHZ_MODE_LOWRES;
5287    
5288     /*
5289     @@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void)
5290     }
5291    
5292     #ifdef CONFIG_NO_HZ_COMMON
5293     - if (tick_nohz_enabled)
5294     + if (tick_nohz_enabled) {
5295     ts->nohz_mode = NOHZ_MODE_HIGHRES;
5296     + tick_nohz_active = 1;
5297     + }
5298     #endif
5299     }
5300     #endif /* HIGH_RES_TIMERS */
5301     diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5302     index b8b8560bfb95..7f727b34280d 100644
5303     --- a/kernel/trace/blktrace.c
5304     +++ b/kernel/trace/blktrace.c
5305     @@ -26,6 +26,7 @@
5306     #include <linux/export.h>
5307     #include <linux/time.h>
5308     #include <linux/uaccess.h>
5309     +#include <linux/list.h>
5310    
5311     #include <trace/events/block.h>
5312    
5313     @@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
5314     static struct trace_array *blk_tr;
5315     static bool blk_tracer_enabled __read_mostly;
5316    
5317     +static LIST_HEAD(running_trace_list);
5318     +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
5319     +
5320     /* Select an alternative, minimalistic output than the original one */
5321     #define TRACE_BLK_OPT_CLASSIC 0x1
5322    
5323     @@ -107,10 +111,18 @@ record_it:
5324     * Send out a notify for this process, if we haven't done so since a trace
5325     * started
5326     */
5327     -static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
5328     +static void trace_note_tsk(struct task_struct *tsk)
5329     {
5330     + unsigned long flags;
5331     + struct blk_trace *bt;
5332     +
5333     tsk->btrace_seq = blktrace_seq;
5334     - trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
5335     + spin_lock_irqsave(&running_trace_lock, flags);
5336     + list_for_each_entry(bt, &running_trace_list, running_list) {
5337     + trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
5338     + sizeof(tsk->comm));
5339     + }
5340     + spin_unlock_irqrestore(&running_trace_lock, flags);
5341     }
5342    
5343     static void trace_note_time(struct blk_trace *bt)
5344     @@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
5345     goto record_it;
5346     }
5347    
5348     + if (unlikely(tsk->btrace_seq != blktrace_seq))
5349     + trace_note_tsk(tsk);
5350     +
5351     /*
5352     * A word about the locking here - we disable interrupts to reserve
5353     * some space in the relay per-cpu buffer, to prevent an irq
5354     * from coming in and stepping on our toes.
5355     */
5356     local_irq_save(flags);
5357     -
5358     - if (unlikely(tsk->btrace_seq != blktrace_seq))
5359     - trace_note_tsk(bt, tsk);
5360     -
5361     t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
5362     if (t) {
5363     sequence = per_cpu_ptr(bt->sequence, cpu);
5364     @@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
5365     bt->dir = dir;
5366     bt->dev = dev;
5367     atomic_set(&bt->dropped, 0);
5368     + INIT_LIST_HEAD(&bt->running_list);
5369    
5370     ret = -EIO;
5371     bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
5372     @@ -601,6 +613,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
5373     blktrace_seq++;
5374     smp_mb();
5375     bt->trace_state = Blktrace_running;
5376     + spin_lock_irq(&running_trace_lock);
5377     + list_add(&bt->running_list, &running_trace_list);
5378     + spin_unlock_irq(&running_trace_lock);
5379    
5380     trace_note_time(bt);
5381     ret = 0;
5382     @@ -608,6 +623,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
5383     } else {
5384     if (bt->trace_state == Blktrace_running) {
5385     bt->trace_state = Blktrace_stopped;
5386     + spin_lock_irq(&running_trace_lock);
5387     + list_del_init(&bt->running_list);
5388     + spin_unlock_irq(&running_trace_lock);
5389     relay_flush(bt->rchan);
5390     ret = 0;
5391     }
5392     @@ -1472,6 +1490,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
5393     if (atomic_dec_and_test(&blk_probes_ref))
5394     blk_unregister_tracepoints();
5395    
5396     + spin_lock_irq(&running_trace_lock);
5397     + list_del(&bt->running_list);
5398     + spin_unlock_irq(&running_trace_lock);
5399     blk_trace_free(bt);
5400     return 0;
5401     }
5402     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
5403     index 368a4d50cc30..b03b1f897b5e 100644
5404     --- a/kernel/trace/trace_events.c
5405     +++ b/kernel/trace/trace_events.c
5406     @@ -1763,6 +1763,16 @@ static void trace_module_add_events(struct module *mod)
5407     {
5408     struct ftrace_event_call **call, **start, **end;
5409    
5410     + if (!mod->num_trace_events)
5411     + return;
5412     +
5413     + /* Tracepoints are skipped for tainted mods; skip their trace events too */
5414     + if (trace_module_has_bad_taint(mod)) {
5415     + pr_err("%s: module has bad taint, not creating trace events\n",
5416     + mod->name);
5417     + return;
5418     + }
5419     +
5420     start = mod->trace_events;
5421     end = mod->trace_events + mod->num_trace_events;
5422    
5423     diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
5424     index 29f26540e9c9..031cc5655a51 100644
5425     --- a/kernel/tracepoint.c
5426     +++ b/kernel/tracepoint.c
5427     @@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
5428     EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
5429    
5430     #ifdef CONFIG_MODULES
5431     +bool trace_module_has_bad_taint(struct module *mod)
5432     +{
5433     + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
5434     +}
5435     +
5436     static int tracepoint_module_coming(struct module *mod)
5437     {
5438     struct tp_module *tp_mod, *iter;
5439     @@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
5440     * module headers (for forced load), to make sure we don't cause a crash.
5441     * Staging and out-of-tree GPL modules are fine.
5442     */
5443     - if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
5444     + if (trace_module_has_bad_taint(mod))
5445     return 0;
5446     mutex_lock(&tracepoints_mutex);
5447     tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
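The shared predicate treats only TAINT_OOT_MODULE and TAINT_CRAP as harmless; any other taint bit, such as a forced load, disqualifies the module from both tracepoint and trace-event registration. A small illustration of the mask arithmetic with hypothetical inputs; the taint constants are the real ones from <linux/kernel.h>:

    #include <linux/kernel.h>

    static bool example_bad_taint(unsigned long taints)
    {
    	/* Only these two taints are tolerated for trace registration */
    	unsigned long allowed = (1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP);

    	return (taints & ~allowed) != 0;
    }

    /*
     * example_bad_taint(1 << TAINT_CRAP)          -> false (staging is fine)
     * example_bad_taint(1 << TAINT_FORCED_MODULE) -> true  (forced load)
     */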
5448     diff --git a/lib/show_mem.c b/lib/show_mem.c
5449     index b7c72311ad0c..5847a4921b8e 100644
5450     --- a/lib/show_mem.c
5451     +++ b/lib/show_mem.c
5452     @@ -12,8 +12,7 @@
5453     void show_mem(unsigned int filter)
5454     {
5455     pg_data_t *pgdat;
5456     - unsigned long total = 0, reserved = 0, shared = 0,
5457     - nonshared = 0, highmem = 0;
5458     + unsigned long total = 0, reserved = 0, highmem = 0;
5459    
5460     printk("Mem-Info:\n");
5461     show_free_areas(filter);
5462     @@ -22,43 +21,27 @@ void show_mem(unsigned int filter)
5463     return;
5464    
5465     for_each_online_pgdat(pgdat) {
5466     - unsigned long i, flags;
5467     + unsigned long flags;
5468     + int zoneid;
5469    
5470     pgdat_resize_lock(pgdat, &flags);
5471     - for (i = 0; i < pgdat->node_spanned_pages; i++) {
5472     - struct page *page;
5473     - unsigned long pfn = pgdat->node_start_pfn + i;
5474     -
5475     - if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
5476     - touch_nmi_watchdog();
5477     -
5478     - if (!pfn_valid(pfn))
5479     + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
5480     + struct zone *zone = &pgdat->node_zones[zoneid];
5481     + if (!populated_zone(zone))
5482     continue;
5483    
5484     - page = pfn_to_page(pfn);
5485     -
5486     - if (PageHighMem(page))
5487     - highmem++;
5488     + total += zone->present_pages;
5489     + reserved += zone->present_pages - zone->managed_pages;
5490    
5491     - if (PageReserved(page))
5492     - reserved++;
5493     - else if (page_count(page) == 1)
5494     - nonshared++;
5495     - else if (page_count(page) > 1)
5496     - shared += page_count(page) - 1;
5497     -
5498     - total++;
5499     + if (is_highmem_idx(zoneid))
5500     + highmem += zone->present_pages;
5501     }
5502     pgdat_resize_unlock(pgdat, &flags);
5503     }
5504    
5505     printk("%lu pages RAM\n", total);
5506     -#ifdef CONFIG_HIGHMEM
5507     - printk("%lu pages HighMem\n", highmem);
5508     -#endif
5509     + printk("%lu pages HighMem/MovableOnly\n", highmem);
5510     printk("%lu pages reserved\n", reserved);
5511     - printk("%lu pages shared\n", shared);
5512     - printk("%lu pages non-shared\n", nonshared);
5513     #ifdef CONFIG_QUICKLIST
5514     printk("%lu pages in pagetable cache\n",
5515     quicklist_total_size());
5516     diff --git a/mm/compaction.c b/mm/compaction.c
5517     index 74ad00908c79..d2c6751879dc 100644
5518     --- a/mm/compaction.c
5519     +++ b/mm/compaction.c
5520     @@ -252,7 +252,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5521     {
5522     int nr_scanned = 0, total_isolated = 0;
5523     struct page *cursor, *valid_page = NULL;
5524     - unsigned long nr_strict_required = end_pfn - blockpfn;
5525     unsigned long flags;
5526     bool locked = false;
5527    
5528     @@ -265,11 +264,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5529    
5530     nr_scanned++;
5531     if (!pfn_valid_within(blockpfn))
5532     - continue;
5533     + goto isolate_fail;
5534     +
5535     if (!valid_page)
5536     valid_page = page;
5537     if (!PageBuddy(page))
5538     - continue;
5539     + goto isolate_fail;
5540    
5541     /*
5542     * The zone lock must be held to isolate freepages.
5543     @@ -290,12 +290,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5544    
5545     /* Recheck this is a buddy page under lock */
5546     if (!PageBuddy(page))
5547     - continue;
5548     + goto isolate_fail;
5549    
5550     /* Found a free page, break it into order-0 pages */
5551     isolated = split_free_page(page);
5552     - if (!isolated && strict)
5553     - break;
5554     total_isolated += isolated;
5555     for (i = 0; i < isolated; i++) {
5556     list_add(&page->lru, freelist);
5557     @@ -306,7 +304,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5558     if (isolated) {
5559     blockpfn += isolated - 1;
5560     cursor += isolated - 1;
5561     + continue;
5562     }
5563     +
5564     +isolate_fail:
5565     + if (strict)
5566     + break;
5567     + else
5568     + continue;
5569     +
5570     }
5571    
5572     trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
5573     @@ -316,7 +322,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
5574     * pages requested were isolated. If there were any failures, 0 is
5575     * returned and CMA will fail.
5576     */
5577     - if (strict && nr_strict_required > total_isolated)
5578     + if (strict && blockpfn < end_pfn)
5579     total_isolated = 0;
5580    
5581     if (locked)
5582     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5583     index dd7789ce7572..389973fd6bb7 100644
5584     --- a/mm/huge_memory.c
5585     +++ b/mm/huge_memory.c
5586     @@ -1897,7 +1897,7 @@ out:
5587     return ret;
5588     }
5589    
5590     -#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
5591     +#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
5592    
5593     int hugepage_madvise(struct vm_area_struct *vma,
5594     unsigned long *vm_flags, int advice)
5595     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5596     index 8e7adcba8176..15429b92ff98 100644
5597     --- a/mm/memcontrol.c
5598     +++ b/mm/memcontrol.c
5599     @@ -1089,8 +1089,8 @@ skip_node:
5600     * skipping css reference should be safe.
5601     */
5602     if (next_css) {
5603     - if ((next_css->flags & CSS_ONLINE) &&
5604     - (next_css == &root->css || css_tryget(next_css)))
5605     + if ((next_css == &root->css) ||
5606     + ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
5607     return mem_cgroup_from_css(next_css);
5608    
5609     prev_css = next_css;
5610     @@ -6346,11 +6346,24 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
5611     static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5612     {
5613     struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5614     + struct cgroup_subsys_state *iter;
5615    
5616     kmem_cgroup_css_offline(memcg);
5617    
5618     mem_cgroup_invalidate_reclaim_iterators(memcg);
5619     - mem_cgroup_reparent_charges(memcg);
5620     +
5621     + /*
5622     + * This requires that offlining is serialized. Right now that is
5623     + * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
5624     + */
5625     + rcu_read_lock();
5626     + css_for_each_descendant_post(iter, css) {
5627     + rcu_read_unlock();
5628     + mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
5629     + rcu_read_lock();
5630     + }
5631     + rcu_read_unlock();
5632     +
5633     mem_cgroup_destroy_all_caches(memcg);
5634     vmpressure_cleanup(&memcg->vmpressure);
5635     }
5636     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5637     index 317ea747d2cd..06f847933eeb 100644
5638     --- a/mm/page_alloc.c
5639     +++ b/mm/page_alloc.c
5640     @@ -1217,6 +1217,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
5641     }
5642     local_irq_restore(flags);
5643     }
5644     +static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5645     +{
5646     + return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
5647     +}
5648     +#else
5649     +static bool gfp_thisnode_allocation(gfp_t gfp_mask)
5650     +{
5651     + return false;
5652     +}
5653     #endif
5654    
5655     /*
5656     @@ -1553,7 +1562,13 @@ again:
5657     get_pageblock_migratetype(page));
5658     }
5659    
5660     - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5661     + /*
5662     + * NOTE: GFP_THISNODE allocations do not partake in the kswapd
5663     + * aging protocol, so they can't be fair.
5664     + */
5665     + if (!gfp_thisnode_allocation(gfp_flags))
5666     + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
5667     +
5668     __count_zone_vm_events(PGALLOC, zone, 1 << order);
5669     zone_statistics(preferred_zone, zone, gfp_flags);
5670     local_irq_restore(flags);
5671     @@ -1925,8 +1940,12 @@ zonelist_scan:
5672     * ultimately fall back to remote zones that do not
5673     * partake in the fairness round-robin cycle of this
5674     * zonelist.
5675     + *
5676     + * NOTE: GFP_THISNODE allocations do not partake in
5677     + * the kswapd aging protocol, so they can't be fair.
5678     */
5679     - if (alloc_flags & ALLOC_WMARK_LOW) {
5680     + if ((alloc_flags & ALLOC_WMARK_LOW) &&
5681     + !gfp_thisnode_allocation(gfp_mask)) {
5682     if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
5683     continue;
5684     if (!zone_local(preferred_zone, zone))
5685     @@ -2492,8 +2511,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5686     * allowed per node queues are empty and that nodes are
5687     * over allocated.
5688     */
5689     - if (IS_ENABLED(CONFIG_NUMA) &&
5690     - (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
5691     + if (gfp_thisnode_allocation(gfp_mask))
5692     goto nopage;
5693    
5694     restart:
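
gfp_thisnode_allocation() is a mask-equality test: every bit of the GFP_THISNODE composite must be set, not merely __GFP_THISNODE. A standalone sketch; the flag values below are invented for the demo and do not match the real GFP bits:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int gfp_t;

    #define __GFP_THISNODE  0x01u
    #define __GFP_NOWARN    0x02u
    #define __GFP_NORETRY   0x04u
    #define GFP_THISNODE    (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)

    static bool gfp_thisnode_allocation(gfp_t gfp_mask)
    {
        return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
    }

    int main(void)
    {
        printf("%d\n", gfp_thisnode_allocation(GFP_THISNODE));          /* 1 */
        printf("%d\n", gfp_thisnode_allocation(__GFP_THISNODE));        /* 0 */
        printf("%d\n", gfp_thisnode_allocation(GFP_THISNODE | 0x100u)); /* 1 */
        return 0;
    }
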
5695     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
5696     index 11af243bf92f..467e3e071832 100644
5697     --- a/net/core/neighbour.c
5698     +++ b/net/core/neighbour.c
5699     @@ -764,9 +764,6 @@ static void neigh_periodic_work(struct work_struct *work)
5700     nht = rcu_dereference_protected(tbl->nht,
5701     lockdep_is_held(&tbl->lock));
5702    
5703     - if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
5704     - goto out;
5705     -
5706     /*
5707     * periodically recompute ReachableTime from random function
5708     */
5709     @@ -779,6 +776,9 @@ static void neigh_periodic_work(struct work_struct *work)
5710     neigh_rand_reach_time(p->base_reachable_time);
5711     }
5712    
5713     + if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
5714     + goto out;
5715     +
5716     for (i = 0 ; i < (1 << nht->hash_shift); i++) {
5717     np = &nht->hash_buckets[i];
5718    
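
The neighbour.c hunks only move code: the randomized ReachableTime refresh now runs before the gc_thresh1 early exit, so it is no longer skipped on lightly populated tables. The reordered shape in isolation, with printf standing in for the real work:

    #include <stdio.h>

    static void periodic_work(int entries, int gc_thresh1)
    {
        /* 1. always refresh the randomized ReachableTime */
        printf("recompute reachable_time\n");

        /* 2. only then consider skipping the expensive GC walk;
         * the old code returned here before step 1 ever ran */
        if (entries < gc_thresh1)
            return;

        printf("scan hash buckets\n");
    }

    int main(void)
    {
        periodic_work(10, 100);   /* small table: refresh, no GC scan */
        return 0;
    }
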
5719     diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
5720     index c31e3ad98ef2..ba22cc3a5a53 100644
5721     --- a/net/ipv4/ip_tunnel_core.c
5722     +++ b/net/ipv4/ip_tunnel_core.c
5723     @@ -109,7 +109,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
5724     secpath_reset(skb);
5725     if (!skb->l4_rxhash)
5726     skb->rxhash = 0;
5727     - skb_dst_drop(skb);
5728     skb->vlan_tci = 0;
5729     skb_set_queue_mapping(skb, 0);
5730     skb->pkt_type = PACKET_HOST;
5731     diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
5732     index 14a15c49129d..15e024105f91 100644
5733     --- a/net/ipv4/syncookies.c
5734     +++ b/net/ipv4/syncookies.c
5735     @@ -89,8 +89,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
5736    
5737    
5738     static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5739     - __be16 dport, __u32 sseq, __u32 count,
5740     - __u32 data)
5741     + __be16 dport, __u32 sseq, __u32 data)
5742     {
5743     /*
5744     * Compute the secure sequence number.
5745     @@ -102,7 +101,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5746     * As an extra hack, we add a small "data" value that encodes the
5747     * MSS into the second hash value.
5748     */
5749     -
5750     + u32 count = tcp_cookie_time();
5751     return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
5752     sseq + (count << COOKIEBITS) +
5753     ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
5754     @@ -114,22 +113,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
5755     * If the syncookie is bad, the data returned will be out of
5756     * range. This must be checked by the caller.
5757     *
5758     - * The count value used to generate the cookie must be within
5759     - * "maxdiff" if the current (passed-in) "count". The return value
5760     - * is (__u32)-1 if this test fails.
5761     + * The count value used to generate the cookie must be less than
5762     + * MAX_SYNCOOKIE_AGE minutes in the past.
5763     + * The return value is (__u32)-1 if this test fails.
5764     */
5765     static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
5766     - __be16 sport, __be16 dport, __u32 sseq,
5767     - __u32 count, __u32 maxdiff)
5768     + __be16 sport, __be16 dport, __u32 sseq)
5769     {
5770     - __u32 diff;
5771     + u32 diff, count = tcp_cookie_time();
5772    
5773     /* Strip away the layers from the cookie */
5774     cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
5775    
5776     /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
5777     diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
5778     - if (diff >= maxdiff)
5779     + if (diff >= MAX_SYNCOOKIE_AGE)
5780     return (__u32)-1;
5781    
5782     return (cookie -
5783     @@ -138,22 +136,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
5784     }
5785    
5786     /*
5787     - * MSS Values are taken from the 2009 paper
5788     - * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
5789     - * - values 1440 to 1460 accounted for 80% of observed mss values
5790     - * - values outside the 536-1460 range are rare (<0.2%).
5791     + * MSS Values are chosen based on the 2011 paper
5792     + * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
5793     + * Values ..
5794     + * .. lower than 536 are rare (< 0.2%)
5795     + * .. between 537 and 1299 account for less than 1.5% of observed values
5796     + * .. in the 1300-1349 range account for about 15 to 20% of observed mss values
5797     + * .. exceeding 1460 are very rare (< 0.04%)
5798     *
5799     - * Table must be sorted.
5800     + * 1460 is the single most frequently announced mss value (30 to 46% depending
5801     + * on monitor location). Table must be sorted.
5802     */
5803     static __u16 const msstab[] = {
5804     - 64,
5805     - 512,
5806     536,
5807     - 1024,
5808     - 1440,
5809     + 1300,
5810     + 1440, /* 1440, 1452: PPPoE */
5811     1460,
5812     - 4312,
5813     - 8960,
5814     };
5815    
5816     /*
5817     @@ -173,7 +171,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5818    
5819     return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
5820     th->source, th->dest, ntohl(th->seq),
5821     - jiffies / (HZ * 60), mssind);
5822     + mssind);
5823     }
5824     EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
5825    
5826     @@ -189,13 +187,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
5827     }
5828    
5829     /*
5830     - * This (misnamed) value is the age of syncookie which is permitted.
5831     - * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
5832     - * sysctl_tcp_retries1. It's a rather complicated formula (exponential
5833     - * backoff) to compute at runtime so it's currently hardcoded here.
5834     - */
5835     -#define COUNTER_TRIES 4
5836     -/*
5837     * Check if a ack sequence number is a valid syncookie.
5838     * Return the decoded mss if it is, or 0 if not.
5839     */
5840     @@ -204,9 +195,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
5841     {
5842     __u32 seq = ntohl(th->seq) - 1;
5843     __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
5844     - th->source, th->dest, seq,
5845     - jiffies / (HZ * 60),
5846     - COUNTER_TRIES);
5847     + th->source, th->dest, seq);
5848    
5849     return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
5850     }
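
After this change both cookie generation and validation derive the counter from tcp_cookie_time(), and validation bounds the 8-bit modular difference by MAX_SYNCOOKIE_AGE instead of the old caller-supplied maxdiff. A toy version of just the age arithmetic, assuming MAX_SYNCOOKIE_AGE is 2:

    #include <stdint.h>
    #include <stdio.h>

    #define COOKIEBITS        24   /* top 8 bits of the cookie hold the count */
    #define MAX_SYNCOOKIE_AGE 2    /* assumed value for this demo */

    static int cookie_age_ok(uint32_t cookie_count, uint32_t now_count)
    {
        /* modular difference over the 8-bit counter width */
        uint32_t diff = (now_count - cookie_count) & (UINT32_MAX >> COOKIEBITS);

        return diff < MAX_SYNCOOKIE_AGE;
    }

    int main(void)
    {
        printf("%d\n", cookie_age_ok(10, 10));  /* same minute: ok */
        printf("%d\n", cookie_age_ok(10, 11));  /* one step old: ok */
        printf("%d\n", cookie_age_ok(10, 12));  /* too old: rejected */
        printf("%d\n", cookie_age_ok(255, 0));  /* wraparound handled: ok */
        return 0;
    }
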
5851     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5852     index be5246e1d5b6..531ab5721d79 100644
5853     --- a/net/ipv4/tcp.c
5854     +++ b/net/ipv4/tcp.c
5855     @@ -1000,7 +1000,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
5856     }
5857     }
5858    
5859     -static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
5860     +static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
5861     + int *copied, size_t size)
5862     {
5863     struct tcp_sock *tp = tcp_sk(sk);
5864     int err, flags;
5865     @@ -1015,11 +1016,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
5866     if (unlikely(tp->fastopen_req == NULL))
5867     return -ENOBUFS;
5868     tp->fastopen_req->data = msg;
5869     + tp->fastopen_req->size = size;
5870    
5871     flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
5872     err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
5873     msg->msg_namelen, flags);
5874     - *size = tp->fastopen_req->copied;
5875     + *copied = tp->fastopen_req->copied;
5876     tcp_free_fastopen_req(tp);
5877     return err;
5878     }
5879     @@ -1039,7 +1041,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5880    
5881     flags = msg->msg_flags;
5882     if (flags & MSG_FASTOPEN) {
5883     - err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
5884     + err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
5885     if (err == -EINPROGRESS && copied_syn > 0)
5886     goto out;
5887     else if (err)
5888     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5889     index fb8227a8c004..e088932bcfae 100644
5890     --- a/net/ipv4/tcp_output.c
5891     +++ b/net/ipv4/tcp_output.c
5892     @@ -2902,7 +2902,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
5893     space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
5894     MAX_TCP_OPTION_SPACE;
5895    
5896     - syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
5897     + space = min_t(size_t, space, fo->size);
5898     +
5899     + /* limit to order-0 allocations */
5900     + space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
5901     +
5902     + syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
5903     sk->sk_allocation);
5904     if (syn_data == NULL)
5905     goto fallback;
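
tcp_send_syn_data() previously sized the SYN payload from the MSS alone, ignoring how much data the sender actually supplied; the two new min_t() clamps bound it by fo->size and by what fits in an order-0 skb head. The clamping logic in isolation, with placeholder constants rather than the kernel's real values:

    #include <stddef.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static size_t syn_data_space(size_t mss_space, size_t fo_size,
                                 size_t skb_max_head)
    {
        size_t space = mss_space;

        space = MIN(space, fo_size);       /* never copy more than supplied */
        space = MIN(space, skb_max_head);  /* keep the skb an order-0 alloc */
        return space;
    }

    int main(void)
    {
        printf("%zu\n", syn_data_space(1400, 100, 1 << 12));  /* 100 */
        printf("%zu\n", syn_data_space(9000, 8192, 1 << 12)); /* 4096 */
        return 0;
    }
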
5906     diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
5907     index 140748debc4a..8af3eb57f438 100644
5908     --- a/net/ipv6/exthdrs_core.c
5909     +++ b/net/ipv6/exthdrs_core.c
5910     @@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
5911     found = (nexthdr == target);
5912    
5913     if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
5914     - if (target < 0)
5915     + if (target < 0 || found)
5916     break;
5917     return -ENOENT;
5918     }
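
The one-line exthdrs change makes ipv6_find_hdr() report success when the terminating non-extension header is itself the header being searched for, instead of returning -ENOENT. A toy model of just that predicate; the header numbers are illustrative:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool lookup_fails(int nexthdr, int target, bool is_ext_hdr)
    {
        bool found = (nexthdr == target);

        /* old condition: `target < 0`; fixed: `target < 0 || found` */
        if (!is_ext_hdr)
            return !(target < 0 || found);
        return false;
    }

    int main(void)
    {
        printf("%d\n", lookup_fails(6, 6, false));  /* 0: TCP found, success */
        printf("%d\n", lookup_fails(6, 17, false)); /* 1: UDP absent, -ENOENT */
        return 0;
    }
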
5919     diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
5920     index bf63ac8a49b9..d703218a653b 100644
5921     --- a/net/ipv6/syncookies.c
5922     +++ b/net/ipv6/syncookies.c
5923     @@ -24,26 +24,21 @@
5924     #define COOKIEBITS 24 /* Upper bits store count */
5925     #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
5926    
5927     -/* Table must be sorted. */
5928     +/* RFC 2460, Section 8.3:
5929     + * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
5930     + *
5931     + * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
5932     + * using higher values than ipv4 tcp syncookies.
5933     + * The other values are chosen based on ethernet (1500 and 9k MTU), plus
5934     + * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
5935     + */
5936     static __u16 const msstab[] = {
5937     - 64,
5938     - 512,
5939     - 536,
5940     - 1280 - 60,
5941     + 1280 - 60, /* IPV6_MIN_MTU - 60 */
5942     1480 - 60,
5943     1500 - 60,
5944     - 4460 - 60,
5945     9000 - 60,
5946     };
5947    
5948     -/*
5949     - * This (misnamed) value is the age of syncookie which is permitted.
5950     - * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
5951     - * sysctl_tcp_retries1. It's a rather complicated formula (exponential
5952     - * backoff) to compute at runtime so it's currently hardcoded here.
5953     - */
5954     -#define COUNTER_TRIES 4
5955     -
5956     static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
5957     struct request_sock *req,
5958     struct dst_entry *dst)
5959     @@ -86,8 +81,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
5960     static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
5961     const struct in6_addr *daddr,
5962     __be16 sport, __be16 dport, __u32 sseq,
5963     - __u32 count, __u32 data)
5964     + __u32 data)
5965     {
5966     + u32 count = tcp_cookie_time();
5967     return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
5968     sseq + (count << COOKIEBITS) +
5969     ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
5970     @@ -96,15 +92,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
5971    
5972     static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
5973     const struct in6_addr *daddr, __be16 sport,
5974     - __be16 dport, __u32 sseq, __u32 count,
5975     - __u32 maxdiff)
5976     + __be16 dport, __u32 sseq)
5977     {
5978     - __u32 diff;
5979     + __u32 diff, count = tcp_cookie_time();
5980    
5981     cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
5982    
5983     diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
5984     - if (diff >= maxdiff)
5985     + if (diff >= MAX_SYNCOOKIE_AGE)
5986     return (__u32)-1;
5987    
5988     return (cookie -
5989     @@ -125,8 +120,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
5990     *mssp = msstab[mssind];
5991    
5992     return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
5993     - th->dest, ntohl(th->seq),
5994     - jiffies / (HZ * 60), mssind);
5995     + th->dest, ntohl(th->seq), mssind);
5996     }
5997     EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
5998    
5999     @@ -146,8 +140,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
6000     {
6001     __u32 seq = ntohl(th->seq) - 1;
6002     __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
6003     - th->source, th->dest, seq,
6004     - jiffies / (HZ * 60), COUNTER_TRIES);
6005     + th->source, th->dest, seq);
6006    
6007     return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
6008     }
6009     diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
6010     index 06556d6e1a4d..ab4569df9cef 100644
6011     --- a/net/ipv6/udp_offload.c
6012     +++ b/net/ipv6/udp_offload.c
6013     @@ -111,7 +111,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
6014     fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
6015     fptr->nexthdr = nexthdr;
6016     fptr->reserved = 0;
6017     - ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
6018     + fptr->identification = skb_shinfo(skb)->ip6_frag_id;
6019    
6020     /* Fragment the skb. ipv6 header and the remaining fields of the
6021     * fragment header are updated in ipv6_gso_segment()
6022     diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
6023     index 22290a929b94..641f43219a48 100644
6024     --- a/net/mac80211/mesh_ps.c
6025     +++ b/net/mac80211/mesh_ps.c
6026     @@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
6027     sdata->vif.addr);
6028     nullfunc->frame_control = fc;
6029     nullfunc->duration_id = 0;
6030     + nullfunc->seq_ctrl = 0;
6031     /* no address resolution for this frame -> set addr 1 immediately */
6032     memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
6033     memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
6034     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
6035     index 86e4ad56b573..8d7f4abe65ba 100644
6036     --- a/net/mac80211/mlme.c
6037     +++ b/net/mac80211/mlme.c
6038     @@ -282,6 +282,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
6039     switch (vht_oper->chan_width) {
6040     case IEEE80211_VHT_CHANWIDTH_USE_HT:
6041     vht_chandef.width = chandef->width;
6042     + vht_chandef.center_freq1 = chandef->center_freq1;
6043     break;
6044     case IEEE80211_VHT_CHANWIDTH_80MHZ:
6045     vht_chandef.width = NL80211_CHAN_WIDTH_80;
6046     @@ -331,6 +332,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
6047     ret = 0;
6048    
6049     out:
6050     + /*
6051     + * When tracking the current AP, don't do any further checks if the
6052     + * new chandef is identical to the one we're currently using for the
6053     + * connection. This keeps us from playing ping-pong with regulatory;
6054     + * without it, the following can happen (for example):
6055     + * - connect to an AP with 80 MHz, world regdom allows 80 MHz
6056     + * - AP advertises regdom US
6057     + * - CRDA loads regdom US with 80 MHz prohibited (old database)
6058     + * - the code below detects an unsupported channel, downgrades, and
6059     + * we disconnect from the AP in the caller
6060     + * - disconnect causes CRDA to reload world regdomain and the game
6061     + * starts anew.
6062     + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
6063     + *
6064     + * It seems possible that there are still scenarios with CSA or real
6065     + * bandwidth changes where this could happen, but those cases are
6066     + * less common and wouldn't completely prevent using the AP.
6067     + */
6068     + if (tracking &&
6069     + cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
6070     + return ret;
6071     +
6072     /* don't print the message below for VHT mismatch if VHT is disabled */
6073     if (ret & IEEE80211_STA_DISABLE_VHT)
6074     vht_chandef = *chandef;
6075     diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
6076     index aeb967a0aeed..db41c190e76d 100644
6077     --- a/net/mac80211/sta_info.c
6078     +++ b/net/mac80211/sta_info.c
6079     @@ -340,6 +340,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
6080     return NULL;
6081    
6082     spin_lock_init(&sta->lock);
6083     + spin_lock_init(&sta->ps_lock);
6084     INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
6085     INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
6086     mutex_init(&sta->ampdu_mlme.mtx);
6087     @@ -1049,6 +1050,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
6088    
6089     skb_queue_head_init(&pending);
6090    
6091     + /* sync with ieee80211_tx_h_unicast_ps_buf */
6092     + spin_lock(&sta->ps_lock);
6093     /* Send all buffered frames to the station */
6094     for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
6095     int count = skb_queue_len(&pending), tmp;
6096     @@ -1068,6 +1071,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
6097     }
6098    
6099     ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
6100     + spin_unlock(&sta->ps_lock);
6101    
6102     local->total_ps_buffered -= buffered;
6103    
6104     @@ -1114,6 +1118,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
6105     memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
6106     memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
6107     memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
6108     + nullfunc->seq_ctrl = 0;
6109    
6110     skb->priority = tid;
6111     skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
6112     diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
6113     index 4208dbd5861f..492d59cbf289 100644
6114     --- a/net/mac80211/sta_info.h
6115     +++ b/net/mac80211/sta_info.h
6116     @@ -245,6 +245,7 @@ struct sta_ampdu_mlme {
6117     * @drv_unblock_wk: used for driver PS unblocking
6118     * @listen_interval: listen interval of this station, when we're acting as AP
6119     * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
6120     + * @ps_lock: used for powersave (when mac80211 is the AP) related locking
6121     * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
6122     * when it leaves power saving state or polls
6123     * @tx_filtered: buffers (per AC) of frames we already tried to
6124     @@ -328,10 +329,8 @@ struct sta_info {
6125     /* use the accessors defined below */
6126     unsigned long _flags;
6127    
6128     - /*
6129     - * STA powersave frame queues, no more than the internal
6130     - * locking required.
6131     - */
6132     + /* STA powersave lock and frame queues */
6133     + spinlock_t ps_lock;
6134     struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
6135     struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
6136     unsigned long driver_buffered_tids;
6137     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
6138     index 81dca92176c7..d6a47e76efff 100644
6139     --- a/net/mac80211/tx.c
6140     +++ b/net/mac80211/tx.c
6141     @@ -477,6 +477,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
6142     sta->sta.addr, sta->sta.aid, ac);
6143     if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
6144     purge_old_ps_buffers(tx->local);
6145     +
6146     + /* sync with ieee80211_sta_ps_deliver_wakeup */
6147     + spin_lock(&sta->ps_lock);
6148     + /*
6149     + * STA woke up in the meantime and all the frames on ps_tx_buf have
6150     + * been queued to the pending queue. No reordering can happen, go
6151     + * ahead and Tx the packet.
6152     + */
6153     + if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
6154     + !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
6155     + spin_unlock(&sta->ps_lock);
6156     + return TX_CONTINUE;
6157     + }
6158     +
6159     if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
6160     struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
6161     ps_dbg(tx->sdata,
6162     @@ -490,6 +504,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
6163     info->control.vif = &tx->sdata->vif;
6164     info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
6165     skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
6166     + spin_unlock(&sta->ps_lock);
6167    
6168     if (!timer_pending(&local->sta_cleanup))
6169     mod_timer(&local->sta_cleanup,
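
Taken together, the sta_info and tx hunks close a race between frame buffering and wakeup delivery: ieee80211_tx_h_unicast_ps_buf() now re-checks the powersave flags under the new ps_lock, the same lock ieee80211_sta_ps_deliver_wakeup() holds while moving buffered frames (and clearing the flags), so a frame can no longer be queued behind a flush it missed. A pthread analogue of the ordering (compile with -pthread); the kernel uses spinlocks:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ps_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool sta_asleep = true;

    static void tx_unicast(const char *frame)
    {
        pthread_mutex_lock(&ps_lock);
        if (!sta_asleep) {                 /* woke up meanwhile: send now */
            pthread_mutex_unlock(&ps_lock);
            printf("tx %s directly\n", frame);
            return;
        }
        printf("buffer %s\n", frame);      /* still asleep: buffer under lock */
        pthread_mutex_unlock(&ps_lock);
    }

    static void deliver_wakeup(void)
    {
        pthread_mutex_lock(&ps_lock);
        sta_asleep = false;
        printf("flush buffered frames\n"); /* no new frame can slip in here */
        pthread_mutex_unlock(&ps_lock);
    }

    int main(void)
    {
        tx_unicast("f1");
        deliver_wakeup();
        tx_unicast("f2");
        return 0;
    }
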
6170     diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
6171     index afba19cb6f87..a282fddf8b00 100644
6172     --- a/net/mac80211/wme.c
6173     +++ b/net/mac80211/wme.c
6174     @@ -153,6 +153,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
6175     return IEEE80211_AC_BE;
6176     }
6177    
6178     + if (skb->protocol == sdata->control_port_protocol) {
6179     + skb->priority = 7;
6180     + return ieee80211_downgrade_queue(sdata, skb);
6181     + }
6182     +
6183     /* use the data classifier to determine what 802.1d tag the
6184     * data frame has */
6185     skb->priority = cfg80211_classify8021d(skb);
6186     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
6187     index dfe3f36ff2aa..56ebe71cfe13 100644
6188     --- a/net/sctp/sm_statefuns.c
6189     +++ b/net/sctp/sm_statefuns.c
6190     @@ -759,6 +759,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
6191     struct sctp_chunk auth;
6192     sctp_ierror_t ret;
6193    
6194     + /* Make sure that we and the peer are AUTH capable */
6195     + if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
6196     + kfree_skb(chunk->auth_chunk);
6197     + sctp_association_free(new_asoc);
6198     + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
6199     + }
6200     +
6201     /* set-up our fake chunk so that we can process it */
6202     auth.skb = chunk->auth_chunk;
6203     auth.asoc = chunk->asoc;
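
The sctp guard refuses to process a bundled AUTH chunk unless local AUTH support is enabled and the peer negotiated the capability, discarding the packet and the half-built association otherwise. The decision sketched as a pure function, with illustrative return strings in place of the real dispositions:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *handle_cookie_echo(bool auth_enable,
                                          bool peer_auth_capable,
                                          bool has_auth_chunk)
    {
        /* new guard: never feed an AUTH chunk into unprepared state */
        if (has_auth_chunk && (!auth_enable || !peer_auth_capable))
            return "pdiscard";     /* free chunk + association, drop packet */
        return has_auth_chunk ? "process auth" : "process plain";
    }

    int main(void)
    {
        printf("%s\n", handle_cookie_echo(false, false, true)); /* pdiscard */
        printf("%s\n", handle_cookie_echo(true, true, true));   /* process auth */
        return 0;
    }
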
6204     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
6205     index 83a1daa642bb..1d034825fcc3 100644
6206     --- a/net/sunrpc/xprtsock.c
6207     +++ b/net/sunrpc/xprtsock.c
6208     @@ -853,6 +853,8 @@ static void xs_close(struct rpc_xprt *xprt)
6209    
6210     dprintk("RPC: xs_close xprt %p\n", xprt);
6211    
6212     + cancel_delayed_work_sync(&transport->connect_worker);
6213     +
6214     xs_reset_transport(transport);
6215     xprt->reestablish_timeout = 0;
6216    
6217     @@ -887,12 +889,8 @@ static void xs_local_destroy(struct rpc_xprt *xprt)
6218     */
6219     static void xs_destroy(struct rpc_xprt *xprt)
6220     {
6221     - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
6222     -
6223     dprintk("RPC: xs_destroy xprt %p\n", xprt);
6224    
6225     - cancel_delayed_work_sync(&transport->connect_worker);
6226     -
6227     xs_local_destroy(xprt);
6228     }
6229    
6230     @@ -1834,6 +1832,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
6231     }
6232     #endif
6233    
6234     +static void xs_dummy_setup_socket(struct work_struct *work)
6235     +{
6236     +}
6237     +
6238     static struct socket *xs_create_sock(struct rpc_xprt *xprt,
6239     struct sock_xprt *transport, int family, int type, int protocol)
6240     {
6241     @@ -2673,6 +2675,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
6242     xprt->ops = &xs_local_ops;
6243     xprt->timeout = &xs_local_default_timeout;
6244    
6245     + INIT_DELAYED_WORK(&transport->connect_worker,
6246     + xs_dummy_setup_socket);
6247     +
6248     switch (sun->sun_family) {
6249     case AF_LOCAL:
6250     if (sun->sun_path[0] != '/') {
6251     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6252     index a427623ee574..d7c1ac621a90 100644
6253     --- a/net/unix/af_unix.c
6254     +++ b/net/unix/af_unix.c
6255     @@ -161,9 +161,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
6256    
6257     static inline unsigned int unix_hash_fold(__wsum n)
6258     {
6259     - unsigned int hash = (__force unsigned int)n;
6260     + unsigned int hash = (__force unsigned int)csum_fold(n);
6261    
6262     - hash ^= hash>>16;
6263     hash ^= hash>>8;
6264     return hash&(UNIX_HASH_SIZE-1);
6265     }
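
unix_hash_fold() now folds the 32-bit checksum down to 16 bits via csum_fold() before xor-folding to the hash width, so the upper input bits still influence the bucket choice. A standalone demo; csum_fold() is re-implemented here rather than taken from kernel headers:

    #include <stdint.h>
    #include <stdio.h>

    #define UNIX_HASH_SIZE 256

    /* ones-complement fold of a 32-bit sum to 16 bits */
    static uint16_t csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
    }

    static unsigned int unix_hash_fold(uint32_t n)
    {
        unsigned int hash = csum_fold(n);   /* new: 16-bit fold first */

        hash ^= hash >> 8;
        return hash & (UNIX_HASH_SIZE - 1);
    }

    int main(void)
    {
        printf("%u\n", unix_hash_fold(0x12345678));
        printf("%u\n", unix_hash_fold(0x12345679)); /* nearby inputs differ */
        return 0;
    }
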
6266     diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
6267     index 2906d520eea7..3be02b680268 100644
6268     --- a/net/xfrm/xfrm_ipcomp.c
6269     +++ b/net/xfrm/xfrm_ipcomp.c
6270     @@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
6271     const int plen = skb->len;
6272     int dlen = IPCOMP_SCRATCH_SIZE;
6273     u8 *start = skb->data;
6274     - const int cpu = get_cpu();
6275     - u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
6276     - struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
6277     + struct crypto_comp *tfm;
6278     + u8 *scratch;
6279     int err;
6280    
6281     local_bh_disable();
6282     + scratch = *this_cpu_ptr(ipcomp_scratches);
6283     + tfm = *this_cpu_ptr(ipcd->tfms);
6284     err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
6285     - local_bh_enable();
6286     if (err)
6287     goto out;
6288    
6289     @@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
6290     }
6291    
6292     memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
6293     - put_cpu();
6294     + local_bh_enable();
6295    
6296     pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
6297     return 0;
6298    
6299     out:
6300     - put_cpu();
6301     + local_bh_enable();
6302     return err;
6303     }
6304    
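
The ipcomp conversion keeps softirqs disabled for the entire lifetime of the per-CPU scratch buffer, and every exit path, the error path included, re-enables them exactly once. The balanced enable/disable shape, with printf standing in for the real primitives:

    #include <stdio.h>

    static int compress(int fail)
    {
        int err = 0;

        printf("local_bh_disable()\n");    /* take the per-CPU scratch */
        if (fail) {
            err = -1;
            goto out;                      /* error path must also re-enable */
        }
        printf("use scratch, copy result\n");
    out:
        printf("local_bh_enable()\n");     /* single, balanced release point */
        return err;
    }

    int main(void)
    {
        compress(0);
        compress(1);
        return 0;
    }
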
6305     diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
6306     index 30f119b1d1ec..820313a04d49 100644
6307     --- a/security/selinux/ss/ebitmap.c
6308     +++ b/security/selinux/ss/ebitmap.c
6309     @@ -213,7 +213,12 @@ netlbl_import_failure:
6310     }
6311     #endif /* CONFIG_NETLABEL */
6312    
6313     -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
6314     +/*
6315     + * Check to see if all the bits set in e2 are also set in e1. Optionally,
6316     + * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
6317     + * last_e2bit.
6318     + */
6319     +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
6320     {
6321     struct ebitmap_node *n1, *n2;
6322     int i;
6323     @@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
6324    
6325     n1 = e1->node;
6326     n2 = e2->node;
6327     +
6328     while (n1 && n2 && (n1->startbit <= n2->startbit)) {
6329     if (n1->startbit < n2->startbit) {
6330     n1 = n1->next;
6331     continue;
6332     }
6333     - for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
6334     + for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
6335     + i--; /* Skip trailing NULL map entries */
6336     + if (last_e2bit && (i >= 0)) {
6337     + u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
6338     + __fls(n2->maps[i]);
6339     + if (lastsetbit > last_e2bit)
6340     + return 0;
6341     + }
6342     +
6343     + while (i >= 0) {
6344     if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
6345     return 0;
6346     + i--;
6347     }
6348    
6349     n1 = n1->next;
6350     diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
6351     index 922f8afa89dd..712c8a7b8e8b 100644
6352     --- a/security/selinux/ss/ebitmap.h
6353     +++ b/security/selinux/ss/ebitmap.h
6354     @@ -16,7 +16,13 @@
6355    
6356     #include <net/netlabel.h>
6357    
6358     -#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
6359     +#ifdef CONFIG_64BIT
6360     +#define EBITMAP_NODE_SIZE 64
6361     +#else
6362     +#define EBITMAP_NODE_SIZE 32
6363     +#endif
6364     +
6365     +#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
6366     / sizeof(unsigned long))
6367     #define EBITMAP_UNIT_SIZE BITS_PER_LONG
6368     #define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
6369     @@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
6370    
6371     int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
6372     int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
6373     -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
6374     +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
6375     int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
6376     int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
6377     void ebitmap_destroy(struct ebitmap *e);
6378     diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
6379     index 40de8d3f208e..c85bc1ec040c 100644
6380     --- a/security/selinux/ss/mls.c
6381     +++ b/security/selinux/ss/mls.c
6382     @@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
6383     int mls_level_isvalid(struct policydb *p, struct mls_level *l)
6384     {
6385     struct level_datum *levdatum;
6386     - struct ebitmap_node *node;
6387     - int i;
6388    
6389     if (!l->sens || l->sens > p->p_levels.nprim)
6390     return 0;
6391     @@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
6392     if (!levdatum)
6393     return 0;
6394    
6395     - ebitmap_for_each_positive_bit(&l->cat, node, i) {
6396     - if (i > p->p_cats.nprim)
6397     - return 0;
6398     - if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
6399     - /*
6400     - * Category may not be associated with
6401     - * sensitivity.
6402     - */
6403     - return 0;
6404     - }
6405     - }
6406     -
6407     - return 1;
6408     + /*
6409     + * Return 1 iff all the bits set in l->cat are also set in
6410     + * levdatum->level->cat and no bit in l->cat is larger than
6411     + * p->p_cats.nprim.
6412     + */
6413     + return ebitmap_contains(&levdatum->level->cat, &l->cat,
6414     + p->p_cats.nprim);
6415     }
6416    
6417     int mls_range_isvalid(struct policydb *p, struct mls_range *r)
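
With the new last_e2bit argument, ebitmap_contains() checks set inclusion and additionally rejects any bit above the policy's highest defined category, which is what lets mls_level_isvalid() above drop its per-bit loop. A flat-bitmap analogue of those semantics; the kernel version walks sparse ebitmap nodes instead:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool bitmap_contains(uint64_t e1, uint64_t e2, unsigned last_e2bit)
    {
        if (last_e2bit && e2) {
            unsigned highest = 63;

            while (!(e2 & (1ULL << highest)))
                highest--;                 /* find e2's highest set bit */
            if (highest > last_e2bit)
                return false;              /* category beyond the policy */
        }
        return (e1 & e2) == e2;            /* all e2 bits present in e1 */
    }

    int main(void)
    {
        /* categories {0,2} within {0,1,2}, highest allowed bit 4: ok */
        printf("%d\n", bitmap_contains(0x7, 0x5, 4));   /* 1 */
        /* bit 5 set but only bits up to 4 are defined: rejected */
        printf("%d\n", bitmap_contains(0xff, 0x20, 4)); /* 0 */
        return 0;
    }
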
6418     diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
6419     index 03bed52a8052..e93648774137 100644
6420     --- a/security/selinux/ss/mls_types.h
6421     +++ b/security/selinux/ss/mls_types.h
6422     @@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
6423     static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
6424     {
6425     return ((l1->sens >= l2->sens) &&
6426     - ebitmap_contains(&l1->cat, &l2->cat));
6427     + ebitmap_contains(&l1->cat, &l2->cat, 0));
6428     }
6429    
6430     #define mls_level_incomp(l1, l2) \
6431     diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
6432     index d0d7ac1e99d2..f62356c2f54c 100644
6433     --- a/sound/pci/hda/hda_eld.c
6434     +++ b/sound/pci/hda/hda_eld.c
6435     @@ -478,10 +478,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
6436     snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
6437     }
6438    
6439     -static void hdmi_print_eld_info(struct snd_info_entry *entry,
6440     - struct snd_info_buffer *buffer)
6441     +void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
6442     + struct snd_info_buffer *buffer)
6443     {
6444     - struct hdmi_eld *eld = entry->private_data;
6445     struct parsed_hdmi_eld *e = &eld->info;
6446     char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
6447     int i;
6448     @@ -500,13 +499,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
6449     [4 ... 7] = "reserved"
6450     };
6451    
6452     - mutex_lock(&eld->lock);
6453     snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
6454     snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
6455     - if (!eld->eld_valid) {
6456     - mutex_unlock(&eld->lock);
6457     + if (!eld->eld_valid)
6458     return;
6459     - }
6460     snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
6461     snd_iprintf(buffer, "connection_type\t\t%s\n",
6462     eld_connection_type_names[e->conn_type]);
6463     @@ -528,13 +524,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
6464    
6465     for (i = 0; i < e->sad_count; i++)
6466     hdmi_print_sad_info(i, e->sad + i, buffer);
6467     - mutex_unlock(&eld->lock);
6468     }
6469    
6470     -static void hdmi_write_eld_info(struct snd_info_entry *entry,
6471     - struct snd_info_buffer *buffer)
6472     +void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
6473     + struct snd_info_buffer *buffer)
6474     {
6475     - struct hdmi_eld *eld = entry->private_data;
6476     struct parsed_hdmi_eld *e = &eld->info;
6477     char line[64];
6478     char name[64];
6479     @@ -542,7 +536,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
6480     long long val;
6481     unsigned int n;
6482    
6483     - mutex_lock(&eld->lock);
6484     while (!snd_info_get_line(buffer, line, sizeof(line))) {
6485     if (sscanf(line, "%s %llx", name, &val) != 2)
6486     continue;
6487     @@ -594,38 +587,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
6488     e->sad_count = n + 1;
6489     }
6490     }
6491     - mutex_unlock(&eld->lock);
6492     -}
6493     -
6494     -
6495     -int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
6496     - int index)
6497     -{
6498     - char name[32];
6499     - struct snd_info_entry *entry;
6500     - int err;
6501     -
6502     - snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
6503     - err = snd_card_proc_new(codec->bus->card, name, &entry);
6504     - if (err < 0)
6505     - return err;
6506     -
6507     - snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
6508     - entry->c.text.write = hdmi_write_eld_info;
6509     - entry->mode |= S_IWUSR;
6510     - eld->proc_entry = entry;
6511     -
6512     - return 0;
6513     -}
6514     -
6515     -void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
6516     -{
6517     - if (!codec->bus->shutdown && eld->proc_entry) {
6518     - snd_device_free(codec->bus->card, eld->proc_entry);
6519     - eld->proc_entry = NULL;
6520     - }
6521     }
6522     -
6523     #endif /* CONFIG_PROC_FS */
6524    
6525     /* update PCM info based on ELD */
6526     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6527     index f7e76619f7c9..ccf5eb6b3d37 100644
6528     --- a/sound/pci/hda/hda_intel.c
6529     +++ b/sound/pci/hda/hda_intel.c
6530     @@ -169,6 +169,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
6531     "{Intel, PPT},"
6532     "{Intel, LPT},"
6533     "{Intel, LPT_LP},"
6534     + "{Intel, WPT_LP},"
6535     "{Intel, HPT},"
6536     "{Intel, PBG},"
6537     "{Intel, SCH},"
6538     @@ -568,6 +569,7 @@ enum {
6539     AZX_DRIVER_ICH,
6540     AZX_DRIVER_PCH,
6541     AZX_DRIVER_SCH,
6542     + AZX_DRIVER_HDMI,
6543     AZX_DRIVER_ATI,
6544     AZX_DRIVER_ATIHDMI,
6545     AZX_DRIVER_ATIHDMI_NS,
6546     @@ -647,6 +649,7 @@ static char *driver_short_names[] = {
6547     [AZX_DRIVER_ICH] = "HDA Intel",
6548     [AZX_DRIVER_PCH] = "HDA Intel PCH",
6549     [AZX_DRIVER_SCH] = "HDA Intel MID",
6550     + [AZX_DRIVER_HDMI] = "HDA Intel HDMI",
6551     [AZX_DRIVER_ATI] = "HDA ATI SB",
6552     [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
6553     [AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI",
6554     @@ -3994,13 +3997,16 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
6555     /* Lynx Point-LP */
6556     { PCI_DEVICE(0x8086, 0x9c21),
6557     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6558     + /* Wildcat Point-LP */
6559     + { PCI_DEVICE(0x8086, 0x9ca0),
6560     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6561     /* Haswell */
6562     { PCI_DEVICE(0x8086, 0x0a0c),
6563     - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6564     + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6565     { PCI_DEVICE(0x8086, 0x0c0c),
6566     - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6567     + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6568     { PCI_DEVICE(0x8086, 0x0d0c),
6569     - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_HASWELL },
6570     + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6571     /* 5 Series/3400 */
6572     { PCI_DEVICE(0x8086, 0x3b56),
6573     .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
6574     @@ -4080,6 +4086,22 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
6575     .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6576     { PCI_DEVICE(0x1002, 0xaa48),
6577     .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6578     + { PCI_DEVICE(0x1002, 0xaa50),
6579     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6580     + { PCI_DEVICE(0x1002, 0xaa58),
6581     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6582     + { PCI_DEVICE(0x1002, 0xaa60),
6583     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6584     + { PCI_DEVICE(0x1002, 0xaa68),
6585     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6586     + { PCI_DEVICE(0x1002, 0xaa80),
6587     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6588     + { PCI_DEVICE(0x1002, 0xaa88),
6589     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6590     + { PCI_DEVICE(0x1002, 0xaa90),
6591     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6592     + { PCI_DEVICE(0x1002, 0xaa98),
6593     + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
6594     { PCI_DEVICE(0x1002, 0x9902),
6595     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI },
6596     { PCI_DEVICE(0x1002, 0xaaa0),
6597     diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
6598     index 2e7493ef8ee0..040d93324f32 100644
6599     --- a/sound/pci/hda/hda_local.h
6600     +++ b/sound/pci/hda/hda_local.h
6601     @@ -751,10 +751,6 @@ struct hdmi_eld {
6602     int eld_size;
6603     char eld_buffer[ELD_MAX_SIZE];
6604     struct parsed_hdmi_eld info;
6605     - struct mutex lock;
6606     -#ifdef CONFIG_PROC_FS
6607     - struct snd_info_entry *proc_entry;
6608     -#endif
6609     };
6610    
6611     int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
6612     @@ -767,20 +763,10 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
6613     struct hda_pcm_stream *hinfo);
6614    
6615     #ifdef CONFIG_PROC_FS
6616     -int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
6617     - int index);
6618     -void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
6619     -#else
6620     -static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
6621     - struct hdmi_eld *eld,
6622     - int index)
6623     -{
6624     - return 0;
6625     -}
6626     -static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
6627     - struct hdmi_eld *eld)
6628     -{
6629     -}
6630     +void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
6631     + struct snd_info_buffer *buffer);
6632     +void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
6633     + struct snd_info_buffer *buffer);
6634     #endif
6635    
6636     #define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
6637     diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
6638     index 7fc15814c618..41ebdd8812b1 100644
6639     --- a/sound/pci/hda/patch_analog.c
6640     +++ b/sound/pci/hda/patch_analog.c
6641     @@ -1085,6 +1085,7 @@ static int patch_ad1884(struct hda_codec *codec)
6642     spec = codec->spec;
6643    
6644     spec->gen.mixer_nid = 0x20;
6645     + spec->gen.mixer_merge_nid = 0x21;
6646     spec->gen.beep_nid = 0x10;
6647     set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
6648    
6649     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6650     index 417e0fc2d119..adb374babd18 100644
6651     --- a/sound/pci/hda/patch_hdmi.c
6652     +++ b/sound/pci/hda/patch_hdmi.c
6653     @@ -45,6 +45,7 @@ module_param(static_hdmi_pcm, bool, 0644);
6654     MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
6655    
6656     #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
6657     +#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
6658    
6659     struct hdmi_spec_per_cvt {
6660     hda_nid_t cvt_nid;
6661     @@ -63,9 +64,11 @@ struct hdmi_spec_per_pin {
6662     hda_nid_t pin_nid;
6663     int num_mux_nids;
6664     hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
6665     + hda_nid_t cvt_nid;
6666    
6667     struct hda_codec *codec;
6668     struct hdmi_eld sink_eld;
6669     + struct mutex lock;
6670     struct delayed_work work;
6671     struct snd_kcontrol *eld_ctl;
6672     int repoll_count;
6673     @@ -75,6 +78,9 @@ struct hdmi_spec_per_pin {
6674     bool chmap_set; /* channel-map override by ALSA API? */
6675     unsigned char chmap[8]; /* ALSA API channel-map */
6676     char pcm_name[8]; /* filled in build_pcm callbacks */
6677     +#ifdef CONFIG_PROC_FS
6678     + struct snd_info_entry *proc_entry;
6679     +#endif
6680     };
6681    
6682     struct hdmi_spec {
6683     @@ -351,17 +357,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
6684     {
6685     struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
6686     struct hdmi_spec *spec = codec->spec;
6687     + struct hdmi_spec_per_pin *per_pin;
6688     struct hdmi_eld *eld;
6689     int pin_idx;
6690    
6691     uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
6692    
6693     pin_idx = kcontrol->private_value;
6694     - eld = &get_pin(spec, pin_idx)->sink_eld;
6695     + per_pin = get_pin(spec, pin_idx);
6696     + eld = &per_pin->sink_eld;
6697    
6698     - mutex_lock(&eld->lock);
6699     + mutex_lock(&per_pin->lock);
6700     uinfo->count = eld->eld_valid ? eld->eld_size : 0;
6701     - mutex_unlock(&eld->lock);
6702     + mutex_unlock(&per_pin->lock);
6703    
6704     return 0;
6705     }
6706     @@ -371,15 +379,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
6707     {
6708     struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
6709     struct hdmi_spec *spec = codec->spec;
6710     + struct hdmi_spec_per_pin *per_pin;
6711     struct hdmi_eld *eld;
6712     int pin_idx;
6713    
6714     pin_idx = kcontrol->private_value;
6715     - eld = &get_pin(spec, pin_idx)->sink_eld;
6716     + per_pin = get_pin(spec, pin_idx);
6717     + eld = &per_pin->sink_eld;
6718    
6719     - mutex_lock(&eld->lock);
6720     + mutex_lock(&per_pin->lock);
6721     if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
6722     - mutex_unlock(&eld->lock);
6723     + mutex_unlock(&per_pin->lock);
6724     snd_BUG();
6725     return -EINVAL;
6726     }
6727     @@ -389,7 +399,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
6728     if (eld->eld_valid)
6729     memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
6730     eld->eld_size);
6731     - mutex_unlock(&eld->lock);
6732     + mutex_unlock(&per_pin->lock);
6733    
6734     return 0;
6735     }
6736     @@ -490,6 +500,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
6737     AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
6738     }
6739    
6740     +/*
6741     + * ELD proc files
6742     + */
6743     +
6744     +#ifdef CONFIG_PROC_FS
6745     +static void print_eld_info(struct snd_info_entry *entry,
6746     + struct snd_info_buffer *buffer)
6747     +{
6748     + struct hdmi_spec_per_pin *per_pin = entry->private_data;
6749     +
6750     + mutex_lock(&per_pin->lock);
6751     + snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
6752     + mutex_unlock(&per_pin->lock);
6753     +}
6754     +
6755     +static void write_eld_info(struct snd_info_entry *entry,
6756     + struct snd_info_buffer *buffer)
6757     +{
6758     + struct hdmi_spec_per_pin *per_pin = entry->private_data;
6759     +
6760     + mutex_lock(&per_pin->lock);
6761     + snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
6762     + mutex_unlock(&per_pin->lock);
6763     +}
6764     +
6765     +static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
6766     +{
6767     + char name[32];
6768     + struct hda_codec *codec = per_pin->codec;
6769     + struct snd_info_entry *entry;
6770     + int err;
6771     +
6772     + snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
6773     + err = snd_card_proc_new(codec->bus->card, name, &entry);
6774     + if (err < 0)
6775     + return err;
6776     +
6777     + snd_info_set_text_ops(entry, per_pin, print_eld_info);
6778     + entry->c.text.write = write_eld_info;
6779     + entry->mode |= S_IWUSR;
6780     + per_pin->proc_entry = entry;
6781     +
6782     + return 0;
6783     +}
6784     +
6785     +static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
6786     +{
6787     + if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
6788     + snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
6789     + per_pin->proc_entry = NULL;
6790     + }
6791     +}
6792     +#else
6793     +static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
6794     + int index)
6795     +{
6796     + return 0;
6797     +}
6798     +static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
6799     +{
6800     +}
6801     +#endif
6802    
6803     /*
6804     * Channel mapping routines
6805     @@ -608,25 +680,35 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
6806     bool non_pcm,
6807     int ca)
6808     {
6809     + struct cea_channel_speaker_allocation *ch_alloc;
6810     int i;
6811     int err;
6812     int order;
6813     int non_pcm_mapping[8];
6814    
6815     order = get_channel_allocation_order(ca);
6816     + ch_alloc = &channel_allocations[order];
6817    
6818     if (hdmi_channel_mapping[ca][1] == 0) {
6819     - for (i = 0; i < channel_allocations[order].channels; i++)
6820     - hdmi_channel_mapping[ca][i] = i | (i << 4);
6821     - for (; i < 8; i++)
6822     - hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
6823     + int hdmi_slot = 0;
6824     + /* fill actual channel mappings in ALSA channel (i) order */
6825     + for (i = 0; i < ch_alloc->channels; i++) {
6826     + while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
6827     + hdmi_slot++; /* skip zero slots */
6828     +
6829     + hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
6830     + }
6831     + /* fill the rest of the slots with ALSA channel 0xf */
6832     + for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
6833     + if (!ch_alloc->speakers[7 - hdmi_slot])
6834     + hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
6835     }
6836    
6837     if (non_pcm) {
6838     - for (i = 0; i < channel_allocations[order].channels; i++)
6839     - non_pcm_mapping[i] = i | (i << 4);
6840     + for (i = 0; i < ch_alloc->channels; i++)
6841     + non_pcm_mapping[i] = (i << 4) | i;
6842     for (; i < 8; i++)
6843     - non_pcm_mapping[i] = 0xf | (i << 4);
6844     + non_pcm_mapping[i] = (0xf << 4) | i;
6845     }
6846    
6847     for (i = 0; i < 8; i++) {
6848     @@ -639,25 +721,31 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
6849     break;
6850     }
6851     }
6852     -
6853     - hdmi_debug_channel_mapping(codec, pin_nid);
6854     }
6855    
6856     struct channel_map_table {
6857     unsigned char map; /* ALSA API channel map position */
6858     - unsigned char cea_slot; /* CEA slot value */
6859     int spk_mask; /* speaker position bit mask */
6860     };
6861    
6862     static struct channel_map_table map_tables[] = {
6863     - { SNDRV_CHMAP_FL, 0x00, FL },
6864     - { SNDRV_CHMAP_FR, 0x01, FR },
6865     - { SNDRV_CHMAP_RL, 0x04, RL },
6866     - { SNDRV_CHMAP_RR, 0x05, RR },
6867     - { SNDRV_CHMAP_LFE, 0x02, LFE },
6868     - { SNDRV_CHMAP_FC, 0x03, FC },
6869     - { SNDRV_CHMAP_RLC, 0x06, RLC },
6870     - { SNDRV_CHMAP_RRC, 0x07, RRC },
6871     + { SNDRV_CHMAP_FL, FL },
6872     + { SNDRV_CHMAP_FR, FR },
6873     + { SNDRV_CHMAP_RL, RL },
6874     + { SNDRV_CHMAP_RR, RR },
6875     + { SNDRV_CHMAP_LFE, LFE },
6876     + { SNDRV_CHMAP_FC, FC },
6877     + { SNDRV_CHMAP_RLC, RLC },
6878     + { SNDRV_CHMAP_RRC, RRC },
6879     + { SNDRV_CHMAP_RC, RC },
6880     + { SNDRV_CHMAP_FLC, FLC },
6881     + { SNDRV_CHMAP_FRC, FRC },
6882     + { SNDRV_CHMAP_FLH, FLH },
6883     + { SNDRV_CHMAP_FRH, FRH },
6884     + { SNDRV_CHMAP_FLW, FLW },
6885     + { SNDRV_CHMAP_FRW, FRW },
6886     + { SNDRV_CHMAP_TC, TC },
6887     + { SNDRV_CHMAP_FCH, FCH },
6888     {} /* terminator */
6889     };
6890    
6891     @@ -673,25 +761,19 @@ static int to_spk_mask(unsigned char c)
6892     }
6893    
6894     /* from ALSA API channel position to CEA slot */
6895     -static int to_cea_slot(unsigned char c)
6896     +static int to_cea_slot(int ordered_ca, unsigned char pos)
6897     {
6898     - struct channel_map_table *t = map_tables;
6899     - for (; t->map; t++) {
6900     - if (t->map == c)
6901     - return t->cea_slot;
6902     - }
6903     - return 0x0f;
6904     -}
6905     + int mask = to_spk_mask(pos);
6906     + int i;
6907    
6908     -/* from CEA slot to ALSA API channel position */
6909     -static int from_cea_slot(unsigned char c)
6910     -{
6911     - struct channel_map_table *t = map_tables;
6912     - for (; t->map; t++) {
6913     - if (t->cea_slot == c)
6914     - return t->map;
6915     + if (mask) {
6916     + for (i = 0; i < 8; i++) {
6917     + if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
6918     + return i;
6919     + }
6920     }
6921     - return 0;
6922     +
6923     + return -1;
6924     }
6925    
6926     /* from speaker bit mask to ALSA API channel position */
6927     @@ -705,6 +787,14 @@ static int spk_to_chmap(int spk)
6928     return 0;
6929     }
6930    
6931     +/* from CEA slot to ALSA API channel position */
6932     +static int from_cea_slot(int ordered_ca, unsigned char slot)
6933     +{
6934     + int mask = channel_allocations[ordered_ca].speakers[7 - slot];
6935     +
6936     + return spk_to_chmap(mask);
6937     +}
6938     +
6939     /* get the CA index corresponding to the given ALSA API channel map */
6940     static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
6941     {
6942     @@ -731,16 +821,27 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
6943     /* set up the channel slots for the given ALSA API channel map */
6944     static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
6945     hda_nid_t pin_nid,
6946     - int chs, unsigned char *map)
6947     + int chs, unsigned char *map,
6948     + int ca)
6949     {
6950     - int i;
6951     - for (i = 0; i < 8; i++) {
6952     + int ordered_ca = get_channel_allocation_order(ca);
6953     + int alsa_pos, hdmi_slot;
6954     + int assignments[8] = {[0 ... 7] = 0xf};
6955     +
6956     + for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
6957     +
6958     + hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
6959     +
6960     + if (hdmi_slot < 0)
6961     + continue; /* unassigned channel */
6962     +
6963     + assignments[hdmi_slot] = alsa_pos;
6964     + }
6965     +
6966     + for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
6967     int val, err;
6968     - if (i < chs)
6969     - val = to_cea_slot(map[i]);
6970     - else
6971     - val = 0xf;
6972     - val |= (i << 4);
6973     +
6974     + val = (assignments[hdmi_slot] << 4) | hdmi_slot;
6975     err = snd_hda_codec_write(codec, pin_nid, 0,
6976     AC_VERB_SET_HDMI_CHAN_SLOT, val);
6977     if (err)
6978     @@ -756,7 +857,7 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
6979     int ordered_ca = get_channel_allocation_order(ca);
6980     for (i = 0; i < 8; i++) {
6981     if (i < channel_allocations[ordered_ca].channels)
6982     - map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
6983     + map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
6984     else
6985     map[i] = 0;
6986     }
6987     @@ -769,11 +870,13 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
6988     {
6989     if (!non_pcm && chmap_set) {
6990     hdmi_manual_setup_channel_mapping(codec, pin_nid,
6991     - channels, map);
6992     + channels, map, ca);
6993     } else {
6994     hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
6995     hdmi_setup_fake_chmap(map, ca);
6996     }
6997     +
6998     + hdmi_debug_channel_mapping(codec, pin_nid);
6999     }
7000    
7001     /*
7002     @@ -903,8 +1006,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7003     {
7004     hda_nid_t pin_nid = per_pin->pin_nid;
7005     int channels = per_pin->channels;
7006     + int active_channels;
7007     struct hdmi_eld *eld;
7008     - int ca;
7009     + int ca, ordered_ca;
7010     union audio_infoframe ai;
7011    
7012     if (!channels)
7013     @@ -926,6 +1030,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7014     if (ca < 0)
7015     ca = 0;
7016    
7017     + ordered_ca = get_channel_allocation_order(ca);
7018     + active_channels = channel_allocations[ordered_ca].channels;
7019     +
7020     + hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
7021     +
7022     memset(&ai, 0, sizeof(ai));
7023     if (eld->info.conn_type == 0) { /* HDMI */
7024     struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
7025     @@ -933,7 +1042,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7026     hdmi_ai->type = 0x84;
7027     hdmi_ai->ver = 0x01;
7028     hdmi_ai->len = 0x0a;
7029     - hdmi_ai->CC02_CT47 = channels - 1;
7030     + hdmi_ai->CC02_CT47 = active_channels - 1;
7031     hdmi_ai->CA = ca;
7032     hdmi_checksum_audio_infoframe(hdmi_ai);
7033     } else if (eld->info.conn_type == 1) { /* DisplayPort */
7034     @@ -942,7 +1051,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7035     dp_ai->type = 0x84;
7036     dp_ai->len = 0x1b;
7037     dp_ai->ver = 0x11 << 2;
7038     - dp_ai->CC02_CT47 = channels - 1;
7039     + dp_ai->CC02_CT47 = active_channels - 1;
7040     dp_ai->CA = ca;
7041     } else {
7042     snd_printd("HDMI: unknown connection type at pin %d\n",
7043     @@ -966,9 +1075,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7044     if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
7045     sizeof(ai))) {
7046     snd_printdd("hdmi_setup_audio_infoframe: "
7047     - "pin=%d channels=%d\n",
7048     + "pin=%d channels=%d ca=0x%02x\n",
7049     pin_nid,
7050     - channels);
7051     + active_channels, ca);
7052     hdmi_stop_infoframe_trans(codec, pin_nid);
7053     hdmi_fill_audio_infoframe(codec, pin_nid,
7054     ai.bytes, sizeof(ai));
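
The net effect of these infoframe hunks: the CC field is now computed from the channel allocation's channel count (active_channels) rather than the stream's, since CEA-861 encodes the count as N - 1 and the allocation may carry more channels than the stream opened with. A trivial sketch with assumed example values:

#include <stdio.h>

int main(void)
{
	int channels = 2;		/* what the stream opened with */
	int active_channels = 6;	/* what the chosen CA provides */

	/* CEA-861 stores "channel count minus one" in the CC bits */
	printf("stream=%d, infoframe CC=0x%x\n",
	       channels, active_channels - 1);
	return 0;
}
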
7055     @@ -983,7 +1092,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
7056     * Unsolicited events
7057     */
7058    
7059     -static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
7060     +static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
7061    
7062     static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
7063     {
7064     @@ -1009,8 +1118,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
7065     if (pin_idx < 0)
7066     return;
7067    
7068     - hdmi_present_sense(get_pin(spec, pin_idx), 1);
7069     - snd_hda_jack_report_sync(codec);
7070     + if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
7071     + snd_hda_jack_report_sync(codec);
7072     }
7073    
7074     static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
7075     @@ -1160,7 +1269,16 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
7076     return 0;
7077     }
7078    
7079     -static void haswell_config_cvts(struct hda_codec *codec,
7080     +/* Intel HDMI workaround to fix audio routing issue:
7081     + * For some Intel display codecs, pins share the same connection list.
7082     + * So a converter can be selected by multiple pins, and playback on any of
7083     + * these pins will generate sound on the external display, because audio flows
7084     + * from the same converter to the display pipeline. Muting one pin may also
7085     + * leave the other pins with no sound output.
7086     + * This function therefore ensures that a converter assigned to one pin is not
7087     + * selected by any other pin.
7088     + */
7089     +static void intel_not_share_assigned_cvt(struct hda_codec *codec,
7090     hda_nid_t pin_nid, int mux_idx)
7091     {
7092     struct hdmi_spec *spec = codec->spec;
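
Only the renamed function's head is visible in this hunk, so the following is a toy userspace model of the idea the comment describes, not the driver's code: every other pin that currently selects the claimed converter is steered to a different entry of the shared connection list (the real function walks the codec graph and issues connect-select verbs):

#include <stdio.h>

#define NUM_PINS 3

/* toy model: each pin's converter selection is just an index 0..2 */
static int pin_mux[NUM_PINS] = { 1, 1, 0 };

/* steer every *other* pin away from the converter claimed by my_pin */
static void not_share_assigned_cvt(int my_pin, int mux_idx)
{
	int pin;

	for (pin = 0; pin < NUM_PINS; pin++) {
		if (pin == my_pin || pin_mux[pin] != mux_idx)
			continue;
		/* pick any different entry from the shared connection list */
		pin_mux[pin] = (mux_idx + 1) % 3;
		printf("moved pin %d off converter %d\n", pin, mux_idx);
	}
}

int main(void)
{
	not_share_assigned_cvt(0, 1);	/* pin 0 claims converter 1 */
	return 0;
}
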
7093     @@ -1231,6 +1349,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
7094     per_cvt = get_cvt(spec, cvt_idx);
7095     /* Claim converter */
7096     per_cvt->assigned = 1;
7097     + per_pin->cvt_nid = per_cvt->cvt_nid;
7098     hinfo->nid = per_cvt->cvt_nid;
7099    
7100     snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
7101     @@ -1238,8 +1357,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
7102     mux_idx);
7103    
7104     /* configure unused pins to choose other converters */
7105     - if (is_haswell(codec))
7106     - haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
7107     + if (is_haswell(codec) || is_valleyview(codec))
7108     + intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
7109    
7110     snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
7111    
7112     @@ -1297,7 +1416,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
7113     return 0;
7114     }
7115    
7116     -static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7117     +static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7118     {
7119     struct hda_codec *codec = per_pin->codec;
7120     struct hdmi_spec *spec = codec->spec;
7121     @@ -1312,10 +1431,15 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7122     * specification worked this way. Hence, we just ignore the data in
7123     * the unsolicited response to avoid custom WARs.
7124     */
7125     - int present = snd_hda_pin_sense(codec, pin_nid);
7126     + int present;
7127     bool update_eld = false;
7128     bool eld_changed = false;
7129     + bool ret;
7130    
7131     + snd_hda_power_up(codec);
7132     + present = snd_hda_pin_sense(codec, pin_nid);
7133     +
7134     + mutex_lock(&per_pin->lock);
7135     pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
7136     if (pin_eld->monitor_present)
7137     eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
7138     @@ -1345,11 +1469,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7139     queue_delayed_work(codec->bus->workq,
7140     &per_pin->work,
7141     msecs_to_jiffies(300));
7142     - return;
7143     + goto unlock;
7144     }
7145     }
7146    
7147     - mutex_lock(&pin_eld->lock);
7148     if (pin_eld->eld_valid && !eld->eld_valid) {
7149     update_eld = true;
7150     eld_changed = true;
7151     @@ -1374,12 +1497,19 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
7152     hdmi_setup_audio_infoframe(codec, per_pin,
7153     per_pin->non_pcm);
7154     }
7155     - mutex_unlock(&pin_eld->lock);
7156    
7157     if (eld_changed)
7158     snd_ctl_notify(codec->bus->card,
7159     SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
7160     &per_pin->eld_ctl->id);
7161     + unlock:
7162     + if ((codec->vendor_id & 0xffff0000) == 0x10020000)
7163     + ret = true; /* AMD codecs create the ELD by themselves */
7164     + else
7165     + ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
7166     + mutex_unlock(&per_pin->lock);
7167     + snd_hda_power_down(codec);
7168     + return ret;
7169     }
7170    
7171     static void hdmi_repoll_eld(struct work_struct *work)
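
The new return value decides whether the caller should sync the jack state immediately. A small sketch of the vendor test, where 0x1002 is the ATI/AMD vendor ID held in the upper half of the HDA codec ID; the sample codec ID below is made up:

#include <stdio.h>

int main(void)
{
	unsigned int vendor_id = 0x10027919;	/* made-up AMD-style codec ID */

	/* upper 16 bits of an HDA codec ID hold the PCI vendor */
	if ((vendor_id & 0xffff0000) == 0x10020000)
		printf("AMD codec: trust the codec-built ELD, report jack now\n");
	else
		printf("non-AMD: wait for a valid ELD before reporting\n");
	return 0;
}
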
7172     @@ -1390,7 +1520,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
7173     if (per_pin->repoll_count++ > 6)
7174     per_pin->repoll_count = 0;
7175    
7176     - hdmi_present_sense(per_pin, per_pin->repoll_count);
7177     + if (hdmi_present_sense(per_pin, per_pin->repoll_count))
7178     + snd_hda_jack_report_sync(per_pin->codec);
7179     }
7180    
7181     static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
7182     @@ -1551,12 +1682,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
7183     int pinctl;
7184    
7185     non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
7186     + mutex_lock(&per_pin->lock);
7187     per_pin->channels = substream->runtime->channels;
7188     per_pin->setup = true;
7189    
7190     - hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
7191     -
7192     hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
7193     + mutex_unlock(&per_pin->lock);
7194    
7195     if (spec->dyn_pin_out) {
7196     pinctl = snd_hda_codec_read(codec, pin_nid, 0,
7197     @@ -1611,11 +1742,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
7198     }
7199    
7200     snd_hda_spdif_ctls_unassign(codec, pin_idx);
7201     +
7202     + mutex_lock(&per_pin->lock);
7203     per_pin->chmap_set = false;
7204     memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
7205    
7206     per_pin->setup = false;
7207     per_pin->channels = 0;
7208     + mutex_unlock(&per_pin->lock);
7209     }
7210    
7211     return 0;
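
These hunks retire the per-ELD mutex in favour of a per-pin lock that covers channels, setup and the chmap, so the hotplug path (hdmi_present_sense) and the PCM prepare/close paths serialize on the same lock. A rough userspace analogy with pthreads; structure and names are illustrative only:

#include <pthread.h>
#include <stdio.h>

/* cut-down stand-in for hdmi_spec_per_pin: one lock guards all of it */
struct per_pin {
	pthread_mutex_t lock;
	int channels;
	int setup;
};

static struct per_pin pin = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void *hotplug(void *arg)		/* models hdmi_present_sense() */
{
	(void)arg;
	pthread_mutex_lock(&pin.lock);
	if (pin.setup)
		printf("hotplug sees %d channels\n", pin.channels);
	pthread_mutex_unlock(&pin.lock);
	return NULL;
}

int main(void)				/* models pcm_prepare() */
{
	pthread_t t;

	pthread_mutex_lock(&pin.lock);
	pin.channels = 2;
	pin.setup = 1;
	pthread_mutex_unlock(&pin.lock);

	pthread_create(&t, NULL, hotplug, NULL);
	pthread_join(t, NULL);
	return 0;
}
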
7212     @@ -1650,8 +1784,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
7213     struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
7214     struct hda_codec *codec = info->private_data;
7215     struct hdmi_spec *spec = codec->spec;
7216     - const unsigned int valid_mask =
7217     - FL | FR | RL | RR | LFE | FC | RLC | RRC;
7218     unsigned int __user *dst;
7219     int chs, count = 0;
7220    
7221     @@ -1669,8 +1801,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
7222     int chs_bytes = chs * 4;
7223     if (cap->channels != chs)
7224     continue;
7225     - if (cap->spk_mask & ~valid_mask)
7226     - continue;
7227     if (size < 8)
7228     return -ENOMEM;
7229     if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
7230     @@ -1748,10 +1878,12 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
7231     ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
7232     if (ca < 0)
7233     return -EINVAL;
7234     + mutex_lock(&per_pin->lock);
7235     per_pin->chmap_set = true;
7236     memcpy(per_pin->chmap, chmap, sizeof(chmap));
7237     if (prepared)
7238     hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
7239     + mutex_unlock(&per_pin->lock);
7240    
7241     return 0;
7242     }
7243     @@ -1868,12 +2000,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
7244    
7245     for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
7246     struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
7247     - struct hdmi_eld *eld = &per_pin->sink_eld;
7248    
7249     per_pin->codec = codec;
7250     - mutex_init(&eld->lock);
7251     + mutex_init(&per_pin->lock);
7252     INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
7253     - snd_hda_eld_proc_new(codec, eld, pin_idx);
7254     + eld_proc_new(per_pin, pin_idx);
7255     }
7256     return 0;
7257     }
7258     @@ -1914,10 +2045,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
7259    
7260     for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
7261     struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
7262     - struct hdmi_eld *eld = &per_pin->sink_eld;
7263    
7264     cancel_delayed_work(&per_pin->work);
7265     - snd_hda_eld_proc_free(codec, eld);
7266     + eld_proc_free(per_pin);
7267     }
7268    
7269     flush_workqueue(codec->bus->workq);
7270     @@ -2717,6 +2847,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
7271     { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
7272     { .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
7273     { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
7274     +{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
7275     { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
7276     {} /* terminator */
7277     };
7278     @@ -2771,6 +2902,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
7279     MODULE_ALIAS("snd-hda-codec-id:80862806");
7280     MODULE_ALIAS("snd-hda-codec-id:80862807");
7281     MODULE_ALIAS("snd-hda-codec-id:80862880");
7282     +MODULE_ALIAS("snd-hda-codec-id:80862882");
7283     MODULE_ALIAS("snd-hda-codec-id:808629fb");
7284    
7285     MODULE_LICENSE("GPL");
7286     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7287     index 1be437f533a6..deddee9c1565 100644
7288     --- a/sound/pci/hda/patch_realtek.c
7289     +++ b/sound/pci/hda/patch_realtek.c
7290     @@ -3464,6 +3464,19 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
7291     alc_fixup_headset_mode(codec, fix, action);
7292     }
7293    
7294     +static void alc_no_shutup(struct hda_codec *codec)
7295     +{
7296     +}
7297     +
7298     +static void alc_fixup_no_shutup(struct hda_codec *codec,
7299     + const struct hda_fixup *fix, int action)
7300     +{
7301     + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7302     + struct alc_spec *spec = codec->spec;
7303     + spec->shutup = alc_no_shutup;
7304     + }
7305     +}
7306     +
7307     static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
7308     const struct hda_fixup *fix, int action)
7309     {
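
The fixup itself is fully visible above: at PRE_PROBE it replaces the codec's shutup hook with an empty function so the mute-on-suspend sequence is skipped on the affected model. A stripped-down sketch of that callback-swap pattern; the types here are stand-ins, not the driver's:

#include <stdio.h>

struct alc_like_spec {
	void (*shutup)(void);	/* called before suspend/poweroff */
};

static void default_shutup(void) { printf("muting pins before D3\n"); }
static void no_shutup(void) { }	/* deliberately does nothing */

int main(void)
{
	struct alc_like_spec spec = { .shutup = default_shutup };

	/* the PRE_PROBE fixup simply swaps the callback, as above */
	spec.shutup = no_shutup;
	spec.shutup();	/* silent: the problematic mute sequence is skipped */
	return 0;
}
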
7310     @@ -3674,6 +3687,7 @@ enum {
7311     ALC269_FIXUP_HP_GPIO_LED,
7312     ALC269_FIXUP_INV_DMIC,
7313     ALC269_FIXUP_LENOVO_DOCK,
7314     + ALC269_FIXUP_NO_SHUTUP,
7315     ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
7316     ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
7317     ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
7318     @@ -3840,6 +3854,10 @@ static const struct hda_fixup alc269_fixups[] = {
7319     .type = HDA_FIXUP_FUNC,
7320     .v.func = alc_fixup_inv_dmic_0x12,
7321     },
7322     + [ALC269_FIXUP_NO_SHUTUP] = {
7323     + .type = HDA_FIXUP_FUNC,
7324     + .v.func = alc_fixup_no_shutup,
7325     + },
7326     [ALC269_FIXUP_LENOVO_DOCK] = {
7327     .type = HDA_FIXUP_PINS,
7328     .v.pins = (const struct hda_pintbl[]) {
7329     @@ -4000,6 +4018,7 @@ static const struct hda_fixup alc269_fixups[] = {
7330     };
7331    
7332     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7333     + SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
7334     SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
7335     SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
7336     SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
7337     @@ -4089,6 +4108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7338     SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7339     SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7340     SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7341     + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
7342     SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7343     SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
7344     SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7345     diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
7346     index 77acd790ea47..eb7ad7706205 100644
7347     --- a/sound/pci/oxygen/xonar_dg.c
7348     +++ b/sound/pci/oxygen/xonar_dg.c
7349     @@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
7350     oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
7351     data->output_sel == 1 ? GPIO_HP_REAR : 0,
7352     GPIO_HP_REAR);
7353     + oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
7354     + data->output_sel == 0 ?
7355     + OXYGEN_PLAY_MUTE01 :
7356     + OXYGEN_PLAY_MUTE23 |
7357     + OXYGEN_PLAY_MUTE45 |
7358     + OXYGEN_PLAY_MUTE67,
7359     + OXYGEN_PLAY_MUTE01 |
7360     + OXYGEN_PLAY_MUTE23 |
7361     + OXYGEN_PLAY_MUTE45 |
7362     + OXYGEN_PLAY_MUTE67);
7363     }
7364     mutex_unlock(&chip->mutex);
7365     return changed;
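
The added call mutes whichever DAC pairs do not belong to the selected output via a masked register write. A sketch of the read-modify-write semantics, mirroring the ternary in the hunk above, with made-up bit values standing in for the OXYGEN_PLAY_MUTE* constants:

#include <stdio.h>

/* illustrative bit positions; the real OXYGEN_PLAY_MUTE* values may differ */
#define MUTE01 0x01
#define MUTE23 0x02
#define MUTE45 0x04
#define MUTE67 0x08

/* read-modify-write, as oxygen_write8_masked() does on the hardware */
static unsigned char write_masked(unsigned char reg,
				  unsigned char value, unsigned char mask)
{
	return (reg & ~mask) | (value & mask);
}

int main(void)
{
	unsigned char routing = 0;
	int output_sel = 0;

	routing = write_masked(routing,
			       output_sel == 0 ? MUTE01
					       : MUTE23 | MUTE45 | MUTE67,
			       MUTE01 | MUTE23 | MUTE45 | MUTE67);
	printf("routing = 0x%02x\n", routing);
	return 0;
}
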
7366     @@ -596,7 +606,7 @@ struct oxygen_model model_xonar_dg = {
7367     .model_data_size = sizeof(struct dg),
7368     .device_config = PLAYBACK_0_TO_I2S |
7369     PLAYBACK_1_TO_SPDIF |
7370     - CAPTURE_0_FROM_I2S_2 |
7371     + CAPTURE_0_FROM_I2S_1 |
7372     CAPTURE_1_FROM_SPDIF,
7373     .dac_channels_pcm = 6,
7374     .dac_channels_mixer = 0,
7375     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
7376     index 95558ef4a7a0..be4db47cb2d9 100644
7377     --- a/sound/usb/mixer.c
7378     +++ b/sound/usb/mixer.c
7379     @@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
7380     }
7381     break;
7382    
7383     + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
7384     case USB_ID(0x046d, 0x0808):
7385     case USB_ID(0x046d, 0x0809):
7386     case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */