Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.39-r2/0100-2.6.39.1-all-fixes.patch

Revision 1424
Mon Jul 18 14:29:21 2011 UTC by niro
File size: 219887 bytes
-2.6.39-magellan-r2: -using linux-2.6.39.3 and removed deprecated acpi procfs options from config
1 diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
2 index 5ebf5af..5aa5337 100644
3 --- a/Documentation/i2c/writing-clients
4 +++ b/Documentation/i2c/writing-clients
5 @@ -38,7 +38,7 @@ static struct i2c_driver foo_driver = {
6 .name = "foo",
7 },
8
9 - .id_table = foo_ids,
10 + .id_table = foo_idtable,
11 .probe = foo_probe,
12 .remove = foo_remove,
13 /* if device autodetection is needed: */
14 diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
15 index 612e722..37a02ce 100644
16 --- a/Documentation/usb/linux-cdc-acm.inf
17 +++ b/Documentation/usb/linux-cdc-acm.inf
18 @@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
19 [SourceDisksFiles]
20 [SourceDisksNames]
21 [DeviceList]
22 -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02
23 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
24
25 [DeviceList.NTamd64]
26 -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02
27 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
28
29
30 ;------------------------------------------------------------------------------
31 diff --git a/Documentation/usb/linux.inf b/Documentation/usb/linux.inf
32 index 4dee958..4ffa715b0 100644
33 --- a/Documentation/usb/linux.inf
34 +++ b/Documentation/usb/linux.inf
35 @@ -18,15 +18,15 @@ DriverVer = 06/21/2006,6.0.6000.16384
36
37 ; Decoration for x86 architecture
38 [LinuxDevices.NTx86]
39 -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
40 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
41
42 ; Decoration for x64 architecture
43 [LinuxDevices.NTamd64]
44 -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
45 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
46
47 ; Decoration for ia64 architecture
48 [LinuxDevices.NTia64]
49 -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
50 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
51
52 ;@@@ This is the common setting for setup
53 [ControlFlags]
54 diff --git a/Makefile b/Makefile
55 index 123d858..045b186 100644
56 --- a/Makefile
57 +++ b/Makefile
58 @@ -1374,7 +1374,7 @@ endif # KBUILD_EXTMOD
59 clean: $(clean-dirs)
60 $(call cmd,rmdirs)
61 $(call cmd,rmfiles)
62 - @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
63 + @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
64 \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
65 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66 -o -name '*.symtypes' -o -name 'modules.order' \
67 diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
68 index c96fa1b..73b4a8b 100644
69 --- a/arch/arm/mm/cache-v6.S
70 +++ b/arch/arm/mm/cache-v6.S
71 @@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
72 */
73 ENTRY(v6_flush_kern_dcache_area)
74 add r1, r0, r1
75 + bic r0, r0, #D_CACHE_LINE_SIZE - 1
76 1:
77 #ifdef HARVARD_CACHE
78 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
79 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
80 index dc18d81..d32f02b 100644
81 --- a/arch/arm/mm/cache-v7.S
82 +++ b/arch/arm/mm/cache-v7.S
83 @@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
84 ENTRY(v7_flush_kern_dcache_area)
85 dcache_line_size r2, r3
86 add r1, r0, r1
87 + sub r3, r2, #1
88 + bic r0, r0, r3
89 1:
90 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
91 add r0, r0, r2
92 diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
93 index 9b8393d..c54cca87 100644
94 --- a/arch/m68k/kernel/syscalltable.S
95 +++ b/arch/m68k/kernel/syscalltable.S
96 @@ -319,8 +319,8 @@ ENTRY(sys_call_table)
97 .long sys_readlinkat
98 .long sys_fchmodat
99 .long sys_faccessat /* 300 */
100 - .long sys_ni_syscall /* Reserved for pselect6 */
101 - .long sys_ni_syscall /* Reserved for ppoll */
102 + .long sys_pselect6
103 + .long sys_ppoll
104 .long sys_unshare
105 .long sys_set_robust_list
106 .long sys_get_robust_list /* 305 */
107 diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
108 index 3eb82c2..9cbc2c3 100644
109 --- a/arch/parisc/include/asm/unistd.h
110 +++ b/arch/parisc/include/asm/unistd.h
111 @@ -814,8 +814,14 @@
112 #define __NR_recvmmsg (__NR_Linux + 319)
113 #define __NR_accept4 (__NR_Linux + 320)
114 #define __NR_prlimit64 (__NR_Linux + 321)
115 -
116 -#define __NR_Linux_syscalls (__NR_prlimit64 + 1)
117 +#define __NR_fanotify_init (__NR_Linux + 322)
118 +#define __NR_fanotify_mark (__NR_Linux + 323)
119 +#define __NR_clock_adjtime (__NR_Linux + 324)
120 +#define __NR_name_to_handle_at (__NR_Linux + 325)
121 +#define __NR_open_by_handle_at (__NR_Linux + 326)
122 +#define __NR_syncfs (__NR_Linux + 327)
123 +
124 +#define __NR_Linux_syscalls (__NR_syncfs + 1)
125
126
127 #define __IGNORE_select /* newselect */
128 diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
129 index 88a0ad1..dc9a624 100644
130 --- a/arch/parisc/kernel/sys_parisc32.c
131 +++ b/arch/parisc/kernel/sys_parisc32.c
132 @@ -228,3 +228,11 @@ asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
133 return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
134 ((loff_t)lenhi << 32) | lenlo);
135 }
136 +
137 +asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
138 + u32 mask_lo, int fd,
139 + const char __user *pathname)
140 +{
141 + return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
142 + fd, pathname);
143 +}
144 diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
145 index 4be85ee..a5b02ce 100644
146 --- a/arch/parisc/kernel/syscall_table.S
147 +++ b/arch/parisc/kernel/syscall_table.S
148 @@ -420,6 +420,12 @@
149 ENTRY_COMP(recvmmsg)
150 ENTRY_SAME(accept4) /* 320 */
151 ENTRY_SAME(prlimit64)
152 + ENTRY_SAME(fanotify_init)
153 + ENTRY_COMP(fanotify_mark)
154 + ENTRY_COMP(clock_adjtime)
155 + ENTRY_SAME(name_to_handle_at) /* 325 */
156 + ENTRY_COMP(open_by_handle_at)
157 + ENTRY_SAME(syncfs)
158
159 /* Nothing yet */
160
161 diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
162 index 5b5e1f0..c37ff6b 100644
163 --- a/arch/powerpc/kernel/crash.c
164 +++ b/arch/powerpc/kernel/crash.c
165 @@ -170,7 +170,7 @@ static void crash_kexec_wait_realmode(int cpu)
166 int i;
167
168 msecs = 10000;
169 - for (i=0; i < NR_CPUS && msecs > 0; i++) {
170 + for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
171 if (i == cpu)
172 continue;
173
174 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
175 index 206a321..e89df59 100644
176 --- a/arch/powerpc/kernel/misc_64.S
177 +++ b/arch/powerpc/kernel/misc_64.S
178 @@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
179 * wait for the flag to change, indicating this kernel is going away but
180 * the slave code for the next one is at addresses 0 to 100.
181 *
182 - * This is used by all slaves.
183 + * This is used by all slaves, even those that did not find a matching
184 + * paca in the secondary startup code.
185 *
186 * Physical (hardware) cpu id should be in r3.
187 */
188 @@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
189 1: mflr r5
190 addi r5,r5,kexec_flag-1b
191
192 - li r4,KEXEC_STATE_REAL_MODE
193 - stb r4,PACAKEXECSTATE(r13)
194 - SYNC
195 -
196 99: HMT_LOW
197 #ifdef CONFIG_KEXEC /* use no memory without kexec */
198 lwz r4,0(r5)
199 @@ -499,11 +496,17 @@ kexec_flag:
200 *
201 * get phys id from paca
202 * switch to real mode
203 + * mark the paca as no longer used
204 * join other cpus in kexec_wait(phys_id)
205 */
206 _GLOBAL(kexec_smp_wait)
207 lhz r3,PACAHWCPUID(r13)
208 bl real_mode
209 +
210 + li r4,KEXEC_STATE_REAL_MODE
211 + stb r4,PACAKEXECSTATE(r13)
212 + SYNC
213 +
214 b .kexec_wait
215
216 /*
217 diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
218 index 8ee51a2..e6bec74 100644
219 --- a/arch/powerpc/oprofile/op_model_power4.c
220 +++ b/arch/powerpc/oprofile/op_model_power4.c
221 @@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
222 return is_kernel;
223 }
224
225 +static bool pmc_overflow(unsigned long val)
226 +{
227 + if ((int)val < 0)
228 + return true;
229 +
230 + /*
231 + * Events on POWER7 can roll back if a speculative event doesn't
232 + * eventually complete. Unfortunately in some rare cases they will
233 + * raise a performance monitor exception. We need to catch this to
234 + * ensure we reset the PMC. In all cases the PMC will be 256 or less
235 + * cycles from overflow.
236 + *
237 + * We only do this if the first pass fails to find any overflowing
238 + * PMCs because a user might set a period of less than 256 and we
239 + * don't want to mistakenly reset them.
240 + */
241 + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
242 + return true;
243 +
244 + return false;
245 +}
246 +
247 static void power4_handle_interrupt(struct pt_regs *regs,
248 struct op_counter_config *ctr)
249 {
250 @@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
251
252 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
253 val = classic_ctr_read(i);
254 - if (val < 0) {
255 + if (pmc_overflow(val)) {
256 if (oprofile_running && ctr[i].enabled) {
257 oprofile_add_ext_sample(pc, regs, i, is_kernel);
258 classic_ctr_write(i, reset_value[i]);
259 diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
260 index d49c213..ae95935 100644
261 --- a/arch/sh/kernel/cpu/Makefile
262 +++ b/arch/sh/kernel/cpu/Makefile
263 @@ -17,7 +17,5 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
264
265 obj-$(CONFIG_SH_ADC) += adc.o
266 obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
267 -obj-$(CONFIG_SH_FPU) += fpu.o
268 -obj-$(CONFIG_SH_FPU_EMU) += fpu.o
269
270 -obj-y += irq/ init.o clock.o hwblk.o proc.o
271 +obj-y += irq/ init.o clock.o fpu.o hwblk.o proc.o
272 diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
273 index a9da516..795ea8e 100644
274 --- a/arch/um/Kconfig.x86
275 +++ b/arch/um/Kconfig.x86
276 @@ -29,10 +29,10 @@ config X86_64
277 def_bool 64BIT
278
279 config RWSEM_XCHGADD_ALGORITHM
280 - def_bool X86_XADD
281 + def_bool X86_XADD && 64BIT
282
283 config RWSEM_GENERIC_SPINLOCK
284 - def_bool !X86_XADD
285 + def_bool !RWSEM_XCHGADD_ALGORITHM
286
287 config 3_LEVEL_PGTABLES
288 bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
289 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
290 index 91f3e087..cc5b052 100644
291 --- a/arch/x86/include/asm/cpufeature.h
292 +++ b/arch/x86/include/asm/cpufeature.h
293 @@ -125,7 +125,7 @@
294 #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
295 #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
296 #define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
297 -#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */
298 +#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
299 #define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
300
301 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
302 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
303 index abd3e0e..99f0ad7 100644
304 --- a/arch/x86/include/asm/uaccess.h
305 +++ b/arch/x86/include/asm/uaccess.h
306 @@ -42,7 +42,7 @@
307 * Returns 0 if the range is valid, nonzero otherwise.
308 *
309 * This is equivalent to the following test:
310 - * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
311 + * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
312 *
313 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
314 */
315 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
316 index 45fd33d..df63620 100644
317 --- a/arch/x86/kernel/apic/io_apic.c
318 +++ b/arch/x86/kernel/apic/io_apic.c
319 @@ -621,14 +621,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
320 struct IO_APIC_route_entry **ioapic_entries;
321
322 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
323 - GFP_KERNEL);
324 + GFP_ATOMIC);
325 if (!ioapic_entries)
326 return 0;
327
328 for (apic = 0; apic < nr_ioapics; apic++) {
329 ioapic_entries[apic] =
330 kzalloc(sizeof(struct IO_APIC_route_entry) *
331 - nr_ioapic_registers[apic], GFP_KERNEL);
332 + nr_ioapic_registers[apic], GFP_ATOMIC);
333 if (!ioapic_entries[apic])
334 goto nomem;
335 }
336 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
337 index 6f9d1f6..b13ed39 100644
338 --- a/arch/x86/kernel/cpu/amd.c
339 +++ b/arch/x86/kernel/cpu/amd.c
340 @@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
341 }
342 #endif
343
344 - /* As a rule processors have APIC timer running in deep C states */
345 - if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
346 + /*
347 + * Family 0x12 and above processors have APIC timer
348 + * running in deep C states.
349 + */
350 + if (c->x86 > 0x11)
351 set_cpu_cap(c, X86_FEATURE_ARAT);
352
353 /*
354 @@ -629,10 +632,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
355 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
356 */
357 u64 mask;
358 + int err;
359
360 - rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
361 - mask |= (1 << 10);
362 - wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
363 + err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
364 + if (err == 0) {
365 + mask |= (1 << 10);
366 + checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
367 + }
368 }
369 }
370
371 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
372 index e2ced00..173f3a3 100644
373 --- a/arch/x86/kernel/cpu/common.c
374 +++ b/arch/x86/kernel/cpu/common.c
375 @@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
376
377 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
378
379 - if (eax > 0)
380 - c->x86_capability[9] = ebx;
381 + c->x86_capability[9] = ebx;
382 }
383
384 /* AMD-defined flags: level 0x80000001 */
385 diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
386 index 755a31e..907c8e6 100644
387 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
388 +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
389 @@ -39,7 +39,7 @@
390
391 #include <acpi/processor.h>
392
393 -#define PCC_VERSION "1.00.00"
394 +#define PCC_VERSION "1.10.00"
395 #define POLL_LOOPS 300
396
397 #define CMD_COMPLETE 0x1
398 @@ -102,7 +102,7 @@ static struct acpi_generic_address doorbell;
399 static u64 doorbell_preserve;
400 static u64 doorbell_write;
401
402 -static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f,
403 +static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
404 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
405
406 struct pcc_cpu {
407 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
408 index 4be9b39..c6724e4 100644
409 --- a/arch/x86/kernel/setup.c
410 +++ b/arch/x86/kernel/setup.c
411 @@ -912,6 +912,13 @@ void __init setup_arch(char **cmdline_p)
412 memblock.current_limit = get_max_mapped();
413 memblock_x86_fill();
414
415 + /*
416 + * The EFI specification says that boot service code won't be called
417 + * after ExitBootServices(). This is, in fact, a lie.
418 + */
419 + if (efi_enabled)
420 + efi_reserve_boot_services();
421 +
422 /* preallocate 4k for mptable mpc */
423 early_reserve_e820_mpc_new();
424
425 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
426 index 99e4826..a73397f 100644
427 --- a/arch/x86/lib/copy_user_64.S
428 +++ b/arch/x86/lib/copy_user_64.S
429 @@ -72,7 +72,7 @@ ENTRY(_copy_to_user)
430 addq %rdx,%rcx
431 jc bad_to_user
432 cmpq TI_addr_limit(%rax),%rcx
433 - jae bad_to_user
434 + ja bad_to_user
435 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
436 CFI_ENDPROC
437 ENDPROC(_copy_to_user)
438 @@ -85,7 +85,7 @@ ENTRY(_copy_from_user)
439 addq %rdx,%rcx
440 jc bad_from_user
441 cmpq TI_addr_limit(%rax),%rcx
442 - jae bad_from_user
443 + ja bad_from_user
444 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
445 CFI_ENDPROC
446 ENDPROC(_copy_from_user)
447 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
448 index c3b8e24..9fd8a56 100644
449 --- a/arch/x86/oprofile/op_model_amd.c
450 +++ b/arch/x86/oprofile/op_model_amd.c
451 @@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
452 wrmsrl(MSR_AMD64_IBSOPCTL, 0);
453 }
454
455 -static inline int eilvt_is_available(int offset)
456 +static inline int get_eilvt(int offset)
457 {
458 - /* check if we may assign a vector */
459 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
460 }
461
462 +static inline int put_eilvt(int offset)
463 +{
464 + return !setup_APIC_eilvt(offset, 0, 0, 1);
465 +}
466 +
467 static inline int ibs_eilvt_valid(void)
468 {
469 int offset;
470 u64 val;
471 + int valid = 0;
472 +
473 + preempt_disable();
474
475 rdmsrl(MSR_AMD64_IBSCTL, val);
476 offset = val & IBSCTL_LVT_OFFSET_MASK;
477 @@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
478 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
479 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
480 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
481 - return 0;
482 + goto out;
483 }
484
485 - if (!eilvt_is_available(offset)) {
486 + if (!get_eilvt(offset)) {
487 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
488 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
489 - return 0;
490 + goto out;
491 }
492
493 - return 1;
494 + valid = 1;
495 +out:
496 + preempt_enable();
497 +
498 + return valid;
499 }
500
501 static inline int get_ibs_offset(void)
502 @@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
503
504 static int force_ibs_eilvt_setup(void)
505 {
506 - int i;
507 + int offset;
508 int ret;
509
510 - /* find the next free available EILVT entry */
511 - for (i = 1; i < 4; i++) {
512 - if (!eilvt_is_available(i))
513 - continue;
514 - ret = setup_ibs_ctl(i);
515 - if (ret)
516 - return ret;
517 - pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
518 - return 0;
519 + /*
520 + * find the next free available EILVT entry, skip offset 0,
521 + * pin search to this cpu
522 + */
523 + preempt_disable();
524 + for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
525 + if (get_eilvt(offset))
526 + break;
527 }
528 + preempt_enable();
529
530 - printk(KERN_DEBUG "No EILVT entry available\n");
531 -
532 - return -EBUSY;
533 -}
534 -
535 -static int __init_ibs_nmi(void)
536 -{
537 - int ret;
538 -
539 - if (ibs_eilvt_valid())
540 - return 0;
541 + if (offset == APIC_EILVT_NR_MAX) {
542 + printk(KERN_DEBUG "No EILVT entry available\n");
543 + return -EBUSY;
544 + }
545
546 - ret = force_ibs_eilvt_setup();
547 + ret = setup_ibs_ctl(offset);
548 if (ret)
549 - return ret;
550 + goto out;
551
552 - if (!ibs_eilvt_valid())
553 - return -EFAULT;
554 + if (!ibs_eilvt_valid()) {
555 + ret = -EFAULT;
556 + goto out;
557 + }
558
559 + pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
560 pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
561
562 return 0;
563 +out:
564 + preempt_disable();
565 + put_eilvt(offset);
566 + preempt_enable();
567 + return ret;
568 }
569
570 /*
571 * check and reserve APIC extended interrupt LVT offset for IBS if
572 * available
573 - *
574 - * init_ibs() preforms implicitly cpu-local operations, so pin this
575 - * thread to its current CPU
576 */
577
578 static void init_ibs(void)
579 {
580 - preempt_disable();
581 -
582 ibs_caps = get_ibs_caps();
583 +
584 if (!ibs_caps)
585 + return;
586 +
587 + if (ibs_eilvt_valid())
588 goto out;
589
590 - if (__init_ibs_nmi() < 0)
591 - ibs_caps = 0;
592 - else
593 - printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
594 + if (!force_ibs_eilvt_setup())
595 + goto out;
596 +
597 + /* Failed to setup ibs */
598 + ibs_caps = 0;
599 + return;
600
601 out:
602 - preempt_enable();
603 + printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
604 }
605
606 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
607 diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
608 index 0fe27d7..b00c4ea 100644
609 --- a/arch/x86/platform/efi/efi.c
610 +++ b/arch/x86/platform/efi/efi.c
611 @@ -315,6 +315,40 @@ static void __init print_efi_memmap(void)
612 }
613 #endif /* EFI_DEBUG */
614
615 +void __init efi_reserve_boot_services(void)
616 +{
617 + void *p;
618 +
619 + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
620 + efi_memory_desc_t *md = p;
621 + unsigned long long start = md->phys_addr;
622 + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
623 +
624 + if (md->type != EFI_BOOT_SERVICES_CODE &&
625 + md->type != EFI_BOOT_SERVICES_DATA)
626 + continue;
627 +
628 + memblock_x86_reserve_range(start, start + size, "EFI Boot");
629 + }
630 +}
631 +
632 +static void __init efi_free_boot_services(void)
633 +{
634 + void *p;
635 +
636 + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
637 + efi_memory_desc_t *md = p;
638 + unsigned long long start = md->phys_addr;
639 + unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
640 +
641 + if (md->type != EFI_BOOT_SERVICES_CODE &&
642 + md->type != EFI_BOOT_SERVICES_DATA)
643 + continue;
644 +
645 + free_bootmem_late(start, size);
646 + }
647 +}
648 +
649 void __init efi_init(void)
650 {
651 efi_config_table_t *config_tables;
652 @@ -507,7 +541,9 @@ void __init efi_enter_virtual_mode(void)
653 efi.systab = NULL;
654 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
655 md = p;
656 - if (!(md->attribute & EFI_MEMORY_RUNTIME))
657 + if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
658 + md->type != EFI_BOOT_SERVICES_CODE &&
659 + md->type != EFI_BOOT_SERVICES_DATA)
660 continue;
661
662 size = md->num_pages << EFI_PAGE_SHIFT;
663 @@ -558,6 +594,13 @@ void __init efi_enter_virtual_mode(void)
664 }
665
666 /*
667 + * Thankfully, it does seem that no runtime services other than
668 + * SetVirtualAddressMap() will touch boot services code, so we can
669 + * get rid of it all at this point
670 + */
671 + efi_free_boot_services();
672 +
673 + /*
674 * Now that EFI is in virtual mode, update the function
675 * pointers in the runtime service table to the new virtual addresses.
676 *
677 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
678 index ac0621a..641264c 100644
679 --- a/arch/x86/platform/efi/efi_64.c
680 +++ b/arch/x86/platform/efi/efi_64.c
681 @@ -64,10 +64,11 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
682 if (!(__supported_pte_mask & _PAGE_NX))
683 return;
684
685 - /* Make EFI runtime service code area executable */
686 + /* Make EFI service code area executable */
687 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
688 md = p;
689 - if (md->type == EFI_RUNTIME_SERVICES_CODE) {
690 + if (md->type == EFI_RUNTIME_SERVICES_CODE ||
691 + md->type == EFI_BOOT_SERVICES_CODE) {
692 unsigned long end;
693 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
694 early_mapping_set_exec(md->phys_addr, end, executable);
695 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
696 index 0684f3c..f298bd7 100644
697 --- a/arch/x86/xen/mmu.c
698 +++ b/arch/x86/xen/mmu.c
699 @@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info)
700
701 active_mm = percpu_read(cpu_tlbstate.active_mm);
702
703 - if (active_mm == mm)
704 + if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
705 leave_mm(smp_processor_id());
706
707 /* If this cpu still has a stale cr3 reference, then make sure
708 diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
709 index 141eb0d..c881ae4 100644
710 --- a/arch/x86/xen/p2m.c
711 +++ b/arch/x86/xen/p2m.c
712 @@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
713 /* Boundary cross-over for the edges: */
714 if (idx) {
715 unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
716 + unsigned long *mid_mfn_p;
717
718 p2m_init(p2m);
719
720 p2m_top[topidx][mididx] = p2m;
721
722 + /* For save/restore we need to MFN of the P2M saved */
723 +
724 + mid_mfn_p = p2m_top_mfn_p[topidx];
725 + WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
726 + "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
727 + topidx, mididx);
728 + mid_mfn_p[mididx] = virt_to_mfn(p2m);
729 +
730 }
731 return idx != 0;
732 }
733 @@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
734 pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
735 {
736 unsigned topidx = p2m_top_index(pfn);
737 - if (p2m_top[topidx] == p2m_mid_missing) {
738 - unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
739 + unsigned long *mid_mfn_p;
740 + unsigned long **mid;
741 +
742 + mid = p2m_top[topidx];
743 + mid_mfn_p = p2m_top_mfn_p[topidx];
744 + if (mid == p2m_mid_missing) {
745 + mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
746
747 p2m_mid_init(mid);
748
749 p2m_top[topidx] = mid;
750 +
751 + BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
752 + }
753 + /* And the save/restore P2M tables.. */
754 + if (mid_mfn_p == p2m_mid_missing_mfn) {
755 + mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
756 + p2m_mid_mfn_init(mid_mfn_p);
757 +
758 + p2m_top_mfn_p[topidx] = mid_mfn_p;
759 + p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
760 + /* Note: we don't set mid_mfn_p[midix] here,
761 + * look in __early_alloc_p2m */
762 }
763 }
764
765 diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
766 index 90bac0a..ca6297b 100644
767 --- a/arch/x86/xen/setup.c
768 +++ b/arch/x86/xen/setup.c
769 @@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
770 if (last > end)
771 continue;
772
773 - if (entry->type == E820_RAM) {
774 + if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
775 if (start > start_pci)
776 identity += set_phys_range_identity(
777 PFN_UP(start_pci), PFN_DOWN(start));
778 @@ -227,7 +227,11 @@ char * __init xen_memory_setup(void)
779
780 memcpy(map_raw, map, sizeof(map));
781 e820.nr_map = 0;
782 +#ifdef CONFIG_X86_32
783 + xen_extra_mem_start = mem_end;
784 +#else
785 xen_extra_mem_start = max((1ULL << 32), mem_end);
786 +#endif
787 for (i = 0; i < memmap.nr_entries; i++) {
788 unsigned long long end;
789
790 diff --git a/block/blk-flush.c b/block/blk-flush.c
791 index 6c9b5e1..bb21e4c 100644
792 --- a/block/blk-flush.c
793 +++ b/block/blk-flush.c
794 @@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
795 }
796
797 /*
798 - * Moving a request silently to empty queue_head may stall the
799 - * queue. Kick the queue in those cases. This function is called
800 - * from request completion path and calling directly into
801 - * request_fn may confuse the driver. Always use kblockd.
802 + * Kick the queue to avoid stall for two cases:
803 + * 1. Moving a request silently to empty queue_head may stall the
804 + * queue.
805 + * 2. When flush request is running in non-queueable queue, the
806 + * queue is hold. Restart the queue after flush request is finished
807 + * to avoid stall.
808 + * This function is called from request completion path and calling
809 + * directly into request_fn may confuse the driver. Always use
810 + * kblockd.
811 */
812 - if (queued)
813 + if (queued || q->flush_queue_delayed)
814 blk_run_queue_async(q);
815 + q->flush_queue_delayed = 0;
816 }
817
818 /**
819 diff --git a/block/blk-settings.c b/block/blk-settings.c
820 index 1fa7692..fa1eb04 100644
821 --- a/block/blk-settings.c
822 +++ b/block/blk-settings.c
823 @@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim)
824 lim->discard_granularity = 0;
825 lim->discard_alignment = 0;
826 lim->discard_misaligned = 0;
827 - lim->discard_zeroes_data = -1;
828 + lim->discard_zeroes_data = 1;
829 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
830 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
831 lim->alignment_offset = 0;
832 @@ -166,6 +166,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
833
834 blk_set_default_limits(&q->limits);
835 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
836 + q->limits.discard_zeroes_data = 0;
837
838 /*
839 * by default assume old behaviour and bounce for any highmem page
840 @@ -790,6 +791,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
841 }
842 EXPORT_SYMBOL_GPL(blk_queue_flush);
843
844 +void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
845 +{
846 + q->flush_not_queueable = !queueable;
847 +}
848 +EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
849 +
850 static int __init blk_settings_init(void)
851 {
852 blk_max_low_pfn = max_low_pfn - 1;
853 diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
854 index bd23631..d935bd8 100644
855 --- a/block/blk-sysfs.c
856 +++ b/block/blk-sysfs.c
857 @@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
858
859 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
860 {
861 - return queue_var_show(q->limits.max_discard_sectors << 9, page);
862 + return sprintf(page, "%llu\n",
863 + (unsigned long long)q->limits.max_discard_sectors << 9);
864 }
865
866 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
867 diff --git a/block/blk.h b/block/blk.h
868 index 6126346..1566e8d 100644
869 --- a/block/blk.h
870 +++ b/block/blk.h
871 @@ -61,8 +61,28 @@ static inline struct request *__elv_next_request(struct request_queue *q)
872 rq = list_entry_rq(q->queue_head.next);
873 return rq;
874 }
875 -
876 - if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
877 + /*
878 + * Flush request is running and flush request isn't queueable
879 + * in the drive, we can hold the queue till flush request is
880 + * finished. Even we don't do this, driver can't dispatch next
881 + * requests and will requeue them. And this can improve
882 + * throughput too. For example, we have request flush1, write1,
883 + * flush 2. flush1 is dispatched, then queue is hold, write1
884 + * isn't inserted to queue. After flush1 is finished, flush2
885 + * will be dispatched. Since disk cache is already clean,
886 + * flush2 will be finished very soon, so looks like flush2 is
887 + * folded to flush1.
888 + * Since the queue is hold, a flag is set to indicate the queue
889 + * should be restarted later. Please see flush_end_io() for
890 + * details.
891 + */
892 + if (q->flush_pending_idx != q->flush_running_idx &&
893 + !queue_flush_queueable(q)) {
894 + q->flush_queue_delayed = 1;
895 + return NULL;
896 + }
897 + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
898 + !q->elevator->ops->elevator_dispatch_fn(q, 0))
899 return NULL;
900 }
901 }
902 diff --git a/block/genhd.c b/block/genhd.c
903 index 2dd9887..95822ae 100644
904 --- a/block/genhd.c
905 +++ b/block/genhd.c
906 @@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk)
907 {
908 struct disk_events *ev;
909
910 - if (!disk->fops->check_events || !(disk->events | disk->async_events))
911 + if (!disk->fops->check_events)
912 return;
913
914 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
915 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
916 index e2f57e9e..d51f979 100644
917 --- a/drivers/ata/libata-scsi.c
918 +++ b/drivers/ata/libata-scsi.c
919 @@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
920 static int ata_scsi_dev_config(struct scsi_device *sdev,
921 struct ata_device *dev)
922 {
923 + struct request_queue *q = sdev->request_queue;
924 +
925 if (!ata_id_has_unload(dev->id))
926 dev->flags |= ATA_DFLAG_NO_UNLOAD;
927
928 /* configure max sectors */
929 - blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
930 + blk_queue_max_hw_sectors(q, dev->max_sectors);
931
932 if (dev->class == ATA_DEV_ATAPI) {
933 - struct request_queue *q = sdev->request_queue;
934 void *buf;
935
936 sdev->sector_size = ATA_SECT_SIZE;
937
938 /* set DMA padding */
939 - blk_queue_update_dma_pad(sdev->request_queue,
940 - ATA_DMA_PAD_SZ - 1);
941 + blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
942
943 /* configure draining */
944 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
945 @@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
946 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
947 sdev->sector_size);
948
949 - blk_queue_update_dma_alignment(sdev->request_queue,
950 - sdev->sector_size - 1);
951 + blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
952
953 if (dev->flags & ATA_DFLAG_AN)
954 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
955 @@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
956 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
957 }
958
959 + blk_queue_flush_queueable(q, false);
960 +
961 dev->sdev = sdev;
962 return 0;
963 }
964 @@ -2138,7 +2139,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
965 * with the unmap bit set.
966 */
967 if (ata_id_has_trim(args->id)) {
968 - put_unaligned_be32(65535 * 512 / 8, &rbuf[20]);
969 + put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
970 put_unaligned_be32(1, &rbuf[28]);
971 }
972
973 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
974 index 905ff76..635a759 100644
975 --- a/drivers/ata/pata_cmd64x.c
976 +++ b/drivers/ata/pata_cmd64x.c
977 @@ -41,6 +41,9 @@
978 enum {
979 CFR = 0x50,
980 CFR_INTR_CH0 = 0x04,
981 + CNTRL = 0x51,
982 + CNTRL_CH0 = 0x04,
983 + CNTRL_CH1 = 0x08,
984 CMDTIM = 0x52,
985 ARTTIM0 = 0x53,
986 DRWTIM0 = 0x54,
987 @@ -328,9 +331,19 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
988 .port_ops = &cmd648_port_ops
989 }
990 };
991 - const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
992 - u8 mrdmode;
993 + const struct ata_port_info *ppi[] = {
994 + &cmd_info[id->driver_data],
995 + &cmd_info[id->driver_data],
996 + NULL
997 + };
998 + u8 mrdmode, reg;
999 int rc;
1000 + struct pci_dev *bridge = pdev->bus->self;
1001 + /* mobility split bridges don't report enabled ports correctly */
1002 + int port_ok = !(bridge && bridge->vendor ==
1003 + PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
1004 + /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
1005 + int cntrl_ch0_ok = (id->driver_data != 0);
1006
1007 rc = pcim_enable_device(pdev);
1008 if (rc)
1009 @@ -341,11 +354,18 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1010
1011 if (pdev->device == PCI_DEVICE_ID_CMD_646) {
1012 /* Does UDMA work ? */
1013 - if (pdev->revision > 4)
1014 + if (pdev->revision > 4) {
1015 ppi[0] = &cmd_info[2];
1016 + ppi[1] = &cmd_info[2];
1017 + }
1018 /* Early rev with other problems ? */
1019 - else if (pdev->revision == 1)
1020 + else if (pdev->revision == 1) {
1021 ppi[0] = &cmd_info[3];
1022 + ppi[1] = &cmd_info[3];
1023 + }
1024 + /* revs 1,2 have no CNTRL_CH0 */
1025 + if (pdev->revision < 3)
1026 + cntrl_ch0_ok = 0;
1027 }
1028
1029 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
1030 @@ -354,6 +374,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1031 mrdmode |= 0x02; /* Memory read line enable */
1032 pci_write_config_byte(pdev, MRDMODE, mrdmode);
1033
1034 + /* check for enabled ports */
1035 + pci_read_config_byte(pdev, CNTRL, &reg);
1036 + if (!port_ok)
1037 + dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
1038 + if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
1039 + dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
1040 + ppi[0] = &ata_dummy_port_info;
1041 +
1042 + }
1043 + if (port_ok && !(reg & CNTRL_CH1)) {
1044 + dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
1045 + ppi[1] = &ata_dummy_port_info;
1046 + }
1047 +
1048 /* Force PIO 0 here.. */
1049
1050 /* PPC specific fixup copied from old driver */
1051 diff --git a/drivers/block/brd.c b/drivers/block/brd.c
1052 index b7f51e4..c94bc48 100644
1053 --- a/drivers/block/brd.c
1054 +++ b/drivers/block/brd.c
1055 @@ -552,7 +552,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
1056 struct kobject *kobj;
1057
1058 mutex_lock(&brd_devices_mutex);
1059 - brd = brd_init_one(dev & MINORMASK);
1060 + brd = brd_init_one(MINOR(dev) >> part_shift);
1061 kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
1062 mutex_unlock(&brd_devices_mutex);
1063
1064 @@ -585,15 +585,18 @@ static int __init brd_init(void)
1065 if (max_part > 0)
1066 part_shift = fls(max_part);
1067
1068 + if ((1UL << part_shift) > DISK_MAX_PARTS)
1069 + return -EINVAL;
1070 +
1071 if (rd_nr > 1UL << (MINORBITS - part_shift))
1072 return -EINVAL;
1073
1074 if (rd_nr) {
1075 nr = rd_nr;
1076 - range = rd_nr;
1077 + range = rd_nr << part_shift;
1078 } else {
1079 nr = CONFIG_BLK_DEV_RAM_COUNT;
1080 - range = 1UL << (MINORBITS - part_shift);
1081 + range = 1UL << MINORBITS;
1082 }
1083
1084 if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
1085 @@ -632,7 +635,7 @@ static void __exit brd_exit(void)
1086 unsigned long range;
1087 struct brd_device *brd, *next;
1088
1089 - range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
1090 + range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
1091
1092 list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
1093 brd_del_one(brd);
1094 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1095 index a076a14..c59a672 100644
1096 --- a/drivers/block/loop.c
1097 +++ b/drivers/block/loop.c
1098 @@ -1658,7 +1658,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1099 struct kobject *kobj;
1100
1101 mutex_lock(&loop_devices_mutex);
1102 - lo = loop_init_one(dev & MINORMASK);
1103 + lo = loop_init_one(MINOR(dev) >> part_shift);
1104 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
1105 mutex_unlock(&loop_devices_mutex);
1106
1107 @@ -1691,15 +1691,18 @@ static int __init loop_init(void)
1108 if (max_part > 0)
1109 part_shift = fls(max_part);
1110
1111 + if ((1UL << part_shift) > DISK_MAX_PARTS)
1112 + return -EINVAL;
1113 +
1114 if (max_loop > 1UL << (MINORBITS - part_shift))
1115 return -EINVAL;
1116
1117 if (max_loop) {
1118 nr = max_loop;
1119 - range = max_loop;
1120 + range = max_loop << part_shift;
1121 } else {
1122 nr = 8;
1123 - range = 1UL << (MINORBITS - part_shift);
1124 + range = 1UL << MINORBITS;
1125 }
1126
1127 if (register_blkdev(LOOP_MAJOR, "loop"))
1128 @@ -1738,7 +1741,7 @@ static void __exit loop_exit(void)
1129 unsigned long range;
1130 struct loop_device *lo, *next;
1131
1132 - range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
1133 + range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1134
1135 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1136 loop_del_one(lo);
1137 diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
1138 index 8690e31..46b8136 100644
1139 --- a/drivers/block/paride/pcd.c
1140 +++ b/drivers/block/paride/pcd.c
1141 @@ -320,6 +320,7 @@ static void pcd_init_units(void)
1142 disk->first_minor = unit;
1143 strcpy(disk->disk_name, cd->name); /* umm... */
1144 disk->fops = &pcd_bdops;
1145 + disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1146 }
1147 }
1148
1149 diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
1150 index e427fbe..7878da8 100644
1151 --- a/drivers/cdrom/viocd.c
1152 +++ b/drivers/cdrom/viocd.c
1153 @@ -625,7 +625,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1154 blk_queue_max_hw_sectors(q, 4096 / 512);
1155 gendisk->queue = q;
1156 gendisk->fops = &viocd_fops;
1157 - gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
1158 + gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
1159 + GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1160 set_capacity(gendisk, 0);
1161 gendisk->private_data = d;
1162 d->viocd_disk = gendisk;
1163 diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
1164 index d72433f..ee01716 100644
1165 --- a/drivers/char/i8k.c
1166 +++ b/drivers/char/i8k.c
1167 @@ -139,8 +139,8 @@ static int i8k_smm(struct smm_regs *regs)
1168 "movl %%edi,20(%%rax)\n\t"
1169 "popq %%rdx\n\t"
1170 "movl %%edx,0(%%rax)\n\t"
1171 - "lahf\n\t"
1172 - "shrl $8,%%eax\n\t"
1173 + "pushfq\n\t"
1174 + "popq %%rax\n\t"
1175 "andl $1,%%eax\n"
1176 :"=a"(rc)
1177 : "a"(regs)
1178 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1179 index 2dafc5c..7c10f96 100644
1180 --- a/drivers/cpufreq/cpufreq.c
1181 +++ b/drivers/cpufreq/cpufreq.c
1182 @@ -1208,12 +1208,28 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1183 cpufreq_driver->exit(data);
1184 unlock_policy_rwsem_write(cpu);
1185
1186 + cpufreq_debug_enable_ratelimit();
1187 +
1188 +#ifdef CONFIG_HOTPLUG_CPU
1189 + /* when the CPU which is the parent of the kobj is hotplugged
1190 + * offline, check for siblings, and create cpufreq sysfs interface
1191 + * and symlinks
1192 + */
1193 + if (unlikely(cpumask_weight(data->cpus) > 1)) {
1194 + /* first sibling now owns the new sysfs dir */
1195 + cpumask_clear_cpu(cpu, data->cpus);
1196 + cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
1197 +
1198 + /* finally remove our own symlink */
1199 + lock_policy_rwsem_write(cpu);
1200 + __cpufreq_remove_dev(sys_dev);
1201 + }
1202 +#endif
1203 +
1204 free_cpumask_var(data->related_cpus);
1205 free_cpumask_var(data->cpus);
1206 kfree(data);
1207 - per_cpu(cpufreq_cpu_data, cpu) = NULL;
1208
1209 - cpufreq_debug_enable_ratelimit();
1210 return 0;
1211 }
1212
1213 diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
1214 index 00d73fc..4f1b8de 100644
1215 --- a/drivers/cpufreq/cpufreq_stats.c
1216 +++ b/drivers/cpufreq/cpufreq_stats.c
1217 @@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
1218 return -1;
1219 }
1220
1221 +/* should be called late in the CPU removal sequence so that the stats
1222 + * memory is still available in case someone tries to use it.
1223 + */
1224 static void cpufreq_stats_free_table(unsigned int cpu)
1225 {
1226 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
1227 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1228 - if (policy && policy->cpu == cpu)
1229 - sysfs_remove_group(&policy->kobj, &stats_attr_group);
1230 if (stat) {
1231 kfree(stat->time_in_state);
1232 kfree(stat);
1233 }
1234 per_cpu(cpufreq_stats_table, cpu) = NULL;
1235 +}
1236 +
1237 +/* must be called early in the CPU removal sequence (before
1238 + * cpufreq_remove_dev) so that policy is still valid.
1239 + */
1240 +static void cpufreq_stats_free_sysfs(unsigned int cpu)
1241 +{
1242 + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1243 + if (policy && policy->cpu == cpu)
1244 + sysfs_remove_group(&policy->kobj, &stats_attr_group);
1245 if (policy)
1246 cpufreq_cpu_put(policy);
1247 }
1248 @@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
1249 case CPU_ONLINE_FROZEN:
1250 cpufreq_update_policy(cpu);
1251 break;
1252 + case CPU_DOWN_PREPARE:
1253 + cpufreq_stats_free_sysfs(cpu);
1254 + break;
1255 case CPU_DEAD:
1256 case CPU_DEAD_FROZEN:
1257 cpufreq_stats_free_table(cpu);
1258 @@ -324,9 +337,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
1259 return NOTIFY_OK;
1260 }
1261
1262 +/* priority=1 so this will get called before cpufreq_remove_dev */
1263 static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
1264 {
1265 .notifier_call = cpufreq_stat_cpu_callback,
1266 + .priority = 1,
1267 };
1268
1269 static struct notifier_block notifier_policy_block = {
1270 diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
1271 index f508690..c47f3d0 100644
1272 --- a/drivers/cpuidle/governors/menu.c
1273 +++ b/drivers/cpuidle/governors/menu.c
1274 @@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
1275 unsigned int power_usage = -1;
1276 int i;
1277 int multiplier;
1278 + struct timespec t;
1279
1280 if (data->needs_update) {
1281 menu_update(dev);
1282 @@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
1283 return 0;
1284
1285 /* determine the expected residency time, round up */
1286 + t = ktime_to_timespec(tick_nohz_get_sleep_length());
1287 data->expected_us =
1288 - DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
1289 + t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
1290
1291
1292 data->bucket = which_bucket(data->expected_us);
1293 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1294 index e9e6f71..c4504a2 100644
1295 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1296 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1297 @@ -666,12 +666,37 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
1298 static bool
1299 bsd_ring_get_irq(struct intel_ring_buffer *ring)
1300 {
1301 - return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
1302 + struct drm_device *dev = ring->dev;
1303 + drm_i915_private_t *dev_priv = dev->dev_private;
1304 +
1305 + if (!dev->irq_enabled)
1306 + return false;
1307 +
1308 + spin_lock(&ring->irq_lock);
1309 + if (ring->irq_refcount++ == 0) {
1310 + if (IS_G4X(dev))
1311 + i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
1312 + else
1313 + ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
1314 + }
1315 + spin_unlock(&ring->irq_lock);
1316 +
1317 + return true;
1318 }
1319 static void
1320 bsd_ring_put_irq(struct intel_ring_buffer *ring)
1321 {
1322 - ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
1323 + struct drm_device *dev = ring->dev;
1324 + drm_i915_private_t *dev_priv = dev->dev_private;
1325 +
1326 + spin_lock(&ring->irq_lock);
1327 + if (--ring->irq_refcount == 0) {
1328 + if (IS_G4X(dev))
1329 + i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
1330 + else
1331 + ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
1332 + }
1333 + spin_unlock(&ring->irq_lock);
1334 }
1335
1336 static int
1337 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1338 index 9073e3b..296e6ec 100644
1339 --- a/drivers/gpu/drm/radeon/evergreen.c
1340 +++ b/drivers/gpu/drm/radeon/evergreen.c
1341 @@ -1578,7 +1578,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1342 u32 sq_stack_resource_mgmt_2;
1343 u32 sq_stack_resource_mgmt_3;
1344 u32 vgt_cache_invalidation;
1345 - u32 hdp_host_path_cntl;
1346 + u32 hdp_host_path_cntl, tmp;
1347 int i, j, num_shader_engines, ps_thread_count;
1348
1349 switch (rdev->family) {
1350 @@ -2141,6 +2141,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1351 for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
1352 WREG32(i, 0);
1353
1354 + tmp = RREG32(HDP_MISC_CNTL);
1355 + tmp |= HDP_FLUSH_INVALIDATE_CACHE;
1356 + WREG32(HDP_MISC_CNTL, tmp);
1357 +
1358 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1359 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1360
1361 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
1362 index fc40e0c..f37e91e 100644
1363 --- a/drivers/gpu/drm/radeon/evergreend.h
1364 +++ b/drivers/gpu/drm/radeon/evergreend.h
1365 @@ -64,6 +64,8 @@
1366 #define GB_BACKEND_MAP 0x98FC
1367 #define DMIF_ADDR_CONFIG 0xBD4
1368 #define HDP_ADDR_CONFIG 0x2F48
1369 +#define HDP_MISC_CNTL 0x2F4C
1370 +#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
1371
1372 #define CC_SYS_RB_BACKEND_DISABLE 0x3F88
1373 #define GC_USER_RB_BACKEND_DISABLE 0x9B7C
1374 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1375 index 3d8a763..b205ba1 100644
1376 --- a/drivers/gpu/drm/radeon/ni.c
1377 +++ b/drivers/gpu/drm/radeon/ni.c
1378 @@ -417,7 +417,7 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1379 num_shader_engines = 1;
1380 if (num_shader_engines > rdev->config.cayman.max_shader_engines)
1381 num_shader_engines = rdev->config.cayman.max_shader_engines;
1382 - if (num_backends_per_asic > num_shader_engines)
1383 + if (num_backends_per_asic < num_shader_engines)
1384 num_backends_per_asic = num_shader_engines;
1385 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
1386 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
1387 @@ -829,7 +829,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1388 rdev->config.cayman.tile_config |=
1389 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
1390 rdev->config.cayman.tile_config |=
1391 - (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
1392 + ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1393 rdev->config.cayman.tile_config |=
1394 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1395
1396 @@ -931,6 +931,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1397 WREG32(CB_PERF_CTR3_SEL_0, 0);
1398 WREG32(CB_PERF_CTR3_SEL_1, 0);
1399
1400 + tmp = RREG32(HDP_MISC_CNTL);
1401 + tmp |= HDP_FLUSH_INVALIDATE_CACHE;
1402 + WREG32(HDP_MISC_CNTL, tmp);
1403 +
1404 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1405 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1406
1407 diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
1408 index 0f9a08b..b2088c1 100644
1409 --- a/drivers/gpu/drm/radeon/nid.h
1410 +++ b/drivers/gpu/drm/radeon/nid.h
1411 @@ -136,6 +136,8 @@
1412 #define HDP_NONSURFACE_INFO 0x2C08
1413 #define HDP_NONSURFACE_SIZE 0x2C0C
1414 #define HDP_ADDR_CONFIG 0x2F48
1415 +#define HDP_MISC_CNTL 0x2F4C
1416 +#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
1417
1418 #define CC_SYS_RB_BACKEND_DISABLE 0x3F88
1419 #define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
1420 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
1421 index ca57619..d948265 100644
1422 --- a/drivers/gpu/drm/radeon/radeon_asic.c
1423 +++ b/drivers/gpu/drm/radeon/radeon_asic.c
1424 @@ -782,6 +782,7 @@ static struct radeon_asic evergreen_asic = {
1425 .hpd_fini = &evergreen_hpd_fini,
1426 .hpd_sense = &evergreen_hpd_sense,
1427 .hpd_set_polarity = &evergreen_hpd_set_polarity,
1428 + .ioctl_wait_idle = r600_ioctl_wait_idle,
1429 .gui_idle = &r600_gui_idle,
1430 .pm_misc = &evergreen_pm_misc,
1431 .pm_prepare = &evergreen_pm_prepare,
1432 @@ -828,6 +829,7 @@ static struct radeon_asic sumo_asic = {
1433 .hpd_fini = &evergreen_hpd_fini,
1434 .hpd_sense = &evergreen_hpd_sense,
1435 .hpd_set_polarity = &evergreen_hpd_set_polarity,
1436 + .ioctl_wait_idle = r600_ioctl_wait_idle,
1437 .gui_idle = &r600_gui_idle,
1438 .pm_misc = &evergreen_pm_misc,
1439 .pm_prepare = &evergreen_pm_prepare,
1440 @@ -874,6 +876,7 @@ static struct radeon_asic btc_asic = {
1441 .hpd_fini = &evergreen_hpd_fini,
1442 .hpd_sense = &evergreen_hpd_sense,
1443 .hpd_set_polarity = &evergreen_hpd_set_polarity,
1444 + .ioctl_wait_idle = r600_ioctl_wait_idle,
1445 .gui_idle = &r600_gui_idle,
1446 .pm_misc = &evergreen_pm_misc,
1447 .pm_prepare = &evergreen_pm_prepare,
1448 @@ -920,6 +923,7 @@ static struct radeon_asic cayman_asic = {
1449 .hpd_fini = &evergreen_hpd_fini,
1450 .hpd_sense = &evergreen_hpd_sense,
1451 .hpd_set_polarity = &evergreen_hpd_set_polarity,
1452 + .ioctl_wait_idle = r600_ioctl_wait_idle,
1453 .gui_idle = &r600_gui_idle,
1454 .pm_misc = &evergreen_pm_misc,
1455 .pm_prepare = &evergreen_pm_prepare,
1456 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
1457 index 0ec91c1..a5eda4c 100644
1458 --- a/drivers/hid/hid-magicmouse.c
1459 +++ b/drivers/hid/hid-magicmouse.c
1460 @@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev,
1461 }
1462 report->size = 6;
1463
1464 + /*
1465 + * The device reponds with 'invalid report id' when feature
1466 + * report switching it into multitouch mode is sent to it.
1467 + *
1468 + * This results in -EIO from the _raw low-level transport callback,
1469 + * but there seems to be no other way of switching the mode.
1470 + * Thus the super-ugly hacky success check below.
1471 + */
1472 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
1473 HID_FEATURE_REPORT);
1474 - if (ret != sizeof(feature)) {
1475 + if (ret != -EIO) {
1476 hid_err(hdev, "unable to request touch data (%d)\n", ret);
1477 goto err_stop_hw;
1478 }
1479 diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
1480 index 196ffaf..7df490e 100644
1481 --- a/drivers/hwmon/pmbus_core.c
1482 +++ b/drivers/hwmon/pmbus_core.c
1483 @@ -700,6 +700,7 @@ do { \
1484 struct sensor_device_attribute *a \
1485 = &data->_type##s[data->num_##_type##s].attribute; \
1486 BUG_ON(data->num_attributes >= data->max_attributes); \
1487 + sysfs_attr_init(&a->dev_attr.attr); \
1488 a->dev_attr.attr.name = _name; \
1489 a->dev_attr.attr.mode = _mode; \
1490 a->dev_attr.show = _show; \
1491 diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
1492 index b4ab39b..5f1b92c 100644
1493 --- a/drivers/i2c/busses/i2c-tegra.c
1494 +++ b/drivers/i2c/busses/i2c-tegra.c
1495 @@ -330,6 +330,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
1496 i2c_writel(i2c_dev, 0, I2C_INT_MASK);
1497 clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
1498
1499 + if (!i2c_dev->is_dvc) {
1500 + u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
1501 + i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
1502 + }
1503 +
1504 val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
1505 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
1506 i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
1507 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
1508 index a5ec5a7..9560822 100644
1509 --- a/drivers/ide/ide-cd.c
1510 +++ b/drivers/ide/ide-cd.c
1511 @@ -1773,7 +1773,8 @@ static int ide_cd_probe(ide_drive_t *drive)
1512
1513 g->minors = 1;
1514 g->driverfs_dev = &drive->gendev;
1515 - g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
1516 + g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
1517 + GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1518 if (ide_cdrom_setup(drive)) {
1519 put_device(&info->dev);
1520 goto failed;
1521 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
1522 index 5c93627..70bd738 100644
1523 --- a/drivers/md/bitmap.c
1524 +++ b/drivers/md/bitmap.c
1525 @@ -493,11 +493,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
1526 spin_unlock_irqrestore(&bitmap->lock, flags);
1527 sb = kmap_atomic(bitmap->sb_page, KM_USER0);
1528 sb->events = cpu_to_le64(bitmap->mddev->events);
1529 - if (bitmap->mddev->events < bitmap->events_cleared) {
1530 + if (bitmap->mddev->events < bitmap->events_cleared)
1531 /* rocking back to read-only */
1532 bitmap->events_cleared = bitmap->mddev->events;
1533 - sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
1534 - }
1535 + sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
1536 + sb->state = cpu_to_le32(bitmap->flags);
1537 /* Just in case these have been changed via sysfs: */
1538 sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
1539 sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
1540 @@ -618,7 +618,7 @@ success:
1541 if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
1542 bitmap->flags |= BITMAP_HOSTENDIAN;
1543 bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
1544 - if (sb->state & cpu_to_le32(BITMAP_STALE))
1545 + if (bitmap->flags & BITMAP_STALE)
1546 bitmap->events_cleared = bitmap->mddev->events;
1547 err = 0;
1548 out:
1549 @@ -652,9 +652,11 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
1550 switch (op) {
1551 case MASK_SET:
1552 sb->state |= cpu_to_le32(bits);
1553 + bitmap->flags |= bits;
1554 break;
1555 case MASK_UNSET:
1556 sb->state &= cpu_to_le32(~bits);
1557 + bitmap->flags &= ~bits;
1558 break;
1559 default:
1560 BUG();
1561 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1562 index a550a05..aa4e570 100644
1563 --- a/drivers/md/dm-mpath.c
1564 +++ b/drivers/md/dm-mpath.c
1565 @@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
1566 if (!error && !clone->errors)
1567 return 0; /* I/O complete */
1568
1569 - if (error == -EOPNOTSUPP || error == -EREMOTEIO)
1570 + if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
1571 return error;
1572
1573 if (mpio->pgpath)
1574 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1575 index cb8380c..53e603b 100644
1576 --- a/drivers/md/dm-table.c
1577 +++ b/drivers/md/dm-table.c
1578 @@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
1579 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
1580 sector_t start, sector_t len, void *data)
1581 {
1582 + struct request_queue *q;
1583 struct queue_limits *limits = data;
1584 struct block_device *bdev = dev->bdev;
1585 sector_t dev_size =
1586 @@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
1587 limits->logical_block_size >> SECTOR_SHIFT;
1588 char b[BDEVNAME_SIZE];
1589
1590 + /*
1591 + * Some devices exist without request functions,
1592 + * such as loop devices not yet bound to backing files.
1593 + * Forbid the use of such devices.
1594 + */
1595 + q = bdev_get_queue(bdev);
1596 + if (!q || !q->make_request_fn) {
1597 + DMWARN("%s: %s is not yet initialised: "
1598 + "start=%llu, len=%llu, dev_size=%llu",
1599 + dm_device_name(ti->table->md), bdevname(bdev, b),
1600 + (unsigned long long)start,
1601 + (unsigned long long)len,
1602 + (unsigned long long)dev_size);
1603 + return 1;
1604 + }
1605 +
1606 if (!dev_size)
1607 return 0;
1608
1609 diff --git a/drivers/md/md.c b/drivers/md/md.c
1610 index 7d6f7f1..4a4c0f8 100644
1611 --- a/drivers/md/md.c
1612 +++ b/drivers/md/md.c
1613 @@ -4347,13 +4347,19 @@ static int md_alloc(dev_t dev, char *name)
1614 disk->fops = &md_fops;
1615 disk->private_data = mddev;
1616 disk->queue = mddev->queue;
1617 + blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
1618 /* Allow extended partitions. This makes the
1619 * 'mdp' device redundant, but we can't really
1620 * remove it now.
1621 */
1622 disk->flags |= GENHD_FL_EXT_DEVT;
1623 - add_disk(disk);
1624 mddev->gendisk = disk;
1625 + /* As soon as we call add_disk(), another thread could get
1626 + * through to md_open, so make sure it doesn't get too far
1627 + */
1628 + mutex_lock(&mddev->open_mutex);
1629 + add_disk(disk);
1630 +
1631 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
1632 &disk_to_dev(disk)->kobj, "%s", "md");
1633 if (error) {
1634 @@ -4367,8 +4373,7 @@ static int md_alloc(dev_t dev, char *name)
1635 if (mddev->kobj.sd &&
1636 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
1637 printk(KERN_DEBUG "pointless warning\n");
1638 -
1639 - blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
1640 + mutex_unlock(&mddev->open_mutex);
1641 abort:
1642 mutex_unlock(&disks_mutex);
1643 if (!error && mddev->kobj.sd) {
1644 diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
1645 index d4e466a..1d47d4d 100644
1646 --- a/drivers/media/dvb/frontends/dib0070.c
1647 +++ b/drivers/media/dvb/frontends/dib0070.c
1648 @@ -73,27 +73,47 @@ struct dib0070_state {
1649
1650 u8 wbd_gain_current;
1651 u16 wbd_offset_3_3[2];
1652 +
1653 + /* for the I2C transfer */
1654 + struct i2c_msg msg[2];
1655 + u8 i2c_write_buffer[3];
1656 + u8 i2c_read_buffer[2];
1657 };
1658
1659 static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
1660 {
1661 - u8 b[2];
1662 - struct i2c_msg msg[2] = {
1663 - { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
1664 - { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2 },
1665 - };
1666 - if (i2c_transfer(state->i2c, msg, 2) != 2) {
1667 + state->i2c_write_buffer[0] = reg;
1668 +
1669 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
1670 + state->msg[0].addr = state->cfg->i2c_address;
1671 + state->msg[0].flags = 0;
1672 + state->msg[0].buf = state->i2c_write_buffer;
1673 + state->msg[0].len = 1;
1674 + state->msg[1].addr = state->cfg->i2c_address;
1675 + state->msg[1].flags = I2C_M_RD;
1676 + state->msg[1].buf = state->i2c_read_buffer;
1677 + state->msg[1].len = 2;
1678 +
1679 + if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
1680 printk(KERN_WARNING "DiB0070 I2C read failed\n");
1681 return 0;
1682 }
1683 - return (b[0] << 8) | b[1];
1684 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1685 }
1686
1687 static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
1688 {
1689 - u8 b[3] = { reg, val >> 8, val & 0xff };
1690 - struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = b, .len = 3 };
1691 - if (i2c_transfer(state->i2c, &msg, 1) != 1) {
1692 + state->i2c_write_buffer[0] = reg;
1693 + state->i2c_write_buffer[1] = val >> 8;
1694 + state->i2c_write_buffer[2] = val & 0xff;
1695 +
1696 + memset(state->msg, 0, sizeof(struct i2c_msg));
1697 + state->msg[0].addr = state->cfg->i2c_address;
1698 + state->msg[0].flags = 0;
1699 + state->msg[0].buf = state->i2c_write_buffer;
1700 + state->msg[0].len = 3;
1701 +
1702 + if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
1703 printk(KERN_WARNING "DiB0070 I2C write failed\n");
1704 return -EREMOTEIO;
1705 }
1706 diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
1707 index 52ff1a2..c9c935a 100644
1708 --- a/drivers/media/dvb/frontends/dib0090.c
1709 +++ b/drivers/media/dvb/frontends/dib0090.c
1710 @@ -191,6 +191,11 @@ struct dib0090_state {
1711 u8 wbd_calibration_gain;
1712 const struct dib0090_wbd_slope *current_wbd_table;
1713 u16 wbdmux;
1714 +
1715 + /* for the I2C transfer */
1716 + struct i2c_msg msg[2];
1717 + u8 i2c_write_buffer[3];
1718 + u8 i2c_read_buffer[2];
1719 };
1720
1721 struct dib0090_fw_state {
1722 @@ -198,27 +203,48 @@ struct dib0090_fw_state {
1723 struct dvb_frontend *fe;
1724 struct dib0090_identity identity;
1725 const struct dib0090_config *config;
1726 +
1727 + /* for the I2C transfer */
1728 + struct i2c_msg msg;
1729 + u8 i2c_write_buffer[2];
1730 + u8 i2c_read_buffer[2];
1731 };
1732
1733 static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
1734 {
1735 - u8 b[2];
1736 - struct i2c_msg msg[2] = {
1737 - {.addr = state->config->i2c_address, .flags = 0, .buf = &reg, .len = 1},
1738 - {.addr = state->config->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2},
1739 - };
1740 - if (i2c_transfer(state->i2c, msg, 2) != 2) {
1741 + state->i2c_write_buffer[0] = reg;
1742 +
1743 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
1744 + state->msg[0].addr = state->config->i2c_address;
1745 + state->msg[0].flags = 0;
1746 + state->msg[0].buf = state->i2c_write_buffer;
1747 + state->msg[0].len = 1;
1748 + state->msg[1].addr = state->config->i2c_address;
1749 + state->msg[1].flags = I2C_M_RD;
1750 + state->msg[1].buf = state->i2c_read_buffer;
1751 + state->msg[1].len = 2;
1752 +
1753 + if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
1754 printk(KERN_WARNING "DiB0090 I2C read failed\n");
1755 return 0;
1756 }
1757 - return (b[0] << 8) | b[1];
1758 +
1759 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1760 }
1761
1762 static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
1763 {
1764 - u8 b[3] = { reg & 0xff, val >> 8, val & 0xff };
1765 - struct i2c_msg msg = {.addr = state->config->i2c_address, .flags = 0, .buf = b, .len = 3 };
1766 - if (i2c_transfer(state->i2c, &msg, 1) != 1) {
1767 + state->i2c_write_buffer[0] = reg & 0xff;
1768 + state->i2c_write_buffer[1] = val >> 8;
1769 + state->i2c_write_buffer[2] = val & 0xff;
1770 +
1771 + memset(state->msg, 0, sizeof(struct i2c_msg));
1772 + state->msg[0].addr = state->config->i2c_address;
1773 + state->msg[0].flags = 0;
1774 + state->msg[0].buf = state->i2c_write_buffer;
1775 + state->msg[0].len = 3;
1776 +
1777 + if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
1778 printk(KERN_WARNING "DiB0090 I2C write failed\n");
1779 return -EREMOTEIO;
1780 }
1781 @@ -227,20 +253,31 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
1782
1783 static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
1784 {
1785 - u8 b[2];
1786 - struct i2c_msg msg = {.addr = reg, .flags = I2C_M_RD, .buf = b, .len = 2 };
1787 - if (i2c_transfer(state->i2c, &msg, 1) != 1) {
1788 + state->i2c_write_buffer[0] = reg;
1789 +
1790 + memset(&state->msg, 0, sizeof(struct i2c_msg));
1791 + state->msg.addr = reg;
1792 + state->msg.flags = I2C_M_RD;
1793 + state->msg.buf = state->i2c_read_buffer;
1794 + state->msg.len = 2;
1795 + if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
1796 printk(KERN_WARNING "DiB0090 I2C read failed\n");
1797 return 0;
1798 }
1799 - return (b[0] << 8) | b[1];
1800 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1801 }
1802
1803 static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
1804 {
1805 - u8 b[2] = { val >> 8, val & 0xff };
1806 - struct i2c_msg msg = {.addr = reg, .flags = 0, .buf = b, .len = 2 };
1807 - if (i2c_transfer(state->i2c, &msg, 1) != 1) {
1808 + state->i2c_write_buffer[0] = val >> 8;
1809 + state->i2c_write_buffer[1] = val & 0xff;
1810 +
1811 + memset(&state->msg, 0, sizeof(struct i2c_msg));
1812 + state->msg.addr = reg;
1813 + state->msg.flags = 0;
1814 + state->msg.buf = state->i2c_write_buffer;
1815 + state->msg.len = 2;
1816 + if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
1817 printk(KERN_WARNING "DiB0090 I2C write failed\n");
1818 return -EREMOTEIO;
1819 }
1820 diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
1821 index 289a798..79cb1c2 100644
1822 --- a/drivers/media/dvb/frontends/dib7000m.c
1823 +++ b/drivers/media/dvb/frontends/dib7000m.c
1824 @@ -50,6 +50,11 @@ struct dib7000m_state {
1825 u16 revision;
1826
1827 u8 agc_state;
1828 +
1829 + /* for the I2C transfer */
1830 + struct i2c_msg msg[2];
1831 + u8 i2c_write_buffer[4];
1832 + u8 i2c_read_buffer[2];
1833 };
1834
1835 enum dib7000m_power_mode {
1836 @@ -64,29 +69,39 @@ enum dib7000m_power_mode {
1837
1838 static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
1839 {
1840 - u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
1841 - u8 rb[2];
1842 - struct i2c_msg msg[2] = {
1843 - { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
1844 - { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
1845 - };
1846 -
1847 - if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
1848 + state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
1849 + state->i2c_write_buffer[1] = reg & 0xff;
1850 +
1851 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
1852 + state->msg[0].addr = state->i2c_addr >> 1;
1853 + state->msg[0].flags = 0;
1854 + state->msg[0].buf = state->i2c_write_buffer;
1855 + state->msg[0].len = 2;
1856 + state->msg[1].addr = state->i2c_addr >> 1;
1857 + state->msg[1].flags = I2C_M_RD;
1858 + state->msg[1].buf = state->i2c_read_buffer;
1859 + state->msg[1].len = 2;
1860 +
1861 + if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
1862 dprintk("i2c read error on %d",reg);
1863
1864 - return (rb[0] << 8) | rb[1];
1865 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1866 }
1867
1868 static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
1869 {
1870 - u8 b[4] = {
1871 - (reg >> 8) & 0xff, reg & 0xff,
1872 - (val >> 8) & 0xff, val & 0xff,
1873 - };
1874 - struct i2c_msg msg = {
1875 - .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
1876 - };
1877 - return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
1878 + state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
1879 + state->i2c_write_buffer[1] = reg & 0xff;
1880 + state->i2c_write_buffer[2] = (val >> 8) & 0xff;
1881 + state->i2c_write_buffer[3] = val & 0xff;
1882 +
1883 + memset(&state->msg[0], 0, sizeof(struct i2c_msg));
1884 + state->msg[0].addr = state->i2c_addr >> 1;
1885 + state->msg[0].flags = 0;
1886 + state->msg[0].buf = state->i2c_write_buffer;
1887 + state->msg[0].len = 4;
1888 +
1889 + return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
1890 }
1891 static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
1892 {
1893 diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
1894 index 900af60..0c9f40c 100644
1895 --- a/drivers/media/dvb/frontends/dib7000p.c
1896 +++ b/drivers/media/dvb/frontends/dib7000p.c
1897 @@ -63,6 +63,11 @@ struct dib7000p_state {
1898
1899 u16 tuner_enable;
1900 struct i2c_adapter dib7090_tuner_adap;
1901 +
1902 + /* for the I2C transfer */
1903 + struct i2c_msg msg[2];
1904 + u8 i2c_write_buffer[4];
1905 + u8 i2c_read_buffer[2];
1906 };
1907
1908 enum dib7000p_power_mode {
1909 @@ -76,29 +81,39 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
1910
1911 static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
1912 {
1913 - u8 wb[2] = { reg >> 8, reg & 0xff };
1914 - u8 rb[2];
1915 - struct i2c_msg msg[2] = {
1916 - {.addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
1917 - {.addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
1918 - };
1919 + state->i2c_write_buffer[0] = reg >> 8;
1920 + state->i2c_write_buffer[1] = reg & 0xff;
1921 +
1922 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
1923 + state->msg[0].addr = state->i2c_addr >> 1;
1924 + state->msg[0].flags = 0;
1925 + state->msg[0].buf = state->i2c_write_buffer;
1926 + state->msg[0].len = 2;
1927 + state->msg[1].addr = state->i2c_addr >> 1;
1928 + state->msg[1].flags = I2C_M_RD;
1929 + state->msg[1].buf = state->i2c_read_buffer;
1930 + state->msg[1].len = 2;
1931
1932 - if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
1933 + if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
1934 dprintk("i2c read error on %d", reg);
1935
1936 - return (rb[0] << 8) | rb[1];
1937 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
1938 }
1939
1940 static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
1941 {
1942 - u8 b[4] = {
1943 - (reg >> 8) & 0xff, reg & 0xff,
1944 - (val >> 8) & 0xff, val & 0xff,
1945 - };
1946 - struct i2c_msg msg = {
1947 - .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
1948 - };
1949 - return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
1950 + state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
1951 + state->i2c_write_buffer[1] = reg & 0xff;
1952 + state->i2c_write_buffer[2] = (val >> 8) & 0xff;
1953 + state->i2c_write_buffer[3] = val & 0xff;
1954 +
1955 + memset(&state->msg[0], 0, sizeof(struct i2c_msg));
1956 + state->msg[0].addr = state->i2c_addr >> 1;
1957 + state->msg[0].flags = 0;
1958 + state->msg[0].buf = state->i2c_write_buffer;
1959 + state->msg[0].len = 4;
1960 +
1961 + return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
1962 }
1963
1964 static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
1965 @@ -1550,11 +1565,24 @@ static void dib7000p_release(struct dvb_frontend *demod)
1966
1967 int dib7000pc_detection(struct i2c_adapter *i2c_adap)
1968 {
1969 - u8 tx[2], rx[2];
1970 + u8 *tx, *rx;
1971 struct i2c_msg msg[2] = {
1972 - {.addr = 18 >> 1, .flags = 0, .buf = tx, .len = 2},
1973 - {.addr = 18 >> 1, .flags = I2C_M_RD, .buf = rx, .len = 2},
1974 + {.addr = 18 >> 1, .flags = 0, .len = 2},
1975 + {.addr = 18 >> 1, .flags = I2C_M_RD, .len = 2},
1976 };
1977 + int ret = 0;
1978 +
1979 + tx = kzalloc(2*sizeof(u8), GFP_KERNEL);
1980 + if (!tx)
1981 + return -ENOMEM;
1982 + rx = kzalloc(2*sizeof(u8), GFP_KERNEL);
1983 + if (!rx) {
1984 + goto rx_memory_error;
1985 + ret = -ENOMEM;
1986 + }
1987 +
1988 + msg[0].buf = tx;
1989 + msg[1].buf = rx;
1990
1991 tx[0] = 0x03;
1992 tx[1] = 0x00;
1993 @@ -1574,7 +1602,11 @@ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
1994 }
1995
1996 dprintk("-D- DiB7000PC not detected");
1997 - return 0;
1998 +
1999 + kfree(rx);
2000 +rx_memory_error:
2001 + kfree(tx);
2002 + return ret;
2003 }
2004 EXPORT_SYMBOL(dib7000pc_detection);
2005
2006 diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
2007 index c1c3e26..7d2ea11 100644
2008 --- a/drivers/media/dvb/frontends/dib8000.c
2009 +++ b/drivers/media/dvb/frontends/dib8000.c
2010 @@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
2011 struct i2c_device {
2012 struct i2c_adapter *adap;
2013 u8 addr;
2014 + u8 *i2c_write_buffer;
2015 + u8 *i2c_read_buffer;
2016 };
2017
2018 struct dib8000_state {
2019 @@ -70,6 +72,11 @@ struct dib8000_state {
2020 u32 status;
2021
2022 struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
2023 +
2024 + /* for the I2C transfer */
2025 + struct i2c_msg msg[2];
2026 + u8 i2c_write_buffer[4];
2027 + u8 i2c_read_buffer[2];
2028 };
2029
2030 enum dib8000_power_mode {
2031 @@ -79,22 +86,41 @@ enum dib8000_power_mode {
2032
2033 static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
2034 {
2035 - u8 wb[2] = { reg >> 8, reg & 0xff };
2036 - u8 rb[2];
2037 struct i2c_msg msg[2] = {
2038 - {.addr = i2c->addr >> 1,.flags = 0,.buf = wb,.len = 2},
2039 - {.addr = i2c->addr >> 1,.flags = I2C_M_RD,.buf = rb,.len = 2},
2040 + {.addr = i2c->addr >> 1, .flags = 0,
2041 + .buf = i2c->i2c_write_buffer, .len = 2},
2042 + {.addr = i2c->addr >> 1, .flags = I2C_M_RD,
2043 + .buf = i2c->i2c_read_buffer, .len = 2},
2044 };
2045
2046 + msg[0].buf[0] = reg >> 8;
2047 + msg[0].buf[1] = reg & 0xff;
2048 +
2049 if (i2c_transfer(i2c->adap, msg, 2) != 2)
2050 dprintk("i2c read error on %d", reg);
2051
2052 - return (rb[0] << 8) | rb[1];
2053 + return (msg[1].buf[0] << 8) | msg[1].buf[1];
2054 }
2055
2056 static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
2057 {
2058 - return dib8000_i2c_read16(&state->i2c, reg);
2059 + state->i2c_write_buffer[0] = reg >> 8;
2060 + state->i2c_write_buffer[1] = reg & 0xff;
2061 +
2062 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
2063 + state->msg[0].addr = state->i2c.addr >> 1;
2064 + state->msg[0].flags = 0;
2065 + state->msg[0].buf = state->i2c_write_buffer;
2066 + state->msg[0].len = 2;
2067 + state->msg[1].addr = state->i2c.addr >> 1;
2068 + state->msg[1].flags = I2C_M_RD;
2069 + state->msg[1].buf = state->i2c_read_buffer;
2070 + state->msg[1].len = 2;
2071 +
2072 + if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
2073 + dprintk("i2c read error on %d", reg);
2074 +
2075 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2076 }
2077
2078 static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
2079 @@ -109,19 +135,34 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
2080
2081 static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
2082 {
2083 - u8 b[4] = {
2084 - (reg >> 8) & 0xff, reg & 0xff,
2085 - (val >> 8) & 0xff, val & 0xff,
2086 - };
2087 - struct i2c_msg msg = {
2088 - .addr = i2c->addr >> 1,.flags = 0,.buf = b,.len = 4
2089 - };
2090 - return i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
2091 + struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0,
2092 + .buf = i2c->i2c_write_buffer, .len = 4};
2093 + int ret = 0;
2094 +
2095 + msg.buf[0] = (reg >> 8) & 0xff;
2096 + msg.buf[1] = reg & 0xff;
2097 + msg.buf[2] = (val >> 8) & 0xff;
2098 + msg.buf[3] = val & 0xff;
2099 +
2100 + ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
2101 +
2102 + return ret;
2103 }
2104
2105 static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
2106 {
2107 - return dib8000_i2c_write16(&state->i2c, reg, val);
2108 + state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2109 + state->i2c_write_buffer[1] = reg & 0xff;
2110 + state->i2c_write_buffer[2] = (val >> 8) & 0xff;
2111 + state->i2c_write_buffer[3] = val & 0xff;
2112 +
2113 + memset(&state->msg[0], 0, sizeof(struct i2c_msg));
2114 + state->msg[0].addr = state->i2c.addr >> 1;
2115 + state->msg[0].flags = 0;
2116 + state->msg[0].buf = state->i2c_write_buffer;
2117 + state->msg[0].len = 4;
2118 +
2119 + return i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
2120 }
2121
2122 static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
2123 @@ -980,30 +1021,31 @@ static void dib8000_update_timf(struct dib8000_state *state)
2124 dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
2125 }
2126
2127 +static const u16 adc_target_16dB[11] = {
2128 + (1 << 13) - 825 - 117,
2129 + (1 << 13) - 837 - 117,
2130 + (1 << 13) - 811 - 117,
2131 + (1 << 13) - 766 - 117,
2132 + (1 << 13) - 737 - 117,
2133 + (1 << 13) - 693 - 117,
2134 + (1 << 13) - 648 - 117,
2135 + (1 << 13) - 619 - 117,
2136 + (1 << 13) - 575 - 117,
2137 + (1 << 13) - 531 - 117,
2138 + (1 << 13) - 501 - 117
2139 +};
2140 +static const u8 permu_seg[] = { 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12 };
2141 +
2142 static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosearching)
2143 {
2144 u16 mode, max_constellation, seg_diff_mask = 0, nbseg_diff = 0;
2145 u8 guard, crate, constellation, timeI;
2146 - u8 permu_seg[] = { 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12 };
2147 u16 i, coeff[4], P_cfr_left_edge = 0, P_cfr_right_edge = 0, seg_mask13 = 0x1fff; // All 13 segments enabled
2148 const s16 *ncoeff = NULL, *ana_fe;
2149 u16 tmcc_pow = 0;
2150 u16 coff_pow = 0x2800;
2151 u16 init_prbs = 0xfff;
2152 u16 ana_gain = 0;
2153 - u16 adc_target_16dB[11] = {
2154 - (1 << 13) - 825 - 117,
2155 - (1 << 13) - 837 - 117,
2156 - (1 << 13) - 811 - 117,
2157 - (1 << 13) - 766 - 117,
2158 - (1 << 13) - 737 - 117,
2159 - (1 << 13) - 693 - 117,
2160 - (1 << 13) - 648 - 117,
2161 - (1 << 13) - 619 - 117,
2162 - (1 << 13) - 575 - 117,
2163 - (1 << 13) - 531 - 117,
2164 - (1 << 13) - 501 - 117
2165 - };
2166
2167 if (state->ber_monitored_layer != LAYER_ALL)
2168 dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & 0x60) | state->ber_monitored_layer);
2169 @@ -2379,10 +2421,22 @@ EXPORT_SYMBOL(dib8000_get_slave_frontend);
2170
2171 int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
2172 {
2173 - int k = 0;
2174 + int k = 0, ret = 0;
2175 u8 new_addr = 0;
2176 struct i2c_device client = {.adap = host };
2177
2178 + client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
2179 + if (!client.i2c_write_buffer) {
2180 + dprintk("%s: not enough memory", __func__);
2181 + return -ENOMEM;
2182 + }
2183 + client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
2184 + if (!client.i2c_read_buffer) {
2185 + dprintk("%s: not enough memory", __func__);
2186 + ret = -ENOMEM;
2187 + goto error_memory;
2188 + }
2189 +
2190 for (k = no_of_demods - 1; k >= 0; k--) {
2191 /* designated i2c address */
2192 new_addr = first_addr + (k << 1);
2193 @@ -2394,7 +2448,8 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
2194 client.addr = default_addr;
2195 if (dib8000_identify(&client) == 0) {
2196 dprintk("#%d: not identified", k);
2197 - return -EINVAL;
2198 + ret = -EINVAL;
2199 + goto error;
2200 }
2201 }
2202
2203 @@ -2420,7 +2475,12 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
2204 dib8000_i2c_write16(&client, 1286, 0);
2205 }
2206
2207 - return 0;
2208 +error:
2209 + kfree(client.i2c_read_buffer);
2210 +error_memory:
2211 + kfree(client.i2c_write_buffer);
2212 +
2213 + return ret;
2214 }
2215
2216 EXPORT_SYMBOL(dib8000_i2c_enumeration);
2217 @@ -2519,6 +2579,8 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
2218 memcpy(&state->cfg, cfg, sizeof(struct dib8000_config));
2219 state->i2c.adap = i2c_adap;
2220 state->i2c.addr = i2c_addr;
2221 + state->i2c.i2c_write_buffer = state->i2c_write_buffer;
2222 + state->i2c.i2c_read_buffer = state->i2c_read_buffer;
2223 state->gpio_val = cfg->gpio_val;
2224 state->gpio_dir = cfg->gpio_dir;
2225
2226 diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
2227 index 9151876..451ffa2 100644
2228 --- a/drivers/media/dvb/frontends/dib9000.c
2229 +++ b/drivers/media/dvb/frontends/dib9000.c
2230 @@ -27,6 +27,8 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
2231 struct i2c_device {
2232 struct i2c_adapter *i2c_adap;
2233 u8 i2c_addr;
2234 + u8 *i2c_read_buffer;
2235 + u8 *i2c_write_buffer;
2236 };
2237
2238 /* lock */
2239 @@ -92,11 +94,16 @@ struct dib9000_state {
2240
2241 struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
2242 u16 component_bus_speed;
2243 +
2244 + /* for the I2C transfer */
2245 + struct i2c_msg msg[2];
2246 + u8 i2c_write_buffer[255];
2247 + u8 i2c_read_buffer[255];
2248 };
2249
2250 -u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2251 +static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2252 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2253 - 0, 0, 0
2254 + 0, 0, 0, 0, 0, 0, 0, 0
2255 };
2256
2257 enum dib9000_power_mode {
2258 @@ -217,25 +224,33 @@ static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32
2259 u32 chunk_size = 126;
2260 u32 l;
2261 int ret;
2262 - u8 wb[2] = { reg >> 8, reg & 0xff };
2263 - struct i2c_msg msg[2] = {
2264 - {.addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
2265 - {.addr = state->i2c.i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = len},
2266 - };
2267
2268 if (state->platform.risc.fw_is_running && (reg < 1024))
2269 return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len);
2270
2271 + memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
2272 + state->msg[0].addr = state->i2c.i2c_addr >> 1;
2273 + state->msg[0].flags = 0;
2274 + state->msg[0].buf = state->i2c_write_buffer;
2275 + state->msg[0].len = 2;
2276 + state->msg[1].addr = state->i2c.i2c_addr >> 1;
2277 + state->msg[1].flags = I2C_M_RD;
2278 + state->msg[1].buf = b;
2279 + state->msg[1].len = len;
2280 +
2281 + state->i2c_write_buffer[0] = reg >> 8;
2282 + state->i2c_write_buffer[1] = reg & 0xff;
2283 +
2284 if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
2285 - wb[0] |= (1 << 5);
2286 + state->i2c_write_buffer[0] |= (1 << 5);
2287 if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
2288 - wb[0] |= (1 << 4);
2289 + state->i2c_write_buffer[0] |= (1 << 4);
2290
2291 do {
2292 l = len < chunk_size ? len : chunk_size;
2293 - msg[1].len = l;
2294 - msg[1].buf = b;
2295 - ret = i2c_transfer(state->i2c.i2c_adap, msg, 2) != 2 ? -EREMOTEIO : 0;
2296 + state->msg[1].len = l;
2297 + state->msg[1].buf = b;
2298 + ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
2299 if (ret != 0) {
2300 dprintk("i2c read error on %d", reg);
2301 return -EREMOTEIO;
2302 @@ -253,50 +268,47 @@ static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32
2303
2304 static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
2305 {
2306 - u8 b[2];
2307 - u8 wb[2] = { reg >> 8, reg & 0xff };
2308 struct i2c_msg msg[2] = {
2309 - {.addr = i2c->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
2310 - {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = 2},
2311 + {.addr = i2c->i2c_addr >> 1, .flags = 0,
2312 + .buf = i2c->i2c_write_buffer, .len = 2},
2313 + {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD,
2314 + .buf = i2c->i2c_read_buffer, .len = 2},
2315 };
2316
2317 + i2c->i2c_write_buffer[0] = reg >> 8;
2318 + i2c->i2c_write_buffer[1] = reg & 0xff;
2319 +
2320 if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
2321 dprintk("read register %x error", reg);
2322 return 0;
2323 }
2324
2325 - return (b[0] << 8) | b[1];
2326 + return (i2c->i2c_read_buffer[0] << 8) | i2c->i2c_read_buffer[1];
2327 }
2328
2329 static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg)
2330 {
2331 - u8 b[2];
2332 - if (dib9000_read16_attr(state, reg, b, 2, 0) != 0)
2333 + if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, 0) != 0)
2334 return 0;
2335 - return (b[0] << 8 | b[1]);
2336 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2337 }
2338
2339 static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute)
2340 {
2341 - u8 b[2];
2342 - if (dib9000_read16_attr(state, reg, b, 2, attribute) != 0)
2343 + if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2,
2344 + attribute) != 0)
2345 return 0;
2346 - return (b[0] << 8 | b[1]);
2347 + return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
2348 }
2349
2350 #define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
2351
2352 static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
2353 {
2354 - u8 b[255];
2355 u32 chunk_size = 126;
2356 u32 l;
2357 int ret;
2358
2359 - struct i2c_msg msg = {
2360 - .addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = b, .len = len + 2
2361 - };
2362 -
2363 if (state->platform.risc.fw_is_running && (reg < 1024)) {
2364 if (dib9000_risc_apb_access_write
2365 (state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0)
2366 @@ -304,20 +316,26 @@ static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *
2367 return 0;
2368 }
2369
2370 - b[0] = (reg >> 8) & 0xff;
2371 - b[1] = (reg) & 0xff;
2372 + memset(&state->msg[0], 0, sizeof(struct i2c_msg));
2373 + state->msg[0].addr = state->i2c.i2c_addr >> 1;
2374 + state->msg[0].flags = 0;
2375 + state->msg[0].buf = state->i2c_write_buffer;
2376 + state->msg[0].len = len + 2;
2377 +
2378 + state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2379 + state->i2c_write_buffer[1] = (reg) & 0xff;
2380
2381 if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
2382 - b[0] |= (1 << 5);
2383 + state->i2c_write_buffer[0] |= (1 << 5);
2384 if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
2385 - b[0] |= (1 << 4);
2386 + state->i2c_write_buffer[0] |= (1 << 4);
2387
2388 do {
2389 l = len < chunk_size ? len : chunk_size;
2390 - msg.len = l + 2;
2391 - memcpy(&b[2], buf, l);
2392 + state->msg[0].len = l + 2;
2393 + memcpy(&state->i2c_write_buffer[2], buf, l);
2394
2395 - ret = i2c_transfer(state->i2c.i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
2396 + ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
2397
2398 buf += l;
2399 len -= l;
2400 @@ -331,11 +349,16 @@ static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *
2401
2402 static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
2403 {
2404 - u8 b[4] = { (reg >> 8) & 0xff, reg & 0xff, (val >> 8) & 0xff, val & 0xff };
2405 struct i2c_msg msg = {
2406 - .addr = i2c->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
2407 + .addr = i2c->i2c_addr >> 1, .flags = 0,
2408 + .buf = i2c->i2c_write_buffer, .len = 4
2409 };
2410
2411 + i2c->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2412 + i2c->i2c_write_buffer[1] = reg & 0xff;
2413 + i2c->i2c_write_buffer[2] = (val >> 8) & 0xff;
2414 + i2c->i2c_write_buffer[3] = val & 0xff;
2415 +
2416 return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
2417 }
2418
2419 @@ -1015,8 +1038,8 @@ static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i)
2420 return 0;
2421 dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i);
2422 do {
2423 - dib9000_risc_mem_read(state, FE_MM_RW_SYNC, &i, 1);
2424 - } while (i && index_loop--);
2425 + dib9000_risc_mem_read(state, FE_MM_RW_SYNC, state->i2c_read_buffer, 1);
2426 + } while (state->i2c_read_buffer[0] && index_loop--);
2427
2428 if (index_loop > 0)
2429 return 0;
2430 @@ -1139,7 +1162,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2431
2432 s8 intlv_native;
2433 };
2434 - struct dibDVBTChannel ch;
2435 + struct dibDVBTChannel *ch;
2436 int ret = 0;
2437
2438 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
2439 @@ -1148,9 +1171,12 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2440 ret = -EIO;
2441 }
2442
2443 - dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION, (u8 *) &ch, sizeof(struct dibDVBTChannel));
2444 + dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION,
2445 + state->i2c_read_buffer, sizeof(struct dibDVBTChannel));
2446 + ch = (struct dibDVBTChannel *)state->i2c_read_buffer;
2447 +
2448
2449 - switch (ch.spectrum_inversion & 0x7) {
2450 + switch (ch->spectrum_inversion & 0x7) {
2451 case 1:
2452 state->fe[0]->dtv_property_cache.inversion = INVERSION_ON;
2453 break;
2454 @@ -1162,7 +1188,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2455 state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO;
2456 break;
2457 }
2458 - switch (ch.nfft) {
2459 + switch (ch->nfft) {
2460 case 0:
2461 state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
2462 break;
2463 @@ -1177,7 +1203,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2464 state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
2465 break;
2466 }
2467 - switch (ch.guard) {
2468 + switch (ch->guard) {
2469 case 0:
2470 state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
2471 break;
2472 @@ -1195,7 +1221,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2473 state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
2474 break;
2475 }
2476 - switch (ch.constellation) {
2477 + switch (ch->constellation) {
2478 case 2:
2479 state->fe[0]->dtv_property_cache.modulation = QAM_64;
2480 break;
2481 @@ -1210,7 +1236,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2482 state->fe[0]->dtv_property_cache.modulation = QAM_AUTO;
2483 break;
2484 }
2485 - switch (ch.hrch) {
2486 + switch (ch->hrch) {
2487 case 0:
2488 state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_NONE;
2489 break;
2490 @@ -1222,7 +1248,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2491 state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
2492 break;
2493 }
2494 - switch (ch.code_rate_hp) {
2495 + switch (ch->code_rate_hp) {
2496 case 1:
2497 state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2;
2498 break;
2499 @@ -1243,7 +1269,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
2500 state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO;
2501 break;
2502 }
2503 - switch (ch.code_rate_lp) {
2504 + switch (ch->code_rate_lp) {
2505 case 1:
2506 state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2;
2507 break;
2508 @@ -1439,9 +1465,10 @@ static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_paramete
2509 break;
2510 case CT_DEMOD_STEP_1:
2511 if (search)
2512 - dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, (u8 *) &i, 1);
2513 + dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, state->i2c_read_buffer, 1);
2514 else
2515 - dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, (u8 *) &i, 1);
2516 + dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, state->i2c_read_buffer, 1);
2517 + i = (s8)state->i2c_read_buffer[0];
2518 switch (i) { /* something happened */
2519 case 0:
2520 break;
2521 @@ -2038,14 +2065,17 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
2522 static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
2523 {
2524 struct dib9000_state *state = fe->demodulator_priv;
2525 - u16 c[16];
2526 + u16 *c;
2527
2528 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
2529 if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
2530 return -EIO;
2531 - dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
2532 + dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
2533 + state->i2c_read_buffer, 16 * 2);
2534 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
2535
2536 + c = (u16 *)state->i2c_read_buffer;
2537 +
2538 *ber = c[10] << 16 | c[11];
2539 return 0;
2540 }
2541 @@ -2054,7 +2084,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
2542 {
2543 struct dib9000_state *state = fe->demodulator_priv;
2544 u8 index_frontend;
2545 - u16 c[16];
2546 + u16 *c = (u16 *)state->i2c_read_buffer;
2547 u16 val;
2548
2549 *strength = 0;
2550 @@ -2069,7 +2099,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
2551 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
2552 if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
2553 return -EIO;
2554 - dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
2555 + dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
2556 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
2557
2558 val = 65535 - c[4];
2559 @@ -2083,14 +2113,14 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
2560 static u32 dib9000_get_snr(struct dvb_frontend *fe)
2561 {
2562 struct dib9000_state *state = fe->demodulator_priv;
2563 - u16 c[16];
2564 + u16 *c = (u16 *)state->i2c_read_buffer;
2565 u32 n, s, exp;
2566 u16 val;
2567
2568 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
2569 if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
2570 return -EIO;
2571 - dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
2572 + dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
2573 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
2574
2575 val = c[7];
2576 @@ -2137,12 +2167,12 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
2577 static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
2578 {
2579 struct dib9000_state *state = fe->demodulator_priv;
2580 - u16 c[16];
2581 + u16 *c = (u16 *)state->i2c_read_buffer;
2582
2583 DibAcquireLock(&state->platform.risc.mem_mbx_lock);
2584 if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
2585 return -EIO;
2586 - dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
2587 + dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
2588 DibReleaseLock(&state->platform.risc.mem_mbx_lock);
2589
2590 *unc = c[12];
2591 @@ -2151,10 +2181,22 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
2592
2593 int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
2594 {
2595 - int k = 0;
2596 + int k = 0, ret = 0;
2597 u8 new_addr = 0;
2598 struct i2c_device client = {.i2c_adap = i2c };
2599
2600 + client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
2601 + if (!client.i2c_write_buffer) {
2602 + dprintk("%s: not enough memory", __func__);
2603 + return -ENOMEM;
2604 + }
2605 + client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
2606 + if (!client.i2c_read_buffer) {
2607 + dprintk("%s: not enough memory", __func__);
2608 + ret = -ENOMEM;
2609 + goto error_memory;
2610 + }
2611 +
2612 client.i2c_addr = default_addr + 16;
2613 dib9000_i2c_write16(&client, 1796, 0x0);
2614
2615 @@ -2178,7 +2220,8 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
2616 client.i2c_addr = default_addr;
2617 if (dib9000_identify(&client) == 0) {
2618 dprintk("DiB9000 #%d: not identified", k);
2619 - return -EIO;
2620 + ret = -EIO;
2621 + goto error;
2622 }
2623 }
2624
2625 @@ -2196,7 +2239,12 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
2626 dib9000_i2c_write16(&client, 1795, 0);
2627 }
2628
2629 - return 0;
2630 +error:
2631 + kfree(client.i2c_read_buffer);
2632 +error_memory:
2633 + kfree(client.i2c_write_buffer);
2634 +
2635 + return ret;
2636 }
2637 EXPORT_SYMBOL(dib9000_i2c_enumeration);
2638
2639 @@ -2261,6 +2309,8 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
2640 memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config));
2641 st->i2c.i2c_adap = i2c_adap;
2642 st->i2c.i2c_addr = i2c_addr;
2643 + st->i2c.i2c_write_buffer = st->i2c_write_buffer;
2644 + st->i2c.i2c_read_buffer = st->i2c_read_buffer;
2645
2646 st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS;
2647 st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
2648 diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
2649 index f6938f9..dc5d17a 100644
2650 --- a/drivers/media/dvb/frontends/dibx000_common.c
2651 +++ b/drivers/media/dvb/frontends/dibx000_common.c
2652 @@ -10,30 +10,39 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
2653
2654 static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
2655 {
2656 - u8 b[4] = {
2657 - (reg >> 8) & 0xff, reg & 0xff,
2658 - (val >> 8) & 0xff, val & 0xff,
2659 - };
2660 - struct i2c_msg msg = {
2661 - .addr = mst->i2c_addr,.flags = 0,.buf = b,.len = 4
2662 - };
2663 -
2664 - return i2c_transfer(mst->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
2665 + mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
2666 + mst->i2c_write_buffer[1] = reg & 0xff;
2667 + mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
2668 + mst->i2c_write_buffer[3] = val & 0xff;
2669 +
2670 + memset(mst->msg, 0, sizeof(struct i2c_msg));
2671 + mst->msg[0].addr = mst->i2c_addr;
2672 + mst->msg[0].flags = 0;
2673 + mst->msg[0].buf = mst->i2c_write_buffer;
2674 + mst->msg[0].len = 4;
2675 +
2676 + return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
2677 }
2678
2679 static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
2680 {
2681 - u8 wb[2] = { reg >> 8, reg & 0xff };
2682 - u8 rb[2];
2683 - struct i2c_msg msg[2] = {
2684 - {.addr = mst->i2c_addr, .flags = 0, .buf = wb, .len = 2},
2685 - {.addr = mst->i2c_addr, .flags = I2C_M_RD, .buf = rb, .len = 2},
2686 - };
2687 -
2688 - if (i2c_transfer(mst->i2c_adap, msg, 2) != 2)
2689 + mst->i2c_write_buffer[0] = reg >> 8;
2690 + mst->i2c_write_buffer[1] = reg & 0xff;
2691 +
2692 + memset(mst->msg, 0, 2 * sizeof(struct i2c_msg));
2693 + mst->msg[0].addr = mst->i2c_addr;
2694 + mst->msg[0].flags = 0;
2695 + mst->msg[0].buf = mst->i2c_write_buffer;
2696 + mst->msg[0].len = 2;
2697 + mst->msg[1].addr = mst->i2c_addr;
2698 + mst->msg[1].flags = I2C_M_RD;
2699 + mst->msg[1].buf = mst->i2c_read_buffer;
2700 + mst->msg[1].len = 2;
2701 +
2702 + if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
2703 dprintk("i2c read error on %d", reg);
2704
2705 - return (rb[0] << 8) | rb[1];
2706 + return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
2707 }
2708
2709 static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
2710 @@ -248,26 +257,32 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
2711 struct i2c_msg msg[], int num)
2712 {
2713 struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
2714 - struct i2c_msg m[2 + num];
2715 - u8 tx_open[4], tx_close[4];
2716
2717 - memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
2718 + if (num > 32) {
2719 + dprintk("%s: too many I2C messages to be transmitted (%i).\
2720 + Maximum is 32", __func__, num);
2721 + return -ENOMEM;
2722 + }
2723 +
2724 + memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
2725
2726 dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
2727
2728 - dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
2729 - m[0].addr = mst->i2c_addr;
2730 - m[0].buf = tx_open;
2731 - m[0].len = 4;
2732 + /* open the gate */
2733 + dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
2734 + mst->msg[0].addr = mst->i2c_addr;
2735 + mst->msg[0].buf = &mst->i2c_write_buffer[0];
2736 + mst->msg[0].len = 4;
2737
2738 - memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
2739 + memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);
2740
2741 - dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
2742 - m[num + 1].addr = mst->i2c_addr;
2743 - m[num + 1].buf = tx_close;
2744 - m[num + 1].len = 4;
2745 + /* close the gate */
2746 + dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
2747 + mst->msg[num + 1].addr = mst->i2c_addr;
2748 + mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
2749 + mst->msg[num + 1].len = 4;
2750
2751 - return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
2752 + return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
2753 }
2754
2755 static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
2756 @@ -279,26 +294,32 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
2757 struct i2c_msg msg[], int num)
2758 {
2759 struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
2760 - struct i2c_msg m[2 + num];
2761 - u8 tx_open[4], tx_close[4];
2762
2763 - memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
2764 + if (num > 32) {
2765 + dprintk("%s: too many I2C messages to be transmitted (%i).\
2766 + Maximum is 32", __func__, num);
2767 + return -ENOMEM;
2768 + }
2769 +
2770 + memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
2771
2772 dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
2773
2774 - dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
2775 - m[0].addr = mst->i2c_addr;
2776 - m[0].buf = tx_open;
2777 - m[0].len = 4;
2778 + /* open the gate */
2779 + dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
2780 + mst->msg[0].addr = mst->i2c_addr;
2781 + mst->msg[0].buf = &mst->i2c_write_buffer[0];
2782 + mst->msg[0].len = 4;
2783
2784 - memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
2785 + memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);
2786
2787 - dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
2788 - m[num + 1].addr = mst->i2c_addr;
2789 - m[num + 1].buf = tx_close;
2790 - m[num + 1].len = 4;
2791 + /* close the gate */
2792 + dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
2793 + mst->msg[num + 1].addr = mst->i2c_addr;
2794 + mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
2795 + mst->msg[num + 1].len = 4;
2796
2797 - return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
2798 + return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
2799 }
2800
2801 static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
2802 diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
2803 index 977d343..f031165 100644
2804 --- a/drivers/media/dvb/frontends/dibx000_common.h
2805 +++ b/drivers/media/dvb/frontends/dibx000_common.h
2806 @@ -28,6 +28,11 @@ struct dibx000_i2c_master {
2807 u8 i2c_addr;
2808
2809 u16 base_reg;
2810 +
2811 + /* for the I2C transfer */
2812 + struct i2c_msg msg[34];
2813 + u8 i2c_write_buffer[8];
2814 + u8 i2c_read_buffer[2];
2815 };
2816
2817 extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
2818 diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
2819 index bca307e..f637d34 100644
2820 --- a/drivers/media/video/cx88/cx88-blackbird.c
2821 +++ b/drivers/media/video/cx88/cx88-blackbird.c
2822 @@ -1122,7 +1122,6 @@ static int mpeg_release(struct file *file)
2823 mutex_lock(&dev->core->lock);
2824 file->private_data = NULL;
2825 kfree(fh);
2826 - mutex_unlock(&dev->core->lock);
2827
2828 /* Make sure we release the hardware */
2829 drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
2830 @@ -1131,6 +1130,8 @@ static int mpeg_release(struct file *file)
2831
2832 atomic_dec(&dev->core->mpeg_users);
2833
2834 + mutex_unlock(&dev->core->lock);
2835 +
2836 return 0;
2837 }
2838
2839 @@ -1334,11 +1335,9 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
2840 blackbird_register_video(dev);
2841
2842 /* initial device configuration: needed ? */
2843 - mutex_lock(&dev->core->lock);
2844 // init_controls(core);
2845 cx88_set_tvnorm(core,core->tvnorm);
2846 cx88_video_mux(core,0);
2847 - mutex_unlock(&dev->core->lock);
2848
2849 return 0;
2850
2851 diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
2852 index 7b8c9d3..c69df7e 100644
2853 --- a/drivers/media/video/cx88/cx88-dvb.c
2854 +++ b/drivers/media/video/cx88/cx88-dvb.c
2855 @@ -133,6 +133,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
2856 return -EINVAL;
2857 }
2858
2859 + mutex_lock(&dev->core->lock);
2860 drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
2861 if (drv) {
2862 if (acquire){
2863 @@ -143,6 +144,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
2864 dev->frontends.active_fe_id = 0;
2865 }
2866 }
2867 + mutex_unlock(&dev->core->lock);
2868
2869 return ret;
2870 }
2871 diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
2872 index addf954..497f26f 100644
2873 --- a/drivers/media/video/cx88/cx88-mpeg.c
2874 +++ b/drivers/media/video/cx88/cx88-mpeg.c
2875 @@ -624,13 +624,11 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
2876
2877 if (drv->advise_acquire)
2878 {
2879 - mutex_lock(&drv->core->lock);
2880 core->active_ref++;
2881 if (core->active_type_id == CX88_BOARD_NONE) {
2882 core->active_type_id = drv->type_id;
2883 drv->advise_acquire(drv);
2884 }
2885 - mutex_unlock(&drv->core->lock);
2886
2887 mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
2888 }
2889 @@ -643,14 +641,12 @@ static int cx8802_request_release(struct cx8802_driver *drv)
2890 {
2891 struct cx88_core *core = drv->core;
2892
2893 - mutex_lock(&drv->core->lock);
2894 if (drv->advise_release && --core->active_ref == 0)
2895 {
2896 drv->advise_release(drv);
2897 core->active_type_id = CX88_BOARD_NONE;
2898 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
2899 }
2900 - mutex_unlock(&drv->core->lock);
2901
2902 return 0;
2903 }
2904 @@ -713,18 +709,17 @@ int cx8802_register_driver(struct cx8802_driver *drv)
2905 drv->request_release = cx8802_request_release;
2906 memcpy(driver, drv, sizeof(*driver));
2907
2908 + mutex_lock(&drv->core->lock);
2909 err = drv->probe(driver);
2910 if (err == 0) {
2911 i++;
2912 - mutex_lock(&drv->core->lock);
2913 list_add_tail(&driver->drvlist, &dev->drvlist);
2914 - mutex_unlock(&drv->core->lock);
2915 } else {
2916 printk(KERN_ERR
2917 "%s/2: cx8802 probe failed, err = %d\n",
2918 dev->core->name, err);
2919 }
2920 -
2921 + mutex_unlock(&drv->core->lock);
2922 }
2923
2924 return i ? 0 : -ENODEV;
2925 @@ -748,6 +743,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
2926 dev->pci->subsystem_device, dev->core->board.name,
2927 dev->core->boardnr);
2928
2929 + mutex_lock(&dev->core->lock);
2930 +
2931 list_for_each_entry_safe(d, dtmp, &dev->drvlist, drvlist) {
2932 /* only unregister the correct driver type */
2933 if (d->type_id != drv->type_id)
2934 @@ -755,15 +752,14 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
2935
2936 err = d->remove(d);
2937 if (err == 0) {
2938 - mutex_lock(&drv->core->lock);
2939 list_del(&d->drvlist);
2940 - mutex_unlock(&drv->core->lock);
2941 kfree(d);
2942 } else
2943 printk(KERN_ERR "%s/2: cx8802 driver remove "
2944 "failed (%d)\n", dev->core->name, err);
2945 }
2946
2947 + mutex_unlock(&dev->core->lock);
2948 }
2949
2950 return err;
2951 @@ -827,6 +823,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
2952
2953 flush_request_modules(dev);
2954
2955 + mutex_lock(&dev->core->lock);
2956 +
2957 if (!list_empty(&dev->drvlist)) {
2958 struct cx8802_driver *drv, *tmp;
2959 int err;
2960 @@ -838,9 +836,7 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
2961 list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
2962 err = drv->remove(drv);
2963 if (err == 0) {
2964 - mutex_lock(&drv->core->lock);
2965 list_del(&drv->drvlist);
2966 - mutex_unlock(&drv->core->lock);
2967 } else
2968 printk(KERN_ERR "%s/2: cx8802 driver remove "
2969 "failed (%d)\n", dev->core->name, err);
2970 @@ -848,6 +844,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
2971 }
2972 }
2973
2974 + mutex_unlock(&dev->core->lock);
2975 +
2976 /* Destroy any 8802 reference. */
2977 dev->core->dvbdev = NULL;
2978
2979 diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
2980 index 9b3742a..3d32f4a 100644
2981 --- a/drivers/media/video/cx88/cx88.h
2982 +++ b/drivers/media/video/cx88/cx88.h
2983 @@ -505,6 +505,8 @@ struct cx8802_driver {
2984 int (*suspend)(struct pci_dev *pci_dev, pm_message_t state);
2985 int (*resume)(struct pci_dev *pci_dev);
2986
2987 + /* Callers to the following functions must hold core->lock */
2988 +
2989 /* MPEG 8802 -> mini driver - Driver probe and configuration */
2990 int (*probe)(struct cx8802_driver *drv);
2991 int (*remove)(struct cx8802_driver *drv);
2992 @@ -561,8 +563,9 @@ struct cx8802_dev {
2993 /* for switching modulation types */
2994 unsigned char ts_gen_cntrl;
2995
2996 - /* List of attached drivers */
2997 + /* List of attached drivers; must hold core->lock to access */
2998 struct list_head drvlist;
2999 +
3000 struct work_struct request_module_wk;
3001 };
3002
3003 @@ -685,6 +688,8 @@ int cx88_audio_thread(void *data);
3004
3005 int cx8802_register_driver(struct cx8802_driver *drv);
3006 int cx8802_unregister_driver(struct cx8802_driver *drv);
3007 +
3008 +/* Caller must hold core->lock */
3009 struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
3010
3011 /* ----------------------------------------------------------- */
3012 diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
3013 index 3ab9ffa..55c5d47 100644
3014 --- a/drivers/mfd/omap-usb-host.c
3015 +++ b/drivers/mfd/omap-usb-host.c
3016 @@ -994,22 +994,33 @@ static void usbhs_disable(struct device *dev)
3017 dev_dbg(dev, "operation timed out\n");
3018 }
3019
3020 - if (pdata->ehci_data->phy_reset) {
3021 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
3022 - gpio_free(pdata->ehci_data->reset_gpio_port[0]);
3023 -
3024 - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
3025 - gpio_free(pdata->ehci_data->reset_gpio_port[1]);
3026 + if (is_omap_usbhs_rev2(omap)) {
3027 + if (is_ehci_tll_mode(pdata->port_mode[0]))
3028 + clk_enable(omap->usbtll_p1_fck);
3029 + if (is_ehci_tll_mode(pdata->port_mode[1]))
3030 + clk_enable(omap->usbtll_p2_fck);
3031 + clk_disable(omap->utmi_p2_fck);
3032 + clk_disable(omap->utmi_p1_fck);
3033 }
3034
3035 - clk_disable(omap->utmi_p2_fck);
3036 - clk_disable(omap->utmi_p1_fck);
3037 clk_disable(omap->usbtll_ick);
3038 clk_disable(omap->usbtll_fck);
3039 clk_disable(omap->usbhost_fs_fck);
3040 clk_disable(omap->usbhost_hs_fck);
3041 clk_disable(omap->usbhost_ick);
3042
3043 + /* The gpio_free might sleep, so unlock the spinlock */
3044 + spin_unlock_irqrestore(&omap->lock, flags);
3045 +
3046 + if (pdata->ehci_data->phy_reset) {
3047 + if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
3048 + gpio_free(pdata->ehci_data->reset_gpio_port[0]);
3049 +
3050 + if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
3051 + gpio_free(pdata->ehci_data->reset_gpio_port[1]);
3052 + }
3053 + return;
3054 +
3055 end_disble:
3056 spin_unlock_irqrestore(&omap->lock, flags);
3057 }
3058 diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
3059 index 5060e60..e601672 100644
3060 --- a/drivers/mtd/mtdconcat.c
3061 +++ b/drivers/mtd/mtdconcat.c
3062 @@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
3063 if (!(mtd->flags & MTD_WRITEABLE))
3064 return -EROFS;
3065
3066 - ops->retlen = 0;
3067 + ops->retlen = ops->oobretlen = 0;
3068
3069 for (i = 0; i < concat->num_subdev; i++) {
3070 struct mtd_info *subdev = concat->subdev[i];
3071 @@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
3072 devops.len = subdev->size - to;
3073
3074 err = subdev->write_oob(subdev, to, &devops);
3075 - ops->retlen += devops.retlen;
3076 + ops->retlen += devops.oobretlen;
3077 if (err)
3078 return err;
3079
3080 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3081 index c54a4cb..d1345fc 100644
3082 --- a/drivers/mtd/nand/nand_base.c
3083 +++ b/drivers/mtd/nand/nand_base.c
3084 @@ -3112,6 +3112,8 @@ ident_done:
3085 chip->chip_shift += 32 - 1;
3086 }
3087
3088 + chip->badblockbits = 8;
3089 +
3090 /* Set the bad block position */
3091 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
3092 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
3093 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
3094 index da9a351..2c8040f 100644
3095 --- a/drivers/mtd/nand/omap2.c
3096 +++ b/drivers/mtd/nand/omap2.c
3097 @@ -263,11 +263,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
3098 if (ret) {
3099 /* PFPW engine is busy, use cpu copy method */
3100 if (info->nand.options & NAND_BUSWIDTH_16)
3101 - omap_read_buf16(mtd, buf, len);
3102 + omap_read_buf16(mtd, (u_char *)p, len);
3103 else
3104 - omap_read_buf8(mtd, buf, len);
3105 + omap_read_buf8(mtd, (u_char *)p, len);
3106 } else {
3107 - p = (u32 *) buf;
3108 do {
3109 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
3110 r_count = r_count >> 2;
3111 @@ -293,7 +292,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
3112 struct omap_nand_info, mtd);
3113 uint32_t w_count = 0;
3114 int i = 0, ret = 0;
3115 - u16 *p;
3116 + u16 *p = (u16 *)buf;
3117 unsigned long tim, limit;
3118
3119 /* take care of subpage writes */
3120 @@ -309,11 +308,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
3121 if (ret) {
3122 /* PFPW engine is busy, use cpu copy method */
3123 if (info->nand.options & NAND_BUSWIDTH_16)
3124 - omap_write_buf16(mtd, buf, len);
3125 + omap_write_buf16(mtd, (u_char *)p, len);
3126 else
3127 - omap_write_buf8(mtd, buf, len);
3128 + omap_write_buf8(mtd, (u_char *)p, len);
3129 } else {
3130 - p = (u16 *) buf;
3131 while (len) {
3132 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
3133 w_count = w_count >> 1;
3134 diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
3135 index ba71582..a20bfef 100644
3136 --- a/drivers/net/bonding/bond_alb.c
3137 +++ b/drivers/net/bonding/bond_alb.c
3138 @@ -163,8 +163,6 @@ static int tlb_initialize(struct bonding *bond)
3139 struct tlb_client_info *new_hashtbl;
3140 int i;
3141
3142 - spin_lock_init(&(bond_info->tx_hashtbl_lock));
3143 -
3144 new_hashtbl = kzalloc(size, GFP_KERNEL);
3145 if (!new_hashtbl) {
3146 pr_err("%s: Error: Failed to allocate TLB hash table\n",
3147 @@ -764,8 +762,6 @@ static int rlb_initialize(struct bonding *bond)
3148 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
3149 int i;
3150
3151 - spin_lock_init(&(bond_info->rx_hashtbl_lock));
3152 -
3153 new_hashtbl = kmalloc(size, GFP_KERNEL);
3154 if (!new_hashtbl) {
3155 pr_err("%s: Error: Failed to allocate RLB hash table\n",
3156 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
3157 index 16d6fe9..ffb0fde 100644
3158 --- a/drivers/net/bonding/bond_main.c
3159 +++ b/drivers/net/bonding/bond_main.c
3160 @@ -1535,12 +1535,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
3161 bond_dev->name, slave_dev->name);
3162 }
3163
3164 - /* bond must be initialized by bond_open() before enslaving */
3165 - if (!(bond_dev->flags & IFF_UP)) {
3166 - pr_warning("%s: master_dev is not up in bond_enslave\n",
3167 - bond_dev->name);
3168 - }
3169 -
3170 /* already enslaved */
3171 if (slave_dev->flags & IFF_SLAVE) {
3172 pr_debug("Error, Device was already enslaved\n");
3173 @@ -4975,9 +4969,19 @@ static int bond_init(struct net_device *bond_dev)
3174 {
3175 struct bonding *bond = netdev_priv(bond_dev);
3176 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3177 + struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
3178
3179 pr_debug("Begin bond_init for %s\n", bond_dev->name);
3180
3181 + /*
3182 + * Initialize locks that may be required during
3183 + * en/deslave operations. All of the bond_open work
3184 + * (of which this is part) should really be moved to
3185 + * a phase prior to dev_open
3186 + */
3187 + spin_lock_init(&(bond_info->tx_hashtbl_lock));
3188 + spin_lock_init(&(bond_info->rx_hashtbl_lock));
3189 +
3190 bond->wq = create_singlethread_workqueue(bond_dev->name);
3191 if (!bond->wq)
3192 return -ENOMEM;
3193 diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
3194 index de87aea..8a2717e 100644
3195 --- a/drivers/net/bonding/bond_sysfs.c
3196 +++ b/drivers/net/bonding/bond_sysfs.c
3197 @@ -227,12 +227,6 @@ static ssize_t bonding_store_slaves(struct device *d,
3198 struct net_device *dev;
3199 struct bonding *bond = to_bond(d);
3200
3201 - /* Quick sanity check -- is the bond interface up? */
3202 - if (!(bond->dev->flags & IFF_UP)) {
3203 - pr_warning("%s: doing slave updates when interface is down.\n",
3204 - bond->dev->name);
3205 - }
3206 -
3207 if (!rtnl_trylock())
3208 return restart_syscall();
3209
3210 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
3211 index 78e34e9..6d357d6 100644
3212 --- a/drivers/net/macvlan.c
3213 +++ b/drivers/net/macvlan.c
3214 @@ -598,8 +598,8 @@ static int macvlan_port_create(struct net_device *dev)
3215 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
3216 if (err)
3217 kfree(port);
3218 -
3219 - dev->priv_flags |= IFF_MACVLAN_PORT;
3220 + else
3221 + dev->priv_flags |= IFF_MACVLAN_PORT;
3222 return err;
3223 }
3224
3225 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3226 index 6eadf97..37af3f4 100644
3227 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3228 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3229 @@ -652,7 +652,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
3230 .regDmn = { LE16(0), LE16(0x1f) },
3231 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
3232 .opCapFlags = {
3233 - .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
3234 + .opFlags = AR5416_OPFLAGS_11A,
3235 .eepMisc = 0,
3236 },
3237 .rfSilent = 0,
3238 @@ -922,7 +922,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
3239 .db_stage2 = {3, 3, 3}, /* 3 chain */
3240 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
3241 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
3242 - .xpaBiasLvl = 0,
3243 + .xpaBiasLvl = 0xf,
3244 .txFrameToDataStart = 0x0e,
3245 .txFrameToPaOn = 0x0e,
3246 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
3247 @@ -3994,6 +3994,16 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3248 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
3249 );
3250
3251 + /* Write the power for duplicated frames - HT40 */
3252 +
3253 + /* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
3254 + REG_WRITE(ah, 0xa3e0,
3255 + POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
3256 + POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
3257 + POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
3258 + POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
3259 + );
3260 +
3261 /* Write the HT20 power per rate set */
3262
3263 /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
3264 diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
3265 index 8649581..fe3c10e 100644
3266 --- a/drivers/net/wireless/ath/ath9k/calib.c
3267 +++ b/drivers/net/wireless/ath/ath9k/calib.c
3268 @@ -69,15 +69,21 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
3269 int16_t *nfarray)
3270 {
3271 struct ath_common *common = ath9k_hw_common(ah);
3272 + struct ieee80211_conf *conf = &common->hw->conf;
3273 struct ath_nf_limits *limit;
3274 struct ath9k_nfcal_hist *h;
3275 bool high_nf_mid = false;
3276 + u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
3277 int i;
3278
3279 h = cal->nfCalHist;
3280 limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
3281
3282 for (i = 0; i < NUM_NF_READINGS; i++) {
3283 + if (!(chainmask & (1 << i)) ||
3284 + ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
3285 + continue;
3286 +
3287 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
3288
3289 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
3290 @@ -225,6 +231,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
3291 int32_t val;
3292 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
3293 struct ath_common *common = ath9k_hw_common(ah);
3294 + struct ieee80211_conf *conf = &common->hw->conf;
3295 s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
3296
3297 if (ah->caldata)
3298 @@ -234,6 +241,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
3299 if (chainmask & (1 << i)) {
3300 s16 nfval;
3301
3302 + if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
3303 + continue;
3304 +
3305 if (h)
3306 nfval = h[i].privNF;
3307 else
3308 @@ -293,6 +303,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
3309 ENABLE_REGWRITE_BUFFER(ah);
3310 for (i = 0; i < NUM_NF_READINGS; i++) {
3311 if (chainmask & (1 << i)) {
3312 + if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
3313 + continue;
3314 +
3315 val = REG_READ(ah, ah->nf_regs[i]);
3316 val &= 0xFFFFFE00;
3317 val |= (((u32) (-50) << 1) & 0x1ff);
3318 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
3319 index bafbe57..1755729 100644
3320 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
3321 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
3322 @@ -1783,6 +1783,15 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3323
3324 mutex_lock(&priv->mutex);
3325
3326 + if (!ctx->vif || !iwl_is_ready_rf(priv)) {
3327 + /*
3328 + * Huh? But wait ... this can maybe happen when
3329 + * we're in the middle of a firmware restart!
3330 + */
3331 + err = -EBUSY;
3332 + goto out;
3333 + }
3334 +
3335 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
3336
3337 if (!(interface_modes & BIT(newtype))) {
3338 @@ -1810,6 +1819,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3339 /* success */
3340 iwl_teardown_interface(priv, vif, true);
3341 vif->type = newtype;
3342 + vif->p2p = newp2p;
3343 err = iwl_setup_interface(priv, ctx);
3344 WARN_ON(err);
3345 /*
3346 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
3347 index 68b953f..c0a4cfb 100644
3348 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
3349 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
3350 @@ -1658,21 +1658,24 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
3351 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
3352 if (priv->valid_contexts & BIT(ctx->ctxid))
3353
3354 -static inline int iwl_is_associated(struct iwl_priv *priv,
3355 - enum iwl_rxon_context_id ctxid)
3356 +static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
3357 {
3358 - return (priv->contexts[ctxid].active.filter_flags &
3359 - RXON_FILTER_ASSOC_MSK) ? 1 : 0;
3360 + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
3361 }
3362
3363 -static inline int iwl_is_any_associated(struct iwl_priv *priv)
3364 +static inline int iwl_is_associated(struct iwl_priv *priv,
3365 + enum iwl_rxon_context_id ctxid)
3366 {
3367 - return iwl_is_associated(priv, IWL_RXON_CTX_BSS);
3368 + return iwl_is_associated_ctx(&priv->contexts[ctxid]);
3369 }
3370
3371 -static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
3372 +static inline int iwl_is_any_associated(struct iwl_priv *priv)
3373 {
3374 - return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
3375 + struct iwl_rxon_context *ctx;
3376 + for_each_context(priv, ctx)
3377 + if (iwl_is_associated_ctx(ctx))
3378 + return true;
3379 + return false;
3380 }
3381
3382 static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
3383 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
3384 index e183587..a8f3bc7 100644
3385 --- a/drivers/net/wireless/p54/p54usb.c
3386 +++ b/drivers/net/wireless/p54/p54usb.c
3387 @@ -82,6 +82,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
3388 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
3389 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
3390 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
3391 + {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
3392 {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
3393 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
3394 {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
3395 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3396 index 5129ed6..4b2bbe8 100644
3397 --- a/drivers/pci/quirks.c
3398 +++ b/drivers/pci/quirks.c
3399 @@ -2784,6 +2784,16 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
3400 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
3401 #endif
3402
3403 +static void __devinit fixup_ti816x_class(struct pci_dev* dev)
3404 +{
3405 + /* TI 816x devices do not have class code set when in PCIe boot mode */
3406 + if (dev->class == PCI_CLASS_NOT_DEFINED) {
3407 + dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
3408 + dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
3409 + }
3410 +}
3411 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
3412 +
3413 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
3414 struct pci_fixup *end)
3415 {
3416 diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
3417 index f0b8951..a8a2b6b 100644
3418 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c
3419 +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
3420 @@ -1274,6 +1274,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
3421 iscsi_init.dummy_buffer_addr_hi =
3422 (u32) ((u64) hba->dummy_buf_dma >> 32);
3423
3424 + hba->num_ccell = hba->max_sqes >> 1;
3425 hba->ctx_ccell_tasks =
3426 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
3427 iscsi_init.num_ccells_per_conn = hba->num_ccell;
3428 diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
3429 index 1d24a28..6adbdc3 100644
3430 --- a/drivers/scsi/bnx2i/bnx2i_init.c
3431 +++ b/drivers/scsi/bnx2i/bnx2i_init.c
3432 @@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
3433 wait_event_interruptible_timeout(hba->eh_wait,
3434 (list_empty(&hba->ep_ofld_list) &&
3435 list_empty(&hba->ep_destroy_list)),
3436 - 10 * HZ);
3437 + 2 * HZ);
3438 /* Wait for all endpoints to be torn down, Chip will be reset once
3439 * control returns to network driver. So it is required to cleanup and
3440 * release all connection resources before returning from this routine.
3441 diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
3442 index 1809f9c..51a970f 100644
3443 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
3444 +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
3445 @@ -858,7 +858,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
3446 mutex_init(&hba->net_dev_lock);
3447 init_waitqueue_head(&hba->eh_wait);
3448 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
3449 - hba->hba_shutdown_tmo = 20 * HZ;
3450 + hba->hba_shutdown_tmo = 30 * HZ;
3451 hba->conn_teardown_tmo = 20 * HZ;
3452 hba->conn_ctx_destroy_tmo = 6 * HZ;
3453 } else { /* 5706/5708/5709 */
3454 @@ -1208,6 +1208,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
3455 struct bnx2i_cmd *cmd = task->dd_data;
3456 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
3457
3458 + if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
3459 + return -ENOMEM;
3460 +
3461 /*
3462 * If there is no scsi_cmnd this must be a mgmt task
3463 */
3464 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
3465 index d2064a0..9aab26a 100644
3466 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
3467 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
3468 @@ -113,6 +113,7 @@ struct sense_info {
3469 };
3470
3471
3472 +#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
3473 #define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
3474
3475 /**
3476 @@ -121,6 +122,7 @@ struct sense_info {
3477 * @work: work object (ioc->fault_reset_work_q)
3478 * @cancel_pending_work: flag set during reset handling
3479 * @ioc: per adapter object
3480 + * @device_handle: device handle
3481 * @VF_ID: virtual function id
3482 * @VP_ID: virtual port id
3483 * @ignore: flag meaning this event has been marked to ignore
3484 @@ -134,6 +136,7 @@ struct fw_event_work {
3485 u8 cancel_pending_work;
3486 struct delayed_work delayed_work;
3487 struct MPT2SAS_ADAPTER *ioc;
3488 + u16 device_handle;
3489 u8 VF_ID;
3490 u8 VP_ID;
3491 u8 ignore;
3492 @@ -3708,17 +3711,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3493 #endif
3494
3495 /**
3496 - * _scsih_smart_predicted_fault - illuminate Fault LED
3497 + * _scsih_turn_on_fault_led - illuminate Fault LED
3498 * @ioc: per adapter object
3499 * @handle: device handle
3500 + * Context: process
3501 *
3502 * Return nothing.
3503 */
3504 static void
3505 -_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3506 +_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3507 {
3508 Mpi2SepReply_t mpi_reply;
3509 Mpi2SepRequest_t mpi_request;
3510 +
3511 + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
3512 + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
3513 + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
3514 + mpi_request.SlotStatus =
3515 + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
3516 + mpi_request.DevHandle = cpu_to_le16(handle);
3517 + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
3518 + if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
3519 + &mpi_request)) != 0) {
3520 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
3521 + __FILE__, __LINE__, __func__);
3522 + return;
3523 + }
3524 +
3525 + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
3526 + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
3527 + "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
3528 + le16_to_cpu(mpi_reply.IOCStatus),
3529 + le32_to_cpu(mpi_reply.IOCLogInfo)));
3530 + return;
3531 + }
3532 +}
3533 +
3534 +/**
3535 + * _scsih_send_event_to_turn_on_fault_led - fire delayed event
3536 + * @ioc: per adapter object
3537 + * @handle: device handle
3538 + * Context: interrupt.
3539 + *
3540 + * Return nothing.
3541 + */
3542 +static void
3543 +_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3544 +{
3545 + struct fw_event_work *fw_event;
3546 +
3547 + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
3548 + if (!fw_event)
3549 + return;
3550 + fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
3551 + fw_event->device_handle = handle;
3552 + fw_event->ioc = ioc;
3553 + _scsih_fw_event_add(ioc, fw_event);
3554 +}
3555 +
3556 +/**
3557 + * _scsih_smart_predicted_fault - process smart errors
3558 + * @ioc: per adapter object
3559 + * @handle: device handle
3560 + * Context: interrupt.
3561 + *
3562 + * Return nothing.
3563 + */
3564 +static void
3565 +_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3566 +{
3567 struct scsi_target *starget;
3568 struct MPT2SAS_TARGET *sas_target_priv_data;
3569 Mpi2EventNotificationReply_t *event_reply;
3570 @@ -3745,30 +3806,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3571 starget_printk(KERN_WARNING, starget, "predicted fault\n");
3572 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3573
3574 - if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
3575 - memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
3576 - mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
3577 - mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
3578 - mpi_request.SlotStatus =
3579 - cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
3580 - mpi_request.DevHandle = cpu_to_le16(handle);
3581 - mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
3582 - if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
3583 - &mpi_request)) != 0) {
3584 - printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3585 - ioc->name, __FILE__, __LINE__, __func__);
3586 - return;
3587 - }
3588 -
3589 - if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
3590 - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
3591 - "enclosure_processor: ioc_status (0x%04x), "
3592 - "loginfo(0x%08x)\n", ioc->name,
3593 - le16_to_cpu(mpi_reply.IOCStatus),
3594 - le32_to_cpu(mpi_reply.IOCLogInfo)));
3595 - return;
3596 - }
3597 - }
3598 + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
3599 + _scsih_send_event_to_turn_on_fault_led(ioc, handle);
3600
3601 /* insert into event log */
3602 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
3603 @@ -6330,6 +6369,9 @@ _firmware_event_work(struct work_struct *work)
3604 }
3605
3606 switch (fw_event->event) {
3607 + case MPT2SAS_TURN_ON_FAULT_LED:
3608 + _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
3609 + break;
3610 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3611 _scsih_sas_topology_change_event(ioc, fw_event);
3612 break;
3613 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
3614 index d3e58d7..c52a0a2 100644
3615 --- a/drivers/scsi/qla2xxx/qla_attr.c
3616 +++ b/drivers/scsi/qla2xxx/qla_attr.c
3617 @@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
3618
3619 scsi_remove_host(vha->host);
3620
3621 + /* Allow timer to run to drain queued items, when removing vp */
3622 + qla24xx_deallocate_vp_id(vha);
3623 +
3624 if (vha->timer_active) {
3625 qla2x00_vp_stop_timer(vha);
3626 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
3627 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
3628 }
3629
3630 - qla24xx_deallocate_vp_id(vha);
3631 -
3632 /* No pending activities shall be there on the vha now */
3633 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
3634 * the net we have placed below */
3635 diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
3636 index f5ba09c..5223c1d 100644
3637 --- a/drivers/scsi/qla2xxx/qla_fw.h
3638 +++ b/drivers/scsi/qla2xxx/qla_fw.h
3639 @@ -416,8 +416,7 @@ struct cmd_type_6 {
3640 uint8_t vp_index;
3641
3642 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
3643 - uint16_t fcp_data_dseg_len; /* Data segment length. */
3644 - uint16_t reserved_1; /* MUST be set to 0. */
3645 + uint32_t fcp_data_dseg_len; /* Data segment length. */
3646 };
3647
3648 #define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
3649 diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
3650 index 455fe13..eb31213 100644
3651 --- a/drivers/scsi/qla2xxx/qla_nx.c
3652 +++ b/drivers/scsi/qla2xxx/qla_nx.c
3653 @@ -2548,11 +2548,11 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
3654 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3655 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
3656 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
3657 - cmd_pkt->fcp_data_dseg_len = dsd_list_len;
3658 + *dsd_seg++ = cpu_to_le32(dsd_list_len);
3659 } else {
3660 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
3661 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
3662 - *cur_dsd++ = dsd_list_len;
3663 + *cur_dsd++ = cpu_to_le32(dsd_list_len);
3664 }
3665 cur_dsd = (uint32_t *)next_dsd;
3666 while (avail_dsds) {
3667 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
3668 index aa77475..4c3f5e8 100644
3669 --- a/drivers/scsi/qla2xxx/qla_os.c
3670 +++ b/drivers/scsi/qla2xxx/qla_os.c
3671 @@ -2360,21 +2360,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
3672 base_vha = pci_get_drvdata(pdev);
3673 ha = base_vha->hw;
3674
3675 - spin_lock_irqsave(&ha->vport_slock, flags);
3676 - list_for_each_entry(vha, &ha->vp_list, list) {
3677 - atomic_inc(&vha->vref_count);
3678 + mutex_lock(&ha->vport_lock);
3679 + while (ha->cur_vport_count) {
3680 + struct Scsi_Host *scsi_host;
3681
3682 - if (vha->fc_vport) {
3683 - spin_unlock_irqrestore(&ha->vport_slock, flags);
3684 + spin_lock_irqsave(&ha->vport_slock, flags);
3685
3686 - fc_vport_terminate(vha->fc_vport);
3687 + BUG_ON(base_vha->list.next == &ha->vp_list);
3688 + /* This assumes first entry in ha->vp_list is always base vha */
3689 + vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
3690 + scsi_host = scsi_host_get(vha->host);
3691
3692 - spin_lock_irqsave(&ha->vport_slock, flags);
3693 - }
3694 + spin_unlock_irqrestore(&ha->vport_slock, flags);
3695 + mutex_unlock(&ha->vport_lock);
3696 +
3697 + fc_vport_terminate(vha->fc_vport);
3698 + scsi_host_put(vha->host);
3699
3700 - atomic_dec(&vha->vref_count);
3701 + mutex_lock(&ha->vport_lock);
3702 }
3703 - spin_unlock_irqrestore(&ha->vport_slock, flags);
3704 + mutex_unlock(&ha->vport_lock);
3705
3706 set_bit(UNLOADING, &base_vha->dpc_flags);
3707
3708 @@ -3604,7 +3609,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3709 if (!pci_channel_offline(ha->pdev))
3710 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3711
3712 - if (IS_QLA82XX(ha)) {
3713 + /* Make sure qla82xx_watchdog is run only for physical port */
3714 + if (!vha->vp_idx && IS_QLA82XX(ha)) {
3715 if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
3716 start_dpc++;
3717 qla82xx_watchdog(vha);
3718 @@ -3675,8 +3681,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3719 atomic_read(&vha->loop_down_timer)));
3720 }
3721
3722 - /* Check if beacon LED needs to be blinked */
3723 - if (ha->beacon_blink_led == 1) {
3724 + /* Check if beacon LED needs to be blinked for physical host only */
3725 + if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3726 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3727 start_dpc++;
3728 }
3729 diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
3730 index 95019c7..4778e27 100644
3731 --- a/drivers/scsi/sr.c
3732 +++ b/drivers/scsi/sr.c
3733 @@ -636,7 +636,7 @@ static int sr_probe(struct device *dev)
3734 disk->first_minor = minor;
3735 sprintf(disk->disk_name, "sr%d", minor);
3736 disk->fops = &sr_bdops;
3737 - disk->flags = GENHD_FL_CD;
3738 + disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
3739 disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
3740
3741 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
3742 diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
3743 index 9f4b58b..7e22b73 100644
3744 --- a/drivers/scsi/ultrastor.c
3745 +++ b/drivers/scsi/ultrastor.c
3746 @@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
3747 "0: bsfw %1,%w0\n\t"
3748 "btr %0,%1\n\t"
3749 "jnc 0b"
3750 - : "=&r" (rv), "=m" (*field) :);
3751 + : "=&r" (rv), "+m" (*field) :);
3752
3753 return rv;
3754 }
3755 diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
3756 index 6172335..82dd6fb 100644
3757 --- a/drivers/sh/clk/cpg.c
3758 +++ b/drivers/sh/clk/cpg.c
3759 @@ -105,7 +105,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
3760
3761 /* Rebuild the frequency table */
3762 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
3763 - table, &clk->arch_flags);
3764 + table, NULL);
3765
3766 return 0;
3767 }
3768 diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c b/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
3769 index f008659..f7bff4e 100644
3770 --- a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
3771 +++ b/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
3772 @@ -1123,21 +1123,12 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
3773 ini->txretry[index] = 0;
3774
3775 /* ampdu_ack_len: number of acked aggregated frames */
3776 - /* ampdu_ack_map: block ack bit map for the aggregation */
3777 /* ampdu_len: number of aggregated frames */
3778 rate_status(wlc, tx_info, txs, mcs);
3779 tx_info->flags |= IEEE80211_TX_STAT_ACK;
3780 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
3781 -
3782 - /* XXX TODO: Make these accurate. */
3783 tx_info->status.ampdu_ack_len =
3784 - (txs->
3785 - status & TX_STATUS_FRM_RTX_MASK) >>
3786 - TX_STATUS_FRM_RTX_SHIFT;
3787 - tx_info->status.ampdu_len =
3788 - (txs->
3789 - status & TX_STATUS_FRM_RTX_MASK) >>
3790 - TX_STATUS_FRM_RTX_SHIFT;
3791 + tx_info->status.ampdu_len = 1;
3792
3793 skb_pull(p, D11_PHY_HDR_LEN);
3794 skb_pull(p, D11_TXH_LEN);
3795 @@ -1163,6 +1154,8 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
3796 /* Retry timeout */
3797 ini->tx_in_transit--;
3798 ieee80211_tx_info_clear_status(tx_info);
3799 + tx_info->status.ampdu_ack_len = 0;
3800 + tx_info->status.ampdu_len = 1;
3801 tx_info->flags |=
3802 IEEE80211_TX_STAT_AMPDU_NO_BACK;
3803 skb_pull(p, D11_PHY_HDR_LEN);
3804 diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
3805 index 8b1451d..8486eb1 100644
3806 --- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
3807 +++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
3808 @@ -68,7 +68,10 @@ static u8 do_join(struct _adapter *padapter)
3809 pmlmepriv->fw_state |= _FW_UNDER_LINKING;
3810 pmlmepriv->pscanned = plist;
3811 pmlmepriv->to_join = true;
3812 - if (_queue_empty(queue) == true) {
3813 +
3814 + /* adhoc mode will start with an empty queue, but skip checking */
3815 + if (!check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) &&
3816 + _queue_empty(queue)) {
3817 if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
3818 pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
3819 /* when set_ssid/set_bssid for do_join(), but scanning queue
3820 diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
3821 index 7b1fe45..37b650b 100644
3822 --- a/drivers/staging/usbip/usbip_common.c
3823 +++ b/drivers/staging/usbip/usbip_common.c
3824 @@ -604,7 +604,7 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
3825 be32_to_cpus(&pdu->status);
3826 be32_to_cpus(&pdu->actual_length);
3827 be32_to_cpus(&pdu->start_frame);
3828 - cpu_to_be32s(&pdu->number_of_packets);
3829 + be32_to_cpus(&pdu->number_of_packets);
3830 be32_to_cpus(&pdu->error_count);
3831 }
3832 }
3833 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
3834 index d25e208..fc10ed4 100644
3835 --- a/drivers/target/target_core_device.c
3836 +++ b/drivers/target/target_core_device.c
3837 @@ -150,13 +150,13 @@ out:
3838
3839 {
3840 struct se_device *dev = se_lun->lun_se_dev;
3841 - spin_lock(&dev->stats_lock);
3842 + spin_lock_irq(&dev->stats_lock);
3843 dev->num_cmds++;
3844 if (se_cmd->data_direction == DMA_TO_DEVICE)
3845 dev->write_bytes += se_cmd->data_length;
3846 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
3847 dev->read_bytes += se_cmd->data_length;
3848 - spin_unlock(&dev->stats_lock);
3849 + spin_unlock_irq(&dev->stats_lock);
3850 }
3851
3852 /*
3853 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
3854 index 4a10983..59b8b9c 100644
3855 --- a/drivers/target/target_core_tmr.c
3856 +++ b/drivers/target/target_core_tmr.c
3857 @@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
3858 {
3859 struct se_tmr_req *tmr;
3860
3861 - tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
3862 + tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
3863 + GFP_ATOMIC : GFP_KERNEL);
3864 if (!(tmr)) {
3865 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
3866 return ERR_PTR(-ENOMEM);
3867 @@ -398,9 +399,9 @@ int core_tmr_lun_reset(
3868 printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
3869 }
3870
3871 - spin_lock(&dev->stats_lock);
3872 + spin_lock_irq(&dev->stats_lock);
3873 dev->num_resets++;
3874 - spin_unlock(&dev->stats_lock);
3875 + spin_unlock_irq(&dev->stats_lock);
3876
3877 DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
3878 (preempt_and_abort_list) ? "Preempt" : "TMR",
3879 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3880 index 9583b23..beaf8fa 100644
3881 --- a/drivers/target/target_core_transport.c
3882 +++ b/drivers/target/target_core_transport.c
3883 @@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
3884 transport_all_task_dev_remove_state(cmd);
3885 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3886
3887 - transport_free_dev_tasks(cmd);
3888
3889 check_lun:
3890 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
3891 @@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
3892 break;
3893
3894 list_del(&task->t_execute_list);
3895 + atomic_set(&task->task_execute_queue, 0);
3896 atomic_dec(&dev->execute_tasks);
3897
3898 return task;
3899 @@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
3900 {
3901 unsigned long flags;
3902
3903 + if (atomic_read(&task->task_execute_queue) == 0) {
3904 + dump_stack();
3905 + return;
3906 + }
3907 +
3908 spin_lock_irqsave(&dev->execute_task_lock, flags);
3909 list_del(&task->t_execute_list);
3910 + atomic_set(&task->task_execute_queue, 0);
3911 atomic_dec(&dev->execute_tasks);
3912 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
3913 }
3914 @@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
3915 }
3916 EXPORT_SYMBOL(transport_generic_handle_tmr);
3917
3918 +void transport_generic_free_cmd_intr(
3919 + struct se_cmd *cmd)
3920 +{
3921 + transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
3922 +}
3923 +EXPORT_SYMBOL(transport_generic_free_cmd_intr);
3924 +
3925 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
3926 {
3927 struct se_task *task, *task_tmp;
3928 @@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
3929 sg_end_cur->page_link &= ~0x02;
3930
3931 sg_chain(sg_head, task_sg_num, sg_head_cur);
3932 - sg_count += (task->task_sg_num + 1);
3933 - } else
3934 sg_count += task->task_sg_num;
3935 + task_sg_num = (task->task_sg_num + 1);
3936 + } else {
3937 + sg_chain(sg_head, task_sg_num, sg_head_cur);
3938 + sg_count += task->task_sg_num;
3939 + task_sg_num = task->task_sg_num;
3940 + }
3941
3942 sg_head = sg_head_cur;
3943 sg_link = sg_link_cur;
3944 - task_sg_num = task->task_sg_num;
3945 continue;
3946 }
3947 sg_head = sg_first = &task->task_sg[0];
3948 sg_link = &task->task_sg[task->task_sg_num];
3949 - task_sg_num = task->task_sg_num;
3950 /*
3951 * Check for single task..
3952 */
3953 @@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
3954 */
3955 sg_end = &task->task_sg[task->task_sg_num - 1];
3956 sg_end->page_link &= ~0x02;
3957 - sg_count += (task->task_sg_num + 1);
3958 - } else
3959 sg_count += task->task_sg_num;
3960 + task_sg_num = (task->task_sg_num + 1);
3961 + } else {
3962 + sg_count += task->task_sg_num;
3963 + task_sg_num = task->task_sg_num;
3964 + }
3965 }
3966 /*
3967 * Setup the starting pointer and total t_tasks_sg_linked_no including
3968 @@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
3969 T_TASK(cmd)->t_tasks_sg_chained = sg_first;
3970 T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
3971
3972 - DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
3973 - " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
3974 + DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
3975 + " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
3976 T_TASK(cmd)->t_tasks_sg_chained_no);
3977
3978 for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
3979 T_TASK(cmd)->t_tasks_sg_chained_no, i) {
3980
3981 - DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
3982 - sg, sg_page(sg), sg->length, sg->offset);
3983 + DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
3984 + i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
3985 if (sg_is_chain(sg))
3986 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
3987 if (sg_is_last(sg))
3988 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
3989 }
3990 -
3991 }
3992 EXPORT_SYMBOL(transport_do_task_sg_chain);
3993
3994 @@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
3995 if (wait_for_tasks && cmd->transport_wait_for_tasks)
3996 cmd->transport_wait_for_tasks(cmd, 0, 0);
3997
3998 + transport_free_dev_tasks(cmd);
3999 +
4000 transport_generic_remove(cmd, release_to_pool,
4001 session_reinstatement);
4002 }
4003 @@ -6132,6 +6151,9 @@ get_cmd:
4004 case TRANSPORT_REMOVE:
4005 transport_generic_remove(cmd, 1, 0);
4006 break;
4007 + case TRANSPORT_FREE_CMD_INTR:
4008 + transport_generic_free_cmd(cmd, 0, 1, 0);
4009 + break;
4010 case TRANSPORT_PROCESS_TMR:
4011 transport_generic_do_tmr(cmd);
4012 break;
4013 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
4014 index e057e53..caa2535 100644
4015 --- a/drivers/usb/class/cdc-acm.c
4016 +++ b/drivers/usb/class/cdc-acm.c
4017 @@ -946,7 +946,7 @@ static int acm_probe(struct usb_interface *intf,
4018 u8 ac_management_function = 0;
4019 u8 call_management_function = 0;
4020 int call_interface_num = -1;
4021 - int data_interface_num;
4022 + int data_interface_num = -1;
4023 unsigned long quirks;
4024 int num_rx_buf;
4025 int i;
4026 @@ -1030,7 +1030,11 @@ next_desc:
4027 if (!union_header) {
4028 if (call_interface_num > 0) {
4029 dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
4030 - data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
4031 + /* quirks for Droids MuIn LCD */
4032 + if (quirks & NO_DATA_INTERFACE)
4033 + data_interface = usb_ifnum_to_if(usb_dev, 0);
4034 + else
4035 + data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
4036 control_interface = intf;
4037 } else {
4038 if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
4039 @@ -1622,6 +1626,11 @@ static const struct usb_device_id acm_ids[] = {
4040 .driver_info = NOT_A_MODEM,
4041 },
4042
4043 + /* Support for Droids MuIn LCD */
4044 + { USB_DEVICE(0x04d8, 0x000b),
4045 + .driver_info = NO_DATA_INTERFACE,
4046 + },
4047 +
4048 /* control interfaces without any protocol set */
4049 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
4050 USB_CDC_PROTO_NONE) },
4051 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
4052 index b4ea54d..683104a 100644
4053 --- a/drivers/usb/class/cdc-acm.h
4054 +++ b/drivers/usb/class/cdc-acm.h
4055 @@ -137,3 +137,4 @@ struct acm {
4056 #define SINGLE_RX_URB 2
4057 #define NO_CAP_LINE 4
4058 #define NOT_A_MODEM 8
4059 +#define NO_DATA_INTERFACE 16
4060 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4061 index 77a7fae..cddc533 100644
4062 --- a/drivers/usb/core/hcd.c
4063 +++ b/drivers/usb/core/hcd.c
4064 @@ -986,7 +986,7 @@ static int register_root_hub(struct usb_hcd *hcd)
4065 spin_unlock_irq (&hcd_root_hub_lock);
4066
4067 /* Did the HC die before the root hub was registered? */
4068 - if (HCD_DEAD(hcd) || hcd->state == HC_STATE_HALT)
4069 + if (HCD_DEAD(hcd))
4070 usb_hc_died (hcd); /* This time clean up */
4071 }
4072
4073 @@ -2128,9 +2128,6 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
4074 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
4075 if (hcd->shared_hcd)
4076 set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
4077 -
4078 - if (unlikely(hcd->state == HC_STATE_HALT))
4079 - usb_hc_died(hcd);
4080 rc = IRQ_HANDLED;
4081 }
4082
4083 diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
4084 index 9b7cdb1..41dc093 100644
4085 --- a/drivers/usb/gadget/at91_udc.c
4086 +++ b/drivers/usb/gadget/at91_udc.c
4087 @@ -1767,7 +1767,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
4088 }
4089
4090 /* newer chips have more FIFO memory than rm9200 */
4091 - if (cpu_is_at91sam9260()) {
4092 + if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
4093 udc->ep[0].maxpacket = 64;
4094 udc->ep[3].maxpacket = 64;
4095 udc->ep[4].maxpacket = 512;
4096 diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
4097 index 882484a..fa12ec8 100644
4098 --- a/drivers/usb/gadget/f_rndis.c
4099 +++ b/drivers/usb/gadget/f_rndis.c
4100 @@ -420,8 +420,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
4101 */
4102 case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
4103 | USB_CDC_SEND_ENCAPSULATED_COMMAND:
4104 - if (w_length > req->length || w_value
4105 - || w_index != rndis->ctrl_id)
4106 + if (w_value || w_index != rndis->ctrl_id)
4107 goto invalid;
4108 /* read the request; process it later */
4109 value = w_length;
4110 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
4111 index 78561d1..c606b02 100644
4112 --- a/drivers/usb/host/ehci-hcd.c
4113 +++ b/drivers/usb/host/ehci-hcd.c
4114 @@ -777,8 +777,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
4115 goto dead;
4116 }
4117
4118 + /* Shared IRQ? */
4119 masked_status = status & INTR_MASK;
4120 - if (!masked_status) { /* irq sharing? */
4121 + if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) {
4122 spin_unlock(&ehci->lock);
4123 return IRQ_NONE;
4124 }
4125 @@ -873,6 +874,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
4126 dead:
4127 ehci_reset(ehci);
4128 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
4129 + usb_hc_died(hcd);
4130 /* generic layer kills/unlinks all urbs, then
4131 * uses ehci_stop to clean up the rest
4132 */
4133 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
4134 index 1543c83..d12426f 100644
4135 --- a/drivers/usb/host/ehci-sched.c
4136 +++ b/drivers/usb/host/ehci-sched.c
4137 @@ -471,8 +471,10 @@ static int enable_periodic (struct ehci_hcd *ehci)
4138 */
4139 status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
4140 STS_PSS, 0, 9 * 125);
4141 - if (status)
4142 + if (status) {
4143 + usb_hc_died(ehci_to_hcd(ehci));
4144 return status;
4145 + }
4146
4147 cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
4148 ehci_writel(ehci, cmd, &ehci->regs->command);
4149 @@ -510,8 +512,10 @@ static int disable_periodic (struct ehci_hcd *ehci)
4150 */
4151 status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
4152 STS_PSS, STS_PSS, 9 * 125);
4153 - if (status)
4154 + if (status) {
4155 + usb_hc_died(ehci_to_hcd(ehci));
4156 return status;
4157 + }
4158
4159 cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
4160 ehci_writel(ehci, cmd, &ehci->regs->command);
4161 diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
4162 index c0e22f2..baae4cc 100644
4163 --- a/drivers/usb/host/isp116x-hcd.c
4164 +++ b/drivers/usb/host/isp116x-hcd.c
4165 @@ -612,6 +612,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
4166 /* IRQ's are off, we do no DMA,
4167 perfectly ready to die ... */
4168 hcd->state = HC_STATE_HALT;
4169 + usb_hc_died(hcd);
4170 ret = IRQ_HANDLED;
4171 goto done;
4172 }
4173 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
4174 index d557235..c001fff 100644
4175 --- a/drivers/usb/host/ohci-hcd.c
4176 +++ b/drivers/usb/host/ohci-hcd.c
4177 @@ -764,6 +764,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
4178 if (ints == ~(u32)0) {
4179 disable (ohci);
4180 ohci_dbg (ohci, "device removed!\n");
4181 + usb_hc_died(hcd);
4182 return IRQ_HANDLED;
4183 }
4184
4185 @@ -771,7 +772,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
4186 ints &= ohci_readl(ohci, &regs->intrenable);
4187
4188 /* interrupt for some other device? */
4189 - if (ints == 0)
4190 + if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT))
4191 return IRQ_NOTMINE;
4192
4193 if (ints & OHCI_INTR_UE) {
4194 @@ -788,6 +789,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
4195 } else {
4196 disable (ohci);
4197 ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
4198 + usb_hc_died(hcd);
4199 }
4200
4201 ohci_dump (ohci, 1);
4202 diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
4203 index d84d6f0..ad8166c 100644
4204 --- a/drivers/usb/host/ohci-pci.c
4205 +++ b/drivers/usb/host/ohci-pci.c
4206 @@ -181,10 +181,18 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
4207 */
4208 static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
4209 {
4210 + struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
4211 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
4212
4213 - ohci->flags |= OHCI_QUIRK_SHUTDOWN;
4214 - ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
4215 + /* Evidently nVidia fixed their later hardware; this is a guess at
4216 + * the changeover point.
4217 + */
4218 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
4219 +
4220 + if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
4221 + ohci->flags |= OHCI_QUIRK_SHUTDOWN;
4222 + ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
4223 + }
4224
4225 return 0;
4226 }
4227 diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
4228 index 4a771f6..5fbe997 100644
4229 --- a/drivers/usb/host/oxu210hp-hcd.c
4230 +++ b/drivers/usb/host/oxu210hp-hcd.c
4231 @@ -1884,6 +1884,7 @@ static int enable_periodic(struct oxu_hcd *oxu)
4232 status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
4233 if (status != 0) {
4234 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
4235 + usb_hc_died(oxu_to_hcd(oxu));
4236 return status;
4237 }
4238
4239 @@ -1909,6 +1910,7 @@ static int disable_periodic(struct oxu_hcd *oxu)
4240 status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
4241 if (status != 0) {
4242 oxu_to_hcd(oxu)->state = HC_STATE_HALT;
4243 + usb_hc_died(oxu_to_hcd(oxu));
4244 return status;
4245 }
4246
4247 @@ -2449,8 +2451,9 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
4248 goto dead;
4249 }
4250
4251 + /* Shared IRQ? */
4252 status &= INTR_MASK;
4253 - if (!status) { /* irq sharing? */
4254 + if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
4255 spin_unlock(&oxu->lock);
4256 return IRQ_NONE;
4257 }
4258 @@ -2516,6 +2519,7 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
4259 dead:
4260 ehci_reset(oxu);
4261 writel(0, &oxu->regs->configured_flag);
4262 + usb_hc_died(hcd);
4263 /* generic layer kills/unlinks all urbs, then
4264 * uses oxu_stop to clean up the rest
4265 */
4266 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4267 index 627f343..783e5e0 100644
4268 --- a/drivers/usb/host/xhci-mem.c
4269 +++ b/drivers/usb/host/xhci-mem.c
4270 @@ -207,14 +207,13 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
4271
4272 rings_cached = virt_dev->num_rings_cached;
4273 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
4274 - virt_dev->num_rings_cached++;
4275 - rings_cached = virt_dev->num_rings_cached;
4276 virt_dev->ring_cache[rings_cached] =
4277 virt_dev->eps[ep_index].ring;
4278 + virt_dev->num_rings_cached++;
4279 xhci_dbg(xhci, "Cached old ring, "
4280 "%d ring%s cached\n",
4281 - rings_cached,
4282 - (rings_cached > 1) ? "s" : "");
4283 + virt_dev->num_rings_cached,
4284 + (virt_dev->num_rings_cached > 1) ? "s" : "");
4285 } else {
4286 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
4287 xhci_dbg(xhci, "Ring cache full (%d rings), "
4288 @@ -1046,12 +1045,12 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
4289 break;
4290
4291 case USB_SPEED_FULL:
4292 - if (usb_endpoint_xfer_int(&ep->desc)) {
4293 + if (usb_endpoint_xfer_isoc(&ep->desc)) {
4294 interval = xhci_parse_exponent_interval(udev, ep);
4295 break;
4296 }
4297 /*
4298 - * Fall through for isochronous endpoint interval decoding
4299 + * Fall through for interrupt endpoint interval decoding
4300 * since it uses the same rules as low speed interrupt
4301 * endpoints.
4302 */
4303 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4304 index 7437386..078b566 100644
4305 --- a/drivers/usb/host/xhci-ring.c
4306 +++ b/drivers/usb/host/xhci-ring.c
4307 @@ -1632,6 +1632,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
4308 else
4309 *status = 0;
4310 break;
4311 + case COMP_STOP_INVAL:
4312 + case COMP_STOP:
4313 + return finish_td(xhci, td, event_trb, event, ep, status, false);
4314 default:
4315 if (!xhci_requires_manual_halt_cleanup(xhci,
4316 ep_ctx, trb_comp_code))
4317 @@ -1676,15 +1679,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
4318 }
4319 } else {
4320 /* Maybe the event was for the data stage? */
4321 - if (trb_comp_code != COMP_STOP_INVAL) {
4322 - /* We didn't stop on a link TRB in the middle */
4323 - td->urb->actual_length =
4324 - td->urb->transfer_buffer_length -
4325 - TRB_LEN(event->transfer_len);
4326 - xhci_dbg(xhci, "Waiting for status "
4327 - "stage event\n");
4328 - return 0;
4329 - }
4330 + td->urb->actual_length =
4331 + td->urb->transfer_buffer_length -
4332 + TRB_LEN(le32_to_cpu(event->transfer_len));
4333 + xhci_dbg(xhci, "Waiting for status "
4334 + "stage event\n");
4335 + return 0;
4336 }
4337 }
4338
4339 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4340 index 81b976e..d2cd3ce 100644
4341 --- a/drivers/usb/host/xhci.c
4342 +++ b/drivers/usb/host/xhci.c
4343 @@ -1692,8 +1692,17 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
4344 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
4345 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
4346
4347 + /* Free any rings that were dropped, but not changed. */
4348 + for (i = 1; i < 31; ++i) {
4349 + if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
4350 + !(ctrl_ctx->add_flags & (1 << (i + 1))))
4351 + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
4352 + }
4353 xhci_zero_in_ctx(xhci, virt_dev);
4354 - /* Install new rings and free or cache any old rings */
4355 + /*
4356 + * Install any rings for completely new endpoints or changed endpoints,
4357 + * and free or cache any old rings from changed endpoints.
4358 + */
4359 for (i = 1; i < 31; ++i) {
4360 if (!virt_dev->eps[i].new_ring)
4361 continue;
4362 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
4363 index 0f11afd..ebeccb7 100644
4364 --- a/drivers/usb/serial/cp210x.c
4365 +++ b/drivers/usb/serial/cp210x.c
4366 @@ -112,6 +112,10 @@ static const struct usb_device_id id_table[] = {
4367 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
4368 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
4369 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
4370 + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
4371 + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
4372 + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
4373 + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
4374 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
4375 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
4376 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
4377 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
4378 index 4de6ef0..e8dbde5 100644
4379 --- a/drivers/usb/serial/ftdi_sio.c
4380 +++ b/drivers/usb/serial/ftdi_sio.c
4381 @@ -566,6 +566,7 @@ static struct usb_device_id id_table_combined [] = {
4382 { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) },
4383 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
4384 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
4385 + { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
4386 /*
4387 * ELV devices:
4388 */
4389 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
4390 index efffc23..1d946cd 100644
4391 --- a/drivers/usb/serial/ftdi_sio_ids.h
4392 +++ b/drivers/usb/serial/ftdi_sio_ids.h
4393 @@ -491,6 +491,11 @@
4394 /* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
4395 #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
4396
4397 +/*
4398 + * TavIR AVR product ids (FTDI_VID)
4399 + */
4400 +#define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
4401 +
4402
4403
4404 /********************************/
4405 diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
4406 index 26710b1..456447e033 100644
4407 --- a/drivers/usb/serial/garmin_gps.c
4408 +++ b/drivers/usb/serial/garmin_gps.c
4409 @@ -1,7 +1,7 @@
4410 /*
4411 * Garmin GPS driver
4412 *
4413 - * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
4414 + * Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de
4415 *
4416 * The latest version of the driver can be found at
4417 * http://sourceforge.net/projects/garmin-gps/
4418 @@ -51,7 +51,7 @@ static int debug;
4419 */
4420
4421 #define VERSION_MAJOR 0
4422 -#define VERSION_MINOR 33
4423 +#define VERSION_MINOR 36
4424
4425 #define _STR(s) #s
4426 #define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
4427 @@ -410,6 +410,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
4428 */
4429 static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
4430 {
4431 + unsigned long flags;
4432 const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
4433 __le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
4434
4435 @@ -458,7 +459,9 @@ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
4436 /* if this was an abort-transfer command, flush all
4437 queued data. */
4438 if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
4439 + spin_lock_irqsave(&garmin_data_p->lock, flags);
4440 garmin_data_p->flags |= FLAGS_DROP_DATA;
4441 + spin_unlock_irqrestore(&garmin_data_p->lock, flags);
4442 pkt_clear(garmin_data_p);
4443 }
4444
4445 @@ -943,7 +946,7 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
4446 spin_lock_irqsave(&garmin_data_p->lock, flags);
4447 garmin_data_p->mode = initial_mode;
4448 garmin_data_p->count = 0;
4449 - garmin_data_p->flags = 0;
4450 + garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
4451 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
4452
4453 /* shutdown any bulk reads that might be going on */
4454 @@ -1178,7 +1181,8 @@ static int garmin_write_room(struct tty_struct *tty)
4455
4456
4457 static void garmin_read_process(struct garmin_data *garmin_data_p,
4458 - unsigned char *data, unsigned data_length)
4459 + unsigned char *data, unsigned data_length,
4460 + int bulk_data)
4461 {
4462 unsigned long flags;
4463
4464 @@ -1193,7 +1197,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
4465 send it directly to the tty port */
4466 if (garmin_data_p->flags & FLAGS_QUEUING) {
4467 pkt_add(garmin_data_p, data, data_length);
4468 - } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
4469 + } else if (bulk_data ||
4470 + getLayerId(data) == GARMIN_LAYERID_APPL) {
4471
4472 spin_lock_irqsave(&garmin_data_p->lock, flags);
4473 garmin_data_p->flags |= APP_RESP_SEEN;
4474 @@ -1237,7 +1242,7 @@ static void garmin_read_bulk_callback(struct urb *urb)
4475 usb_serial_debug_data(debug, &port->dev,
4476 __func__, urb->actual_length, data);
4477
4478 - garmin_read_process(garmin_data_p, data, urb->actual_length);
4479 + garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
4480
4481 if (urb->actual_length == 0 &&
4482 0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
4483 @@ -1346,7 +1351,7 @@ static void garmin_read_int_callback(struct urb *urb)
4484 __func__, garmin_data_p->serial_num);
4485 }
4486
4487 - garmin_read_process(garmin_data_p, data, urb->actual_length);
4488 + garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
4489
4490 port->interrupt_in_urb->dev = port->serial->dev;
4491 retval = usb_submit_urb(urb, GFP_ATOMIC);
4492 @@ -1461,6 +1466,7 @@ static int garmin_attach(struct usb_serial *serial)
4493 garmin_data_p->timer.function = timeout_handler;
4494 garmin_data_p->port = port;
4495 garmin_data_p->state = 0;
4496 + garmin_data_p->flags = 0;
4497 garmin_data_p->count = 0;
4498 usb_set_serial_port_data(port, garmin_data_p);
4499
4500 diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
4501 index 653465f..e2bfecc 100644
4502 --- a/drivers/usb/serial/moto_modem.c
4503 +++ b/drivers/usb/serial/moto_modem.c
4504 @@ -25,6 +25,7 @@ static const struct usb_device_id id_table[] = {
4505 { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
4506 { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */
4507 { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
4508 + { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */
4509 { USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
4510 { },
4511 };
4512 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4513 index d77ff04..318dd00 100644
4514 --- a/drivers/usb/serial/option.c
4515 +++ b/drivers/usb/serial/option.c
4516 @@ -149,6 +149,7 @@ static void option_instat_callback(struct urb *urb);
4517 #define HUAWEI_PRODUCT_K3765 0x1465
4518 #define HUAWEI_PRODUCT_E14AC 0x14AC
4519 #define HUAWEI_PRODUCT_ETS1220 0x1803
4520 +#define HUAWEI_PRODUCT_E353 0x1506
4521
4522 #define QUANTA_VENDOR_ID 0x0408
4523 #define QUANTA_PRODUCT_Q101 0xEA02
4524 @@ -532,6 +533,7 @@ static const struct usb_device_id option_ids[] = {
4525 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
4526 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
4527 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
4528 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
4529 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
4530 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
4531 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
4532 @@ -972,7 +974,7 @@ static const struct usb_device_id option_ids[] = {
4533 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
4534 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
4535 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
4536 - { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
4537 + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
4538 { } /* Terminating entry */
4539 };
4540 MODULE_DEVICE_TABLE(usb, option_ids);
4541 @@ -1109,6 +1111,12 @@ static int option_probe(struct usb_serial *serial,
4542 serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
4543 return -ENODEV;
4544
4545 + /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
4546 + if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID &&
4547 + serial->dev->descriptor.idProduct == SAMSUNG_PRODUCT_GT_B3730 &&
4548 + serial->interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA)
4549 + return -ENODEV;
4550 +
4551 data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
4552
4553 if (!data)
4554 diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
4555 index 3236e03..e41f50c 100644
4556 --- a/drivers/usb/storage/unusual_realtek.h
4557 +++ b/drivers/usb/storage/unusual_realtek.h
4558 @@ -23,19 +23,19 @@
4559 #if defined(CONFIG_USB_STORAGE_REALTEK) || \
4560 defined(CONFIG_USB_STORAGE_REALTEK_MODULE)
4561
4562 -UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
4563 +UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
4564 "Realtek",
4565 "USB Card Reader",
4566 - USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
4567 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
4568
4569 UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
4570 "Realtek",
4571 "USB Card Reader",
4572 - USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
4573 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
4574
4575 -UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
4576 +UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
4577 "Realtek",
4578 "USB Card Reader",
4579 - USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
4580 + USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
4581
4582 #endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
4583 diff --git a/fs/block_dev.c b/fs/block_dev.c
4584 index 257b00e..1f2b199 100644
4585 --- a/fs/block_dev.c
4586 +++ b/fs/block_dev.c
4587 @@ -1120,6 +1120,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
4588 goto restart;
4589 }
4590 }
4591 +
4592 + if (!ret && !bdev->bd_openers) {
4593 + bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
4594 + bdi = blk_get_backing_dev_info(bdev);
4595 + if (bdi == NULL)
4596 + bdi = &default_backing_dev_info;
4597 + bdev_inode_switch_bdi(bdev->bd_inode, bdi);
4598 + }
4599 +
4600 /*
4601 * If the device is invalidated, rescan partition
4602 * if open succeeded or failed with -ENOMEDIUM.
4603 @@ -1130,14 +1139,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
4604 rescan_partitions(disk, bdev);
4605 if (ret)
4606 goto out_clear;
4607 -
4608 - if (!bdev->bd_openers) {
4609 - bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
4610 - bdi = blk_get_backing_dev_info(bdev);
4611 - if (bdi == NULL)
4612 - bdi = &default_backing_dev_info;
4613 - bdev_inode_switch_bdi(bdev->bd_inode, bdi);
4614 - }
4615 } else {
4616 struct block_device *whole;
4617 whole = bdget_disk(disk, 0);
4618 @@ -1237,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
4619 res = __blkdev_get(bdev, mode, 0);
4620
4621 if (whole) {
4622 + struct gendisk *disk = whole->bd_disk;
4623 +
4624 /* finish claiming */
4625 mutex_lock(&bdev->bd_mutex);
4626 spin_lock(&bdev_lock);
4627 @@ -1263,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
4628 spin_unlock(&bdev_lock);
4629
4630 /*
4631 - * Block event polling for write claims. Any write
4632 - * holder makes the write_holder state stick until all
4633 - * are released. This is good enough and tracking
4634 - * individual writeable reference is too fragile given
4635 - * the way @mode is used in blkdev_get/put().
4636 + * Block event polling for write claims if requested. Any
4637 + * write holder makes the write_holder state stick until
4638 + * all are released. This is good enough and tracking
4639 + * individual writeable reference is too fragile given the
4640 + * way @mode is used in blkdev_get/put().
4641 */
4642 - if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
4643 + if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
4644 + !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
4645 bdev->bd_write_holder = true;
4646 - disk_block_events(bdev->bd_disk);
4647 + disk_block_events(disk);
4648 }
4649
4650 mutex_unlock(&bdev->bd_mutex);
4651 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4652 index 277262a..29fac128 100644
4653 --- a/fs/cifs/connect.c
4654 +++ b/fs/cifs/connect.c
4655 @@ -2447,7 +2447,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
4656
4657 if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
4658 __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
4659 -
4660 + cFYI(1, "unix caps which server supports %lld", cap);
4661 /* check for reconnect case in which we do not
4662 want to change the mount behavior if we can avoid it */
4663 if (vol_info == NULL) {
4664 @@ -2465,6 +2465,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
4665 }
4666 }
4667
4668 + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
4669 + cERROR(1, "per-share encryption not supported yet");
4670 +
4671 cap &= CIFS_UNIX_CAP_MASK;
4672 if (vol_info && vol_info->no_psx_acl)
4673 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
4674 @@ -2513,6 +2516,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
4675 cFYI(1, "very large read cap");
4676 if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
4677 cFYI(1, "very large write cap");
4678 + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
4679 + cFYI(1, "transport encryption cap");
4680 + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
4681 + cFYI(1, "mandatory transport encryption cap");
4682 #endif /* CIFS_DEBUG2 */
4683 if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
4684 if (vol_info == NULL) {
4685 @@ -2831,20 +2838,26 @@ try_mount_again:
4686 goto remote_path_check;
4687 }
4688
4689 - /* do not care if following two calls succeed - informational */
4690 - if (!tcon->ipc) {
4691 - CIFSSMBQFSDeviceInfo(xid, tcon);
4692 - CIFSSMBQFSAttributeInfo(xid, tcon);
4693 - }
4694 -
4695 /* tell server which Unix caps we support */
4696 - if (tcon->ses->capabilities & CAP_UNIX)
4697 + if (tcon->ses->capabilities & CAP_UNIX) {
4698 /* reset of caps checks mount to see if unix extensions
4699 disabled for just this mount */
4700 reset_cifs_unix_caps(xid, tcon, sb, volume_info);
4701 - else
4702 + if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
4703 + (le64_to_cpu(tcon->fsUnixInfo.Capability) &
4704 + CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
4705 + rc = -EACCES;
4706 + goto mount_fail_check;
4707 + }
4708 + } else
4709 tcon->unix_ext = 0; /* server does not support them */
4710
4711 + /* do not care if following two calls succeed - informational */
4712 + if (!tcon->ipc) {
4713 + CIFSSMBQFSDeviceInfo(xid, tcon);
4714 + CIFSSMBQFSAttributeInfo(xid, tcon);
4715 + }
4716 +
4717 /* convert forward to back slashes in prepath here if needed */
4718 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
4719 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
4720 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
4721 index 4d4cc6a..94ab3c0 100644
4722 --- a/fs/ecryptfs/inode.c
4723 +++ b/fs/ecryptfs/inode.c
4724 @@ -527,6 +527,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
4725 dget(lower_dentry);
4726 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
4727 dput(lower_dentry);
4728 + if (!rc && dentry->d_inode)
4729 + clear_nlink(dentry->d_inode);
4730 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
4731 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
4732 unlock_dir(lower_dir_dentry);
4733 diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
4734 index 03e609c..27a7fef 100644
4735 --- a/fs/ecryptfs/keystore.c
4736 +++ b/fs/ecryptfs/keystore.c
4737 @@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
4738 struct mutex *tfm_mutex;
4739 char *block_aligned_filename;
4740 struct ecryptfs_auth_tok *auth_tok;
4741 - struct scatterlist src_sg;
4742 - struct scatterlist dst_sg;
4743 + struct scatterlist src_sg[2];
4744 + struct scatterlist dst_sg[2];
4745 struct blkcipher_desc desc;
4746 char iv[ECRYPTFS_MAX_IV_BYTES];
4747 char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
4748 @@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
4749 memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
4750 filename_size);
4751 rc = virt_to_scatterlist(s->block_aligned_filename,
4752 - s->block_aligned_filename_size, &s->src_sg, 1);
4753 - if (rc != 1) {
4754 + s->block_aligned_filename_size, s->src_sg, 2);
4755 + if (rc < 1) {
4756 printk(KERN_ERR "%s: Internal error whilst attempting to "
4757 - "convert filename memory to scatterlist; "
4758 - "expected rc = 1; got rc = [%d]. "
4759 + "convert filename memory to scatterlist; rc = [%d]. "
4760 "block_aligned_filename_size = [%zd]\n", __func__, rc,
4761 s->block_aligned_filename_size);
4762 goto out_release_free_unlock;
4763 }
4764 rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
4765 - &s->dst_sg, 1);
4766 - if (rc != 1) {
4767 + s->dst_sg, 2);
4768 + if (rc < 1) {
4769 printk(KERN_ERR "%s: Internal error whilst attempting to "
4770 "convert encrypted filename memory to scatterlist; "
4771 - "expected rc = 1; got rc = [%d]. "
4772 - "block_aligned_filename_size = [%zd]\n", __func__, rc,
4773 - s->block_aligned_filename_size);
4774 + "rc = [%d]. block_aligned_filename_size = [%zd]\n",
4775 + __func__, rc, s->block_aligned_filename_size);
4776 goto out_release_free_unlock;
4777 }
4778 /* The characters in the first block effectively do the job
4779 @@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
4780 mount_crypt_stat->global_default_fn_cipher_key_bytes);
4781 goto out_release_free_unlock;
4782 }
4783 - rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
4784 + rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
4785 s->block_aligned_filename_size);
4786 if (rc) {
4787 printk(KERN_ERR "%s: Error attempting to encrypt filename; "
4788 @@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
4789 struct mutex *tfm_mutex;
4790 char *decrypted_filename;
4791 struct ecryptfs_auth_tok *auth_tok;
4792 - struct scatterlist src_sg;
4793 - struct scatterlist dst_sg;
4794 + struct scatterlist src_sg[2];
4795 + struct scatterlist dst_sg[2];
4796 struct blkcipher_desc desc;
4797 char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
4798 char iv[ECRYPTFS_MAX_IV_BYTES];
4799 @@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
4800 }
4801 mutex_lock(s->tfm_mutex);
4802 rc = virt_to_scatterlist(&data[(*packet_size)],
4803 - s->block_aligned_filename_size, &s->src_sg, 1);
4804 - if (rc != 1) {
4805 + s->block_aligned_filename_size, s->src_sg, 2);
4806 + if (rc < 1) {
4807 printk(KERN_ERR "%s: Internal error whilst attempting to "
4808 "convert encrypted filename memory to scatterlist; "
4809 - "expected rc = 1; got rc = [%d]. "
4810 - "block_aligned_filename_size = [%zd]\n", __func__, rc,
4811 - s->block_aligned_filename_size);
4812 + "rc = [%d]. block_aligned_filename_size = [%zd]\n",
4813 + __func__, rc, s->block_aligned_filename_size);
4814 goto out_unlock;
4815 }
4816 (*packet_size) += s->block_aligned_filename_size;
4817 @@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
4818 goto out_unlock;
4819 }
4820 rc = virt_to_scatterlist(s->decrypted_filename,
4821 - s->block_aligned_filename_size, &s->dst_sg, 1);
4822 - if (rc != 1) {
4823 + s->block_aligned_filename_size, s->dst_sg, 2);
4824 + if (rc < 1) {
4825 printk(KERN_ERR "%s: Internal error whilst attempting to "
4826 "convert decrypted filename memory to scatterlist; "
4827 - "expected rc = 1; got rc = [%d]. "
4828 - "block_aligned_filename_size = [%zd]\n", __func__, rc,
4829 - s->block_aligned_filename_size);
4830 + "rc = [%d]. block_aligned_filename_size = [%zd]\n",
4831 + __func__, rc, s->block_aligned_filename_size);
4832 goto out_free_unlock;
4833 }
4834 /* The characters in the first block effectively do the job of
4835 @@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
4836 mount_crypt_stat->global_default_fn_cipher_key_bytes);
4837 goto out_free_unlock;
4838 }
4839 - rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
4840 + rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
4841 s->block_aligned_filename_size);
4842 if (rc) {
4843 printk(KERN_ERR "%s: Error attempting to decrypt filename; "
4844 diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
4845 index 32f3b86..93f9fd0 100644
4846 --- a/fs/ext3/namei.c
4847 +++ b/fs/ext3/namei.c
4848 @@ -1416,10 +1416,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
4849 frame->at = entries;
4850 frame->bh = bh;
4851 bh = bh2;
4852 + /*
4853 + * Mark buffers dirty here so that if do_split() fails we write a
4854 + * consistent set of buffers to disk.
4855 + */
4856 + ext3_journal_dirty_metadata(handle, frame->bh);
4857 + ext3_journal_dirty_metadata(handle, bh);
4858 de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
4859 - dx_release (frames);
4860 - if (!(de))
4861 + if (!de) {
4862 + ext3_mark_inode_dirty(handle, dir);
4863 + dx_release(frames);
4864 return retval;
4865 + }
4866 + dx_release(frames);
4867
4868 return add_dirent_to_buf(handle, dentry, inode, de, bh);
4869 }
4870 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4871 index 4daaf2b..1e37c09 100644
4872 --- a/fs/ext4/ext4.h
4873 +++ b/fs/ext4/ext4.h
4874 @@ -1590,12 +1590,8 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
4875 */
4876 struct ext4_lazy_init {
4877 unsigned long li_state;
4878 -
4879 - wait_queue_head_t li_wait_daemon;
4880 wait_queue_head_t li_wait_task;
4881 - struct timer_list li_timer;
4882 struct task_struct *li_task;
4883 -
4884 struct list_head li_request_list;
4885 struct mutex li_list_mtx;
4886 };
4887 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
4888 index d8a16ee..15bfa44 100644
4889 --- a/fs/ext4/mballoc.c
4890 +++ b/fs/ext4/mballoc.c
4891 @@ -1273,6 +1273,8 @@ repeat_load_buddy:
4892 return 0;
4893
4894 err:
4895 + if (page)
4896 + page_cache_release(page);
4897 if (e4b->bd_bitmap_page)
4898 page_cache_release(e4b->bd_bitmap_page);
4899 if (e4b->bd_buddy_page)
4900 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
4901 index b6dbd05..7bb8f76 100644
4902 --- a/fs/ext4/page-io.c
4903 +++ b/fs/ext4/page-io.c
4904 @@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error)
4905 for (i = 0; i < io_end->num_io_pages; i++) {
4906 struct page *page = io_end->pages[i]->p_page;
4907 struct buffer_head *bh, *head;
4908 - int partial_write = 0;
4909 + loff_t offset;
4910 + loff_t io_end_offset;
4911
4912 - head = page_buffers(page);
4913 - if (error)
4914 + if (error) {
4915 SetPageError(page);
4916 - BUG_ON(!head);
4917 - if (head->b_size != PAGE_CACHE_SIZE) {
4918 - loff_t offset;
4919 - loff_t io_end_offset = io_end->offset + io_end->size;
4920 + set_bit(AS_EIO, &page->mapping->flags);
4921 + head = page_buffers(page);
4922 + BUG_ON(!head);
4923 +
4924 + io_end_offset = io_end->offset + io_end->size;
4925
4926 offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
4927 bh = head;
4928 do {
4929 if ((offset >= io_end->offset) &&
4930 - (offset+bh->b_size <= io_end_offset)) {
4931 - if (error)
4932 - buffer_io_error(bh);
4933 -
4934 - }
4935 - if (buffer_delay(bh))
4936 - partial_write = 1;
4937 - else if (!buffer_mapped(bh))
4938 - clear_buffer_dirty(bh);
4939 - else if (buffer_dirty(bh))
4940 - partial_write = 1;
4941 + (offset+bh->b_size <= io_end_offset))
4942 + buffer_io_error(bh);
4943 +
4944 offset += bh->b_size;
4945 bh = bh->b_this_page;
4946 } while (bh != head);
4947 }
4948
4949 - /*
4950 - * If this is a partial write which happened to make
4951 - * all buffers uptodate then we can optimize away a
4952 - * bogus readpage() for the next read(). Here we
4953 - * 'discover' whether the page went uptodate as a
4954 - * result of this (potentially partial) write.
4955 - */
4956 - if (!partial_write)
4957 - SetPageUptodate(page);
4958 -
4959 put_io_page(io_end->pages[i]);
4960 }
4961 io_end->num_io_pages = 0;
4962 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4963 index 8553dfb..e28c0f2 100644
4964 --- a/fs/ext4/super.c
4965 +++ b/fs/ext4/super.c
4966 @@ -2659,12 +2659,6 @@ static void print_daily_error_info(unsigned long arg)
4967 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
4968 }
4969
4970 -static void ext4_lazyinode_timeout(unsigned long data)
4971 -{
4972 - struct task_struct *p = (struct task_struct *)data;
4973 - wake_up_process(p);
4974 -}
4975 -
4976 /* Find next suitable group and run ext4_init_inode_table */
4977 static int ext4_run_li_request(struct ext4_li_request *elr)
4978 {
4979 @@ -2712,7 +2706,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
4980
4981 /*
4982 * Remove lr_request from the list_request and free the
4983 - * request tructure. Should be called with li_list_mtx held
4984 + * request structure. Should be called with li_list_mtx held
4985 */
4986 static void ext4_remove_li_request(struct ext4_li_request *elr)
4987 {
4988 @@ -2730,14 +2724,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr)
4989
4990 static void ext4_unregister_li_request(struct super_block *sb)
4991 {
4992 - struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request;
4993 -
4994 - if (!ext4_li_info)
4995 + mutex_lock(&ext4_li_mtx);
4996 + if (!ext4_li_info) {
4997 + mutex_unlock(&ext4_li_mtx);
4998 return;
4999 + }
5000
5001 mutex_lock(&ext4_li_info->li_list_mtx);
5002 - ext4_remove_li_request(elr);
5003 + ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
5004 mutex_unlock(&ext4_li_info->li_list_mtx);
5005 + mutex_unlock(&ext4_li_mtx);
5006 }
5007
5008 static struct task_struct *ext4_lazyinit_task;
5009 @@ -2756,14 +2752,10 @@ static int ext4_lazyinit_thread(void *arg)
5010 struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
5011 struct list_head *pos, *n;
5012 struct ext4_li_request *elr;
5013 - unsigned long next_wakeup;
5014 - DEFINE_WAIT(wait);
5015 + unsigned long next_wakeup, cur;
5016
5017 BUG_ON(NULL == eli);
5018
5019 - eli->li_timer.data = (unsigned long)current;
5020 - eli->li_timer.function = ext4_lazyinode_timeout;
5021 -
5022 eli->li_task = current;
5023 wake_up(&eli->li_wait_task);
5024
5025 @@ -2797,19 +2789,15 @@ cont_thread:
5026 if (freezing(current))
5027 refrigerator();
5028
5029 - if ((time_after_eq(jiffies, next_wakeup)) ||
5030 + cur = jiffies;
5031 + if ((time_after_eq(cur, next_wakeup)) ||
5032 (MAX_JIFFY_OFFSET == next_wakeup)) {
5033 cond_resched();
5034 continue;
5035 }
5036
5037 - eli->li_timer.expires = next_wakeup;
5038 - add_timer(&eli->li_timer);
5039 - prepare_to_wait(&eli->li_wait_daemon, &wait,
5040 - TASK_INTERRUPTIBLE);
5041 - if (time_before(jiffies, next_wakeup))
5042 - schedule();
5043 - finish_wait(&eli->li_wait_daemon, &wait);
5044 + schedule_timeout_interruptible(next_wakeup - cur);
5045 +
5046 if (kthread_should_stop()) {
5047 ext4_clear_request_list();
5048 goto exit_thread;
5049 @@ -2833,12 +2821,10 @@ exit_thread:
5050 goto cont_thread;
5051 }
5052 mutex_unlock(&eli->li_list_mtx);
5053 - del_timer_sync(&ext4_li_info->li_timer);
5054 eli->li_task = NULL;
5055 wake_up(&eli->li_wait_task);
5056
5057 kfree(ext4_li_info);
5058 - ext4_lazyinit_task = NULL;
5059 ext4_li_info = NULL;
5060 mutex_unlock(&ext4_li_mtx);
5061
5062 @@ -2866,7 +2852,6 @@ static int ext4_run_lazyinit_thread(void)
5063 if (IS_ERR(ext4_lazyinit_task)) {
5064 int err = PTR_ERR(ext4_lazyinit_task);
5065 ext4_clear_request_list();
5066 - del_timer_sync(&ext4_li_info->li_timer);
5067 kfree(ext4_li_info);
5068 ext4_li_info = NULL;
5069 printk(KERN_CRIT "EXT4: error %d creating inode table "
5070 @@ -2915,9 +2900,7 @@ static int ext4_li_info_new(void)
5071 INIT_LIST_HEAD(&eli->li_request_list);
5072 mutex_init(&eli->li_list_mtx);
5073
5074 - init_waitqueue_head(&eli->li_wait_daemon);
5075 init_waitqueue_head(&eli->li_wait_task);
5076 - init_timer(&eli->li_timer);
5077 eli->li_state |= EXT4_LAZYINIT_QUIT;
5078
5079 ext4_li_info = eli;
5080 diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
5081 index 69b1804..f486ff6 100644
5082 --- a/fs/jbd/commit.c
5083 +++ b/fs/jbd/commit.c
5084 @@ -722,8 +722,13 @@ wait_for_iobuf:
5085 required. */
5086 JBUFFER_TRACE(jh, "file as BJ_Forget");
5087 journal_file_buffer(jh, commit_transaction, BJ_Forget);
5088 - /* Wake up any transactions which were waiting for this
5089 - IO to complete */
5090 + /*
5091 + * Wake up any transactions which were waiting for this
5092 + * IO to complete. The barrier must be here so that changes
5093 + * by journal_file_buffer() take effect before wake_up_bit()
5094 + * does the waitqueue check.
5095 + */
5096 + smp_mb();
5097 wake_up_bit(&bh->b_state, BH_Unshadow);
5098 JBUFFER_TRACE(jh, "brelse shadowed buffer");
5099 __brelse(bh);
5100 diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
5101 index b3713af..e2d4285 100644
5102 --- a/fs/jbd/journal.c
5103 +++ b/fs/jbd/journal.c
5104 @@ -437,9 +437,12 @@ int __log_space_left(journal_t *journal)
5105 int __log_start_commit(journal_t *journal, tid_t target)
5106 {
5107 /*
5108 - * Are we already doing a recent enough commit?
5109 + * The only transaction we can possibly wait upon is the
5110 + * currently running transaction (if it exists). Otherwise,
5111 + * the target tid must be an old one.
5112 */
5113 - if (!tid_geq(journal->j_commit_request, target)) {
5114 + if (journal->j_running_transaction &&
5115 + journal->j_running_transaction->t_tid == target) {
5116 /*
5117 * We want a new commit: OK, mark the request and wakeup the
5118 * commit thread. We do _not_ do the commit ourselves.
5119 @@ -451,7 +454,14 @@ int __log_start_commit(journal_t *journal, tid_t target)
5120 journal->j_commit_sequence);
5121 wake_up(&journal->j_wait_commit);
5122 return 1;
5123 - }
5124 + } else if (!tid_geq(journal->j_commit_request, target))
5125 + /* This should never happen, but if it does, preserve
5126 + the evidence before kjournald goes into a loop and
5127 + increments j_commit_sequence beyond all recognition. */
5128 + WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
5129 + journal->j_commit_request, journal->j_commit_sequence,
5130 + target, journal->j_running_transaction ?
5131 + journal->j_running_transaction->t_tid : 0);
5132 return 0;
5133 }
5134
5135 diff --git a/fs/namei.c b/fs/namei.c
5136 index e3c4f11..6ff858c 100644
5137 --- a/fs/namei.c
5138 +++ b/fs/namei.c
5139 @@ -1378,12 +1378,12 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
5140 {
5141 int res;
5142
5143 - BUG_ON(nd->depth >= MAX_NESTED_LINKS);
5144 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
5145 path_put_conditional(path, nd);
5146 path_put(&nd->path);
5147 return -ELOOP;
5148 }
5149 + BUG_ON(nd->depth >= MAX_NESTED_LINKS);
5150
5151 nd->depth++;
5152 current->link_count++;
5153 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5154 index cf1b339..d0e15db 100644
5155 --- a/fs/nfs/nfs4proc.c
5156 +++ b/fs/nfs/nfs4proc.c
5157 @@ -267,9 +267,11 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
5158 break;
5159 nfs4_schedule_stateid_recovery(server, state);
5160 goto wait_on_recovery;
5161 + case -NFS4ERR_EXPIRED:
5162 + if (state != NULL)
5163 + nfs4_schedule_stateid_recovery(server, state);
5164 case -NFS4ERR_STALE_STATEID:
5165 case -NFS4ERR_STALE_CLIENTID:
5166 - case -NFS4ERR_EXPIRED:
5167 nfs4_schedule_lease_recovery(clp);
5168 goto wait_on_recovery;
5169 #if defined(CONFIG_NFS_V4_1)
5170 @@ -3670,9 +3672,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
5171 break;
5172 nfs4_schedule_stateid_recovery(server, state);
5173 goto wait_on_recovery;
5174 + case -NFS4ERR_EXPIRED:
5175 + if (state != NULL)
5176 + nfs4_schedule_stateid_recovery(server, state);
5177 case -NFS4ERR_STALE_STATEID:
5178 case -NFS4ERR_STALE_CLIENTID:
5179 - case -NFS4ERR_EXPIRED:
5180 nfs4_schedule_lease_recovery(clp);
5181 goto wait_on_recovery;
5182 #if defined(CONFIG_NFS_V4_1)
5183 @@ -4543,6 +4547,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
5184 case -ESTALE:
5185 goto out;
5186 case -NFS4ERR_EXPIRED:
5187 + nfs4_schedule_stateid_recovery(server, state);
5188 case -NFS4ERR_STALE_CLIENTID:
5189 case -NFS4ERR_STALE_STATEID:
5190 nfs4_schedule_lease_recovery(server->nfs_client);
5191 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
5192 index 036f5ad..e97dd21 100644
5193 --- a/fs/nfs/nfs4state.c
5194 +++ b/fs/nfs/nfs4state.c
5195 @@ -1466,7 +1466,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
5196 #ifdef CONFIG_NFS_V4_1
5197 void nfs4_schedule_session_recovery(struct nfs4_session *session)
5198 {
5199 - nfs4_schedule_lease_recovery(session->clp);
5200 + struct nfs_client *clp = session->clp;
5201 +
5202 + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
5203 + nfs4_schedule_lease_recovery(clp);
5204 }
5205 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
5206
5207 @@ -1549,6 +1552,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
5208 status = nfs4_recovery_handle_error(clp, status);
5209 goto out;
5210 }
5211 + clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
5212 /* create_session negotiated new slot table */
5213 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
5214
5215 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
5216 index f57f528..101c85a 100644
5217 --- a/fs/nfs/pnfs.c
5218 +++ b/fs/nfs/pnfs.c
5219 @@ -1009,7 +1009,7 @@ void
5220 pnfs_set_layoutcommit(struct nfs_write_data *wdata)
5221 {
5222 struct nfs_inode *nfsi = NFS_I(wdata->inode);
5223 - loff_t end_pos = wdata->args.offset + wdata->res.count;
5224 + loff_t end_pos = wdata->mds_offset + wdata->res.count;
5225 bool mark_as_dirty = false;
5226
5227 spin_lock(&nfsi->vfs_inode.i_lock);
5228 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
5229 index ce4f624..a29d5cc 100644
5230 --- a/fs/partitions/ldm.c
5231 +++ b/fs/partitions/ldm.c
5232 @@ -1335,6 +1335,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
5233
5234 list_add_tail (&f->list, frags);
5235 found:
5236 + if (rec >= f->num) {
5237 + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
5238 + return false;
5239 + }
5240 +
5241 if (f->map & (1 << rec)) {
5242 ldm_error ("Duplicate VBLK, part %d.", rec);
5243 f->map &= 0x7F; /* Mark the group as broken */
5244 diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
5245 index bf31b47..cad60b5 100644
5246 --- a/fs/ubifs/sb.c
5247 +++ b/fs/ubifs/sb.c
5248 @@ -475,7 +475,8 @@ failed:
5249 * @c: UBIFS file-system description object
5250 *
5251 * This function returns a pointer to the superblock node or a negative error
5252 - * code.
5253 + * code. Note, the user of this function is responsible of kfree()'ing the
5254 + * returned superblock buffer.
5255 */
5256 struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
5257 {
5258 diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
5259 index 04ad07f..328e6fc 100644
5260 --- a/fs/ubifs/super.c
5261 +++ b/fs/ubifs/super.c
5262 @@ -1584,6 +1584,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
5263 }
5264 sup->leb_cnt = cpu_to_le32(c->leb_cnt);
5265 err = ubifs_write_sb_node(c, sup);
5266 + kfree(sup);
5267 if (err)
5268 goto out;
5269 }
5270 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5271 index 2ad95fa..ae9091a 100644
5272 --- a/include/linux/blkdev.h
5273 +++ b/include/linux/blkdev.h
5274 @@ -257,7 +257,7 @@ struct queue_limits {
5275 unsigned char misaligned;
5276 unsigned char discard_misaligned;
5277 unsigned char cluster;
5278 - signed char discard_zeroes_data;
5279 + unsigned char discard_zeroes_data;
5280 };
5281
5282 struct request_queue
5283 @@ -364,6 +364,8 @@ struct request_queue
5284 * for flush operations
5285 */
5286 unsigned int flush_flags;
5287 + unsigned int flush_not_queueable:1;
5288 + unsigned int flush_queue_delayed:1;
5289 unsigned int flush_pending_idx:1;
5290 unsigned int flush_running_idx:1;
5291 unsigned long flush_pending_since;
5292 @@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
5293 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
5294 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
5295 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
5296 +extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
5297 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
5298
5299 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
5300 @@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
5301 {
5302 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
5303
5304 + if (!lim->max_discard_sectors)
5305 + return 0;
5306 +
5307 return (lim->discard_granularity + lim->discard_alignment - alignment)
5308 & (lim->discard_granularity - 1);
5309 }
5310
5311 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
5312 {
5313 - if (q->limits.discard_zeroes_data == 1)
5314 + if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
5315 return 1;
5316
5317 return 0;
5318 @@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
5319 return bdev->bd_block_size;
5320 }
5321
5322 +static inline bool queue_flush_queueable(struct request_queue *q)
5323 +{
5324 + return !q->flush_not_queueable;
5325 +}
5326 +
5327 typedef struct {struct page *v;} Sector;
5328
5329 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
5330 diff --git a/include/linux/efi.h b/include/linux/efi.h
5331 index 33fa120..e376270 100644
5332 --- a/include/linux/efi.h
5333 +++ b/include/linux/efi.h
5334 @@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
5335 struct resource *data_resource, struct resource *bss_resource);
5336 extern unsigned long efi_get_time(void);
5337 extern int efi_set_rtc_mmss(unsigned long nowtime);
5338 +extern void efi_reserve_boot_services(void);
5339 extern struct efi_memory_map memmap;
5340
5341 /**
5342 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
5343 index d764a42..300d758 100644
5344 --- a/include/linux/genhd.h
5345 +++ b/include/linux/genhd.h
5346 @@ -127,6 +127,7 @@ struct hd_struct {
5347 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32
5348 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
5349 #define GENHD_FL_NATIVE_CAPACITY 128
5350 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
5351
5352 enum {
5353 DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
5354 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
5355 index 8abe8d7..8652a4f 100644
5356 --- a/include/linux/pci_ids.h
5357 +++ b/include/linux/pci_ids.h
5358 @@ -608,6 +608,8 @@
5359 #define PCI_DEVICE_ID_MATROX_G550 0x2527
5360 #define PCI_DEVICE_ID_MATROX_VIA 0x4536
5361
5362 +#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
5363 +
5364 #define PCI_VENDOR_ID_CT 0x102c
5365 #define PCI_DEVICE_ID_CT_69000 0x00c0
5366 #define PCI_DEVICE_ID_CT_65545 0x00d8
5367 diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
5368 index 77cbddb..a7d87f9 100644
5369 --- a/include/linux/pm_qos_params.h
5370 +++ b/include/linux/pm_qos_params.h
5371 @@ -16,6 +16,10 @@
5372 #define PM_QOS_NUM_CLASSES 4
5373 #define PM_QOS_DEFAULT_VALUE -1
5374
5375 +#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
5376 +#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
5377 +#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
5378 +
5379 struct pm_qos_request_list {
5380 struct plist_node list;
5381 int pm_qos_class;
5382 diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
5383 index e98cd2e..06d6964 100644
5384 --- a/include/linux/seqlock.h
5385 +++ b/include/linux/seqlock.h
5386 @@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
5387 unsigned ret;
5388
5389 repeat:
5390 - ret = sl->sequence;
5391 - smp_rmb();
5392 + ret = ACCESS_ONCE(sl->sequence);
5393 if (unlikely(ret & 1)) {
5394 cpu_relax();
5395 goto repeat;
5396 }
5397 + smp_rmb();
5398
5399 return ret;
5400 }
5401 diff --git a/include/net/dst.h b/include/net/dst.h
5402 index 75b95df..b3ad020 100644
5403 --- a/include/net/dst.h
5404 +++ b/include/net/dst.h
5405 @@ -120,6 +120,8 @@ static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
5406 {
5407 unsigned long p = dst->_metrics;
5408
5409 + BUG_ON(!p);
5410 +
5411 if (p & DST_METRICS_READ_ONLY)
5412 return dst->ops->cow_metrics(dst, p);
5413 return __DST_METRICS_PTR(p);
5414 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
5415 index 1d3b5b2..561ac99 100644
5416 --- a/include/target/target_core_base.h
5417 +++ b/include/target/target_core_base.h
5418 @@ -98,6 +98,7 @@ enum transport_state_table {
5419 TRANSPORT_REMOVE = 14,
5420 TRANSPORT_FREE = 15,
5421 TRANSPORT_NEW_CMD_MAP = 16,
5422 + TRANSPORT_FREE_CMD_INTR = 17,
5423 };
5424
5425 /* Used for struct se_cmd->se_cmd_flags */
5426 diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
5427 index 59aa464..24a1c6c 100644
5428 --- a/include/target/target_core_transport.h
5429 +++ b/include/target/target_core_transport.h
5430 @@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
5431 extern int transport_generic_handle_data(struct se_cmd *);
5432 extern void transport_new_cmd_failure(struct se_cmd *);
5433 extern int transport_generic_handle_tmr(struct se_cmd *);
5434 +extern void transport_generic_free_cmd_intr(struct se_cmd *);
5435 extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
5436 extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
5437 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
5438 diff --git a/init/main.c b/init/main.c
5439 index 4a9479e..48df882 100644
5440 --- a/init/main.c
5441 +++ b/init/main.c
5442 @@ -580,8 +580,8 @@ asmlinkage void __init start_kernel(void)
5443 #endif
5444 page_cgroup_init();
5445 enable_debug_pagealloc();
5446 - kmemleak_init();
5447 debug_objects_mem_init();
5448 + kmemleak_init();
5449 setup_per_cpu_pageset();
5450 numa_policy_init();
5451 if (late_time_init)
5452 diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
5453 index 0da058b..a9582ef 100644
5454 --- a/kernel/pm_qos_params.c
5455 +++ b/kernel/pm_qos_params.c
5456 @@ -53,11 +53,17 @@ enum pm_qos_type {
5457 PM_QOS_MIN /* return the smallest value */
5458 };
5459
5460 +/*
5461 + * Note: The lockless read path depends on the CPU accessing
5462 + * target_value atomically. Atomic access is only guaranteed on all CPU
5463 + * types linux supports for 32 bit quantites
5464 + */
5465 struct pm_qos_object {
5466 struct plist_head requests;
5467 struct blocking_notifier_head *notifiers;
5468 struct miscdevice pm_qos_power_miscdev;
5469 char *name;
5470 + s32 target_value; /* Do not change to 64 bit */
5471 s32 default_value;
5472 enum pm_qos_type type;
5473 };
5474 @@ -70,7 +76,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
5475 .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
5476 .notifiers = &cpu_dma_lat_notifier,
5477 .name = "cpu_dma_latency",
5478 - .default_value = 2000 * USEC_PER_SEC,
5479 + .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
5480 + .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
5481 .type = PM_QOS_MIN,
5482 };
5483
5484 @@ -79,7 +86,8 @@ static struct pm_qos_object network_lat_pm_qos = {
5485 .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
5486 .notifiers = &network_lat_notifier,
5487 .name = "network_latency",
5488 - .default_value = 2000 * USEC_PER_SEC,
5489 + .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
5490 + .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
5491 .type = PM_QOS_MIN
5492 };
5493
5494 @@ -89,7 +97,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
5495 .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
5496 .notifiers = &network_throughput_notifier,
5497 .name = "network_throughput",
5498 - .default_value = 0,
5499 + .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
5500 + .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
5501 .type = PM_QOS_MAX,
5502 };
5503
5504 @@ -135,6 +144,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
5505 }
5506 }
5507
5508 +static inline s32 pm_qos_read_value(struct pm_qos_object *o)
5509 +{
5510 + return o->target_value;
5511 +}
5512 +
5513 +static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
5514 +{
5515 + o->target_value = value;
5516 +}
5517 +
5518 static void update_target(struct pm_qos_object *o, struct plist_node *node,
5519 int del, int value)
5520 {
5521 @@ -159,6 +178,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
5522 plist_add(node, &o->requests);
5523 }
5524 curr_value = pm_qos_get_value(o);
5525 + pm_qos_set_value(o, curr_value);
5526 spin_unlock_irqrestore(&pm_qos_lock, flags);
5527
5528 if (prev_value != curr_value)
5529 @@ -193,18 +213,11 @@ static int find_pm_qos_object_by_minor(int minor)
5530 * pm_qos_request - returns current system wide qos expectation
5531 * @pm_qos_class: identification of which qos value is requested
5532 *
5533 - * This function returns the current target value in an atomic manner.
5534 + * This function returns the current target value.
5535 */
5536 int pm_qos_request(int pm_qos_class)
5537 {
5538 - unsigned long flags;
5539 - int value;
5540 -
5541 - spin_lock_irqsave(&pm_qos_lock, flags);
5542 - value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
5543 - spin_unlock_irqrestore(&pm_qos_lock, flags);
5544 -
5545 - return value;
5546 + return pm_qos_read_value(pm_qos_array[pm_qos_class]);
5547 }
5548 EXPORT_SYMBOL_GPL(pm_qos_request);
5549
5550 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
5551 index ee24fa1..666880d 100644
5552 --- a/kernel/trace/ftrace.c
5553 +++ b/kernel/trace/ftrace.c
5554 @@ -2413,14 +2413,16 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5555 ftrace_match_records(parser->buffer, parser->idx, enable);
5556 }
5557
5558 - mutex_lock(&ftrace_lock);
5559 - if (ftrace_start_up && ftrace_enabled)
5560 - ftrace_run_update_code(FTRACE_ENABLE_CALLS);
5561 - mutex_unlock(&ftrace_lock);
5562 -
5563 trace_parser_put(parser);
5564 kfree(iter);
5565
5566 + if (file->f_mode & FMODE_WRITE) {
5567 + mutex_lock(&ftrace_lock);
5568 + if (ftrace_start_up && ftrace_enabled)
5569 + ftrace_run_update_code(FTRACE_ENABLE_CALLS);
5570 + mutex_unlock(&ftrace_lock);
5571 + }
5572 +
5573 mutex_unlock(&ftrace_regex_lock);
5574 return 0;
5575 }
5576 diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
5577 index 619313e..507a22f 100644
5578 --- a/lib/locking-selftest.c
5579 +++ b/lib/locking-selftest.c
5580 @@ -144,7 +144,7 @@ static void init_shared_classes(void)
5581
5582 #define HARDIRQ_ENTER() \
5583 local_irq_disable(); \
5584 - irq_enter(); \
5585 + __irq_enter(); \
5586 WARN_ON(!in_irq());
5587
5588 #define HARDIRQ_EXIT() \
5589 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
5590 index c1d5867..aacee45 100644
5591 --- a/mm/kmemleak.c
5592 +++ b/mm/kmemleak.c
5593 @@ -1414,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5594 ++(*pos);
5595
5596 list_for_each_continue_rcu(n, &object_list) {
5597 - next_obj = list_entry(n, struct kmemleak_object, object_list);
5598 - if (get_object(next_obj))
5599 + struct kmemleak_object *obj =
5600 + list_entry(n, struct kmemleak_object, object_list);
5601 + if (get_object(obj)) {
5602 + next_obj = obj;
5603 break;
5604 + }
5605 }
5606
5607 put_object(prev_obj);
5608 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5609 index 3f8bce2..e78b324 100644
5610 --- a/mm/page_alloc.c
5611 +++ b/mm/page_alloc.c
5612 @@ -2064,6 +2064,7 @@ restart:
5613 first_zones_zonelist(zonelist, high_zoneidx, NULL,
5614 &preferred_zone);
5615
5616 +rebalance:
5617 /* This is the last chance, in general, before the goto nopage. */
5618 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
5619 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
5620 @@ -2071,7 +2072,6 @@ restart:
5621 if (page)
5622 goto got_pg;
5623
5624 -rebalance:
5625 /* Allocate without watermarks if the context allows */
5626 if (alloc_flags & ALLOC_NO_WATERMARKS) {
5627 page = __alloc_pages_high_priority(gfp_mask, order,
5628 diff --git a/mm/shmem.c b/mm/shmem.c
5629 index dfc7069..ba12be4 100644
5630 --- a/mm/shmem.c
5631 +++ b/mm/shmem.c
5632 @@ -916,11 +916,12 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
5633 if (size > ENTRIES_PER_PAGE)
5634 size = ENTRIES_PER_PAGE;
5635 offset = shmem_find_swp(entry, ptr, ptr+size);
5636 + shmem_swp_unmap(ptr);
5637 if (offset >= 0) {
5638 shmem_dir_unmap(dir);
5639 + ptr = shmem_swp_map(subdir);
5640 goto found;
5641 }
5642 - shmem_swp_unmap(ptr);
5643 }
5644 }
5645 lost1:
5646 @@ -1100,8 +1101,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
5647 delete_from_page_cache(page);
5648 shmem_swp_set(info, entry, swap.val);
5649 shmem_swp_unmap(entry);
5650 - spin_unlock(&info->lock);
5651 swap_shmem_alloc(swap);
5652 + spin_unlock(&info->lock);
5653 BUG_ON(page_mapped(page));
5654 swap_writepage(page, wbc);
5655 return 0;
5656 diff --git a/mm/slub.c b/mm/slub.c
5657 index 9d2e5e4..f6cb6cd 100644
5658 --- a/mm/slub.c
5659 +++ b/mm/slub.c
5660 @@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
5661 return *(void **)(object + s->offset);
5662 }
5663
5664 +static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
5665 +{
5666 + void *p;
5667 +
5668 +#ifdef CONFIG_DEBUG_PAGEALLOC
5669 + probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
5670 +#else
5671 + p = get_freepointer(s, object);
5672 +#endif
5673 + return p;
5674 +}
5675 +
5676 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
5677 {
5678 *(void **)(object + s->offset) = fp;
5679 @@ -1943,7 +1955,7 @@ redo:
5680 if (unlikely(!irqsafe_cpu_cmpxchg_double(
5681 s->cpu_slab->freelist, s->cpu_slab->tid,
5682 object, tid,
5683 - get_freepointer(s, object), next_tid(tid)))) {
5684 + get_freepointer_safe(s, object), next_tid(tid)))) {
5685
5686 note_cmpxchg_failure("slab_alloc", s, tid);
5687 goto redo;
5688 diff --git a/mm/vmscan.c b/mm/vmscan.c
5689 index 8bfd450..cc1470b 100644
5690 --- a/mm/vmscan.c
5691 +++ b/mm/vmscan.c
5692 @@ -230,8 +230,11 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
5693 if (scanned == 0)
5694 scanned = SWAP_CLUSTER_MAX;
5695
5696 - if (!down_read_trylock(&shrinker_rwsem))
5697 - return 1; /* Assume we'll be able to shrink next time */
5698 + if (!down_read_trylock(&shrinker_rwsem)) {
5699 + /* Assume we'll be able to shrink next time */
5700 + ret = 1;
5701 + goto out;
5702 + }
5703
5704 list_for_each_entry(shrinker, &shrinker_list, list) {
5705 unsigned long long delta;
5706 @@ -282,6 +285,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
5707 shrinker->nr += total_scan;
5708 }
5709 up_read(&shrinker_rwsem);
5710 +out:
5711 + cond_resched();
5712 return ret;
5713 }
5714
5715 @@ -2286,7 +2291,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
5716 * must be balanced
5717 */
5718 if (order)
5719 - return pgdat_balanced(pgdat, balanced, classzone_idx);
5720 + return !pgdat_balanced(pgdat, balanced, classzone_idx);
5721 else
5722 return !all_zones_ok;
5723 }
5724 diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
5725 index f7fa67c..f49da58 100644
5726 --- a/net/atm/atm_sysfs.c
5727 +++ b/net/atm/atm_sysfs.c
5728 @@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
5729 return pos - buf;
5730 }
5731
5732 +static ssize_t show_atmindex(struct device *cdev,
5733 + struct device_attribute *attr, char *buf)
5734 +{
5735 + struct atm_dev *adev = to_atm_dev(cdev);
5736 +
5737 + return sprintf(buf, "%d\n", adev->number);
5738 +}
5739 +
5740 static ssize_t show_carrier(struct device *cdev,
5741 struct device_attribute *attr, char *buf)
5742 {
5743 @@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
5744
5745 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
5746 static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
5747 +static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
5748 static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
5749 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
5750 static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
5751 @@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
5752 static struct device_attribute *atm_attrs[] = {
5753 &dev_attr_atmaddress,
5754 &dev_attr_address,
5755 + &dev_attr_atmindex,
5756 &dev_attr_carrier,
5757 &dev_attr_type,
5758 &dev_attr_link_rate,
5759 diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
5760 index 74ef4d4..5f9c091 100644
5761 --- a/net/bridge/br_netfilter.c
5762 +++ b/net/bridge/br_netfilter.c
5763 @@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = {
5764 * ipt_REJECT needs it. Future netfilter modules might
5765 * require us to fill additional fields.
5766 */
5767 +static const u32 br_dst_default_metrics[RTAX_MAX] = {
5768 + [RTAX_MTU - 1] = 1500,
5769 +};
5770 +
5771 void br_netfilter_rtable_init(struct net_bridge *br)
5772 {
5773 struct rtable *rt = &br->fake_rtable;
5774 @@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
5775 atomic_set(&rt->dst.__refcnt, 1);
5776 rt->dst.dev = br->dev;
5777 rt->dst.path = &rt->dst;
5778 - dst_metric_set(&rt->dst, RTAX_MTU, 1500);
5779 + dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
5780 rt->dst.flags = DST_NOXFRM;
5781 rt->dst.ops = &fake_dst_ops;
5782 }
5783 diff --git a/net/core/dev.c b/net/core/dev.c
5784 index b624fe4..acd7423 100644
5785 --- a/net/core/dev.c
5786 +++ b/net/core/dev.c
5787 @@ -1007,7 +1007,7 @@ rollback:
5788 }
5789
5790 write_lock_bh(&dev_base_lock);
5791 - hlist_del(&dev->name_hlist);
5792 + hlist_del_rcu(&dev->name_hlist);
5793 write_unlock_bh(&dev_base_lock);
5794
5795 synchronize_rcu();
5796 @@ -5258,7 +5258,7 @@ void netdev_update_features(struct net_device *dev)
5797 if (dev->features == features)
5798 return;
5799
5800 - netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
5801 + netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5802 dev->features, features);
5803
5804 if (dev->netdev_ops->ndo_set_features)
5805 diff --git a/net/core/dst.c b/net/core/dst.c
5806 index 91104d3..b71b7a3 100644
5807 --- a/net/core/dst.c
5808 +++ b/net/core/dst.c
5809 @@ -314,7 +314,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
5810 {
5811 unsigned long prev, new;
5812
5813 - new = (unsigned long) dst_default_metrics;
5814 + new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
5815 prev = cmpxchg(&dst->_metrics, old, new);
5816 if (prev == old)
5817 kfree(__DST_METRICS_PTR(old));
5818 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
5819 index 74ead9e..f337525 100644
5820 --- a/net/core/ethtool.c
5821 +++ b/net/core/ethtool.c
5822 @@ -330,7 +330,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
5823 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
5824 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
5825 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
5826 - /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6",
5827 + /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
5828 /* NETIF_F_HIGHDMA */ "highdma",
5829 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
5830 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
5831 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5832 index 7ebeed0..3e934fe 100644
5833 --- a/net/core/skbuff.c
5834 +++ b/net/core/skbuff.c
5835 @@ -2993,6 +2993,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5836 skb->destructor = sock_rmem_free;
5837 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5838
5839 + /* before exiting rcu section, make sure dst is refcounted */
5840 + skb_dst_force(skb);
5841 +
5842 skb_queue_tail(&sk->sk_error_queue, skb);
5843 if (!sock_flag(sk, SOCK_DEAD))
5844 sk->sk_data_ready(sk, skb->len);
5845 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
5846 index 1fd3d9c..57ca93a 100644
5847 --- a/net/ipv4/igmp.c
5848 +++ b/net/ipv4/igmp.c
5849 @@ -1169,20 +1169,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
5850
5851 if (!in_dev->dead) {
5852 if (IGMP_V1_SEEN(in_dev))
5853 - goto done;
5854 + return;
5855 if (IGMP_V2_SEEN(in_dev)) {
5856 if (reporter)
5857 igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
5858 - goto done;
5859 + return;
5860 }
5861 /* IGMPv3 */
5862 igmpv3_add_delrec(in_dev, im);
5863
5864 igmp_ifc_event(in_dev);
5865 }
5866 -done:
5867 #endif
5868 - ip_mc_clear_src(im);
5869 }
5870
5871 static void igmp_group_added(struct ip_mc_list *im)
5872 @@ -1319,6 +1317,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
5873 *ip = i->next_rcu;
5874 in_dev->mc_count--;
5875 igmp_group_dropped(i);
5876 + ip_mc_clear_src(i);
5877
5878 if (!in_dev->dead)
5879 ip_rt_multicast_event(in_dev);
5880 @@ -1428,7 +1427,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
5881 in_dev->mc_list = i->next_rcu;
5882 in_dev->mc_count--;
5883
5884 - igmp_group_dropped(i);
5885 + /* We've dropped the groups in ip_mc_down already */
5886 + ip_mc_clear_src(i);
5887 ip_ma_put(i);
5888 }
5889 }
5890 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
5891 index 4404973..3740403 100644
5892 --- a/net/mac80211/cfg.c
5893 +++ b/net/mac80211/cfg.c
5894 @@ -228,11 +228,11 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
5895 goto out;
5896
5897 if (pairwise)
5898 - key = sta->ptk;
5899 + key = rcu_dereference(sta->ptk);
5900 else if (key_idx < NUM_DEFAULT_KEYS)
5901 - key = sta->gtk[key_idx];
5902 + key = rcu_dereference(sta->gtk[key_idx]);
5903 } else
5904 - key = sdata->keys[key_idx];
5905 + key = rcu_dereference(sdata->keys[key_idx]);
5906
5907 if (!key)
5908 goto out;
5909 @@ -921,8 +921,10 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
5910 static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
5911 struct mpath_info *pinfo)
5912 {
5913 - if (mpath->next_hop)
5914 - memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
5915 + struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop);
5916 +
5917 + if (next_hop_sta)
5918 + memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
5919 else
5920 memset(next_hop, 0, ETH_ALEN);
5921
5922 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5923 index 64d92d5..7ffcb55 100644
5924 --- a/net/mac80211/mlme.c
5925 +++ b/net/mac80211/mlme.c
5926 @@ -789,7 +789,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
5927 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5928 }
5929
5930 - netif_tx_start_all_queues(sdata->dev);
5931 + netif_tx_wake_all_queues(sdata->dev);
5932 }
5933
5934 void ieee80211_dynamic_ps_timer(unsigned long data)
5935 diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
5936 index 237cc19..cb5a285 100644
5937 --- a/net/netfilter/nf_conntrack_sip.c
5938 +++ b/net/netfilter/nf_conntrack_sip.c
5939 @@ -1419,6 +1419,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
5940 const char *dptr, *end;
5941 s16 diff, tdiff = 0;
5942 int ret = NF_ACCEPT;
5943 + bool term;
5944 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
5945
5946 if (ctinfo != IP_CT_ESTABLISHED &&
5947 @@ -1453,14 +1454,21 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
5948 if (dptr + matchoff == end)
5949 break;
5950
5951 - if (end + strlen("\r\n\r\n") > dptr + datalen)
5952 - break;
5953 - if (end[0] != '\r' || end[1] != '\n' ||
5954 - end[2] != '\r' || end[3] != '\n')
5955 + term = false;
5956 + for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
5957 + if (end[0] == '\r' && end[1] == '\n' &&
5958 + end[2] == '\r' && end[3] == '\n') {
5959 + term = true;
5960 + break;
5961 + }
5962 + }
5963 + if (!term)
5964 break;
5965 end += strlen("\r\n\r\n") + clen;
5966
5967 msglen = origlen = end - dptr;
5968 + if (msglen > datalen)
5969 + return NF_DROP;
5970
5971 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
5972 if (ret != NF_ACCEPT)
5973 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
5974 index c2e628d..6d96275 100644
5975 --- a/net/sched/sch_sfq.c
5976 +++ b/net/sched/sch_sfq.c
5977 @@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
5978 {
5979 struct sfq_sched_data *q = qdisc_priv(sch);
5980 unsigned int hash;
5981 - sfq_index x;
5982 + sfq_index x, qlen;
5983 struct sfq_slot *slot;
5984 int uninitialized_var(ret);
5985
5986 @@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
5987 if (++sch->q.qlen <= q->limit)
5988 return NET_XMIT_SUCCESS;
5989
5990 + qlen = slot->qlen;
5991 sfq_drop(sch);
5992 - return NET_XMIT_CN;
5993 -}
5994 -
5995 -static struct sk_buff *
5996 -sfq_peek(struct Qdisc *sch)
5997 -{
5998 - struct sfq_sched_data *q = qdisc_priv(sch);
5999 -
6000 - /* No active slots */
6001 - if (q->tail == NULL)
6002 - return NULL;
6003 -
6004 - return q->slots[q->tail->next].skblist_next;
6005 + /* Return Congestion Notification only if we dropped a packet
6006 + * from this flow.
6007 + */
6008 + return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
6009 }
6010
6011 static struct sk_buff *
6012 @@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
6013 .priv_size = sizeof(struct sfq_sched_data),
6014 .enqueue = sfq_enqueue,
6015 .dequeue = sfq_dequeue,
6016 - .peek = sfq_peek,
6017 + .peek = qdisc_peek_dequeued,
6018 .drop = sfq_drop,
6019 .init = sfq_init,
6020 .reset = sfq_reset,
6021 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
6022 index 1a21c57..525f97c 100644
6023 --- a/net/sctp/associola.c
6024 +++ b/net/sctp/associola.c
6025 @@ -64,6 +64,7 @@
6026 /* Forward declarations for internal functions. */
6027 static void sctp_assoc_bh_rcv(struct work_struct *work);
6028 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
6029 +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
6030
6031 /* Keep track of the new idr low so that we don't re-use association id
6032 * numbers too fast. It is protected by they idr spin lock is in the
6033 @@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc)
6034 /* Free any cached ASCONF_ACK chunk. */
6035 sctp_assoc_free_asconf_acks(asoc);
6036
6037 + /* Free the ASCONF queue. */
6038 + sctp_assoc_free_asconf_queue(asoc);
6039 +
6040 /* Free any cached ASCONF chunk. */
6041 if (asoc->addip_last_asconf)
6042 sctp_chunk_free(asoc->addip_last_asconf);
6043 @@ -1578,6 +1582,18 @@ retry:
6044 return error;
6045 }
6046
6047 +/* Free the ASCONF queue */
6048 +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
6049 +{
6050 + struct sctp_chunk *asconf;
6051 + struct sctp_chunk *tmp;
6052 +
6053 + list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
6054 + list_del_init(&asconf->list);
6055 + sctp_chunk_free(asconf);
6056 + }
6057 +}
6058 +
6059 /* Free asconf_ack cache */
6060 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
6061 {
6062 diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
6063 index faf71d1..6150ac5 100644
6064 --- a/net/sctp/bind_addr.c
6065 +++ b/net/sctp/bind_addr.c
6066 @@ -140,14 +140,12 @@ void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
6067 /* Dispose of the address list. */
6068 static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
6069 {
6070 - struct sctp_sockaddr_entry *addr;
6071 - struct list_head *pos, *temp;
6072 + struct sctp_sockaddr_entry *addr, *temp;
6073
6074 /* Empty the bind address list. */
6075 - list_for_each_safe(pos, temp, &bp->address_list) {
6076 - addr = list_entry(pos, struct sctp_sockaddr_entry, list);
6077 - list_del(pos);
6078 - kfree(addr);
6079 + list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
6080 + list_del_rcu(&addr->list);
6081 + call_rcu(&addr->rcu, sctp_local_addr_free);
6082 SCTP_DBG_OBJCNT_DEC(addr);
6083 }
6084 }
6085 diff --git a/net/socket.c b/net/socket.c
6086 index 310d16b..65b2310 100644
6087 --- a/net/socket.c
6088 +++ b/net/socket.c
6089 @@ -2122,14 +2122,16 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
6090 */
6091 if (MSG_CMSG_COMPAT & flags) {
6092 err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
6093 - &msg_sys, flags, datagrams);
6094 + &msg_sys, flags & ~MSG_WAITFORONE,
6095 + datagrams);
6096 if (err < 0)
6097 break;
6098 err = __put_user(err, &compat_entry->msg_len);
6099 ++compat_entry;
6100 } else {
6101 err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
6102 - &msg_sys, flags, datagrams);
6103 + &msg_sys, flags & ~MSG_WAITFORONE,
6104 + datagrams);
6105 if (err < 0)
6106 break;
6107 err = put_user(err, &entry->msg_len);
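
The net/socket.c hunk masks MSG_WAITFORONE out of the flags handed to each __sys_recvmsg() call: the flag only steers the batching loop (return once at least one datagram has been received), it is not a per-message receive flag. A minimal userspace sketch of the caller-visible recvmmsg() API follows, assuming a UDP listener; the port number and buffer sizes are arbitrary example values:

/* Batch-receive UDP datagrams with recvmmsg() and MSG_WAITFORONE.  The
 * flag makes the call return as soon as at least one datagram is in,
 * instead of waiting to fill the whole vector.  Port 5000 and the buffer
 * sizes are placeholders for this sketch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

#define VLEN  8
#define BUFSZ 1500

int main(void)
{
        struct sockaddr_in addr = {
                .sin_family      = AF_INET,
                .sin_port        = htons(5000),
                .sin_addr.s_addr = htonl(INADDR_ANY),
        };
        struct mmsghdr msgs[VLEN];
        struct iovec iovecs[VLEN];
        char bufs[VLEN][BUFSZ];
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("socket/bind");
                return 1;
        }

        memset(msgs, 0, sizeof(msgs));
        for (int i = 0; i < VLEN; i++) {
                iovecs[i].iov_base         = bufs[i];
                iovecs[i].iov_len          = BUFSZ;
                msgs[i].msg_hdr.msg_iov    = &iovecs[i];
                msgs[i].msg_hdr.msg_iovlen = 1;
        }

        /* Blocks until at least one datagram arrives, then returns however
         * many are immediately available (up to VLEN). */
        int n = recvmmsg(fd, msgs, VLEN, MSG_WAITFORONE, NULL);
        if (n < 0) {
                perror("recvmmsg");
                return 1;
        }
        for (int i = 0; i < n; i++)
                printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);
        return 0;
}
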
6108 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
6109 index bf005d3..f34f5ab 100644
6110 --- a/net/sunrpc/xprtsock.c
6111 +++ b/net/sunrpc/xprtsock.c
6112 @@ -1344,7 +1344,6 @@ static void xs_tcp_state_change(struct sock *sk)
6113 case TCP_CLOSE_WAIT:
6114 /* The server initiated a shutdown of the socket */
6115 xprt_force_disconnect(xprt);
6116 - case TCP_SYN_SENT:
6117 xprt->connect_cookie++;
6118 case TCP_CLOSING:
6119 /*
6120 @@ -1758,6 +1757,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
6121 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
6122 {
6123 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
6124 + int ret = -ENOTCONN;
6125
6126 if (!transport->inet) {
6127 struct sock *sk = sock->sk;
6128 @@ -1789,12 +1789,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
6129 }
6130
6131 if (!xprt_bound(xprt))
6132 - return -ENOTCONN;
6133 + goto out;
6134
6135 /* Tell the socket layer to start connecting... */
6136 xprt->stat.connect_count++;
6137 xprt->stat.connect_start = jiffies;
6138 - return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
6139 + ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
6140 + switch (ret) {
6141 + case 0:
6142 + case -EINPROGRESS:
6143 + /* SYN_SENT! */
6144 + xprt->connect_cookie++;
6145 + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
6146 + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
6147 + }
6148 +out:
6149 + return ret;
6150 }
6151
6152 /**
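
In the xprtsock.c change, kernel_connect() on the non-blocking socket is expected to return either 0 or -EINPROGRESS once the SYN is under way, and the connect cookie is now bumped at that point rather than in the removed TCP_SYN_SENT case of xs_tcp_state_change(). The same pair of outcomes is the classic userspace non-blocking connect pattern; here is a minimal sketch under the assumption of a plain TCP client (the 127.0.0.1:8080 endpoint is a placeholder):

/* Non-blocking connect idiom: connect() either succeeds immediately (0)
 * or fails with EINPROGRESS, meaning the SYN has been sent and completion
 * is reported later (here via poll() + SO_ERROR).  The endpoint is an
 * example value only. */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port   = htons(8080),
        };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
        if (fd < 0 || fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
                perror("socket/fcntl");
                return 1;
        }

        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
                printf("connected immediately\n");      /* rare but legal */
        } else if (errno == EINPROGRESS) {
                /* SYN is on the wire; wait for writability, then read
                 * SO_ERROR to learn whether the handshake succeeded. */
                struct pollfd pfd = { .fd = fd, .events = POLLOUT };
                int err = 0;
                socklen_t len = sizeof(err);

                if (poll(&pfd, 1, 5000) == 1 &&
                    getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 &&
                    err == 0)
                        printf("connected\n");
                else
                        fprintf(stderr, "connect did not complete (err=%d)\n", err);
        } else {
                perror("connect");
                return 1;
        }
        return 0;
}
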
6153 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
6154 index 4ebce42..2c70a1e 100644
6155 --- a/net/wireless/nl80211.c
6156 +++ b/net/wireless/nl80211.c
6157 @@ -1679,14 +1679,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
6158 if (err)
6159 goto out;
6160
6161 - if (!(rdev->wiphy.flags &
6162 - WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) {
6163 - if (!key.def_uni || !key.def_multi) {
6164 - err = -EOPNOTSUPP;
6165 - goto out;
6166 - }
6167 - }
6168 -
6169 err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
6170 key.def_uni, key.def_multi);
6171
6172 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
6173 index ae3a698..ec1bcec 100644
6174 --- a/security/apparmor/lsm.c
6175 +++ b/security/apparmor/lsm.c
6176 @@ -593,7 +593,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
6177 sa.aad.op = OP_SETPROCATTR;
6178 sa.aad.info = name;
6179 sa.aad.error = -EINVAL;
6180 - return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
6181 + return aa_audit(AUDIT_APPARMOR_DENIED,
6182 + __aa_current_profile(), GFP_KERNEL,
6183 &sa, NULL);
6184 }
6185 } else if (strcmp(name, "exec") == 0) {
6186 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
6187 index 930634e..7a0c586 100644
6188 --- a/security/keys/process_keys.c
6189 +++ b/security/keys/process_keys.c
6190 @@ -845,6 +845,7 @@ void key_replace_session_keyring(void)
6191 new-> sgid = old-> sgid;
6192 new->fsgid = old->fsgid;
6193 new->user = get_uid(old->user);
6194 + new->user_ns = new->user->user_ns;
6195 new->group_info = get_group_info(old->group_info);
6196
6197 new->securebits = old->securebits;
6198 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6199 index 70a9d32..f5cad7c 100644
6200 --- a/sound/pci/hda/hda_intel.c
6201 +++ b/sound/pci/hda/hda_intel.c
6202 @@ -2349,9 +2349,16 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
6203 /* Check VIA/ATI HD Audio Controller exist */
6204 switch (chip->driver_type) {
6205 case AZX_DRIVER_VIA:
6206 - case AZX_DRIVER_ATI:
6207 /* Use link position directly, avoid any transfer problem. */
6208 return POS_FIX_VIACOMBO;
6209 + case AZX_DRIVER_ATI:
6210 + /* ATI chipsets don't work well with position-buffer */
6211 + return POS_FIX_LPIB;
6212 + case AZX_DRIVER_GENERIC:
6213 + /* AMD chipsets also don't work with position-buffer */
6214 + if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
6215 + return POS_FIX_LPIB;
6216 + break;
6217 }
6218
6219 return POS_FIX_AUTO;
6220 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
6221 index 2942d2a..9f886bf 100644
6222 --- a/sound/pci/hda/patch_analog.c
6223 +++ b/sound/pci/hda/patch_analog.c
6224 @@ -3070,6 +3070,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec)
6225
6226 for (i = 0; i < cfg->num_inputs; i++) {
6227 hda_nid_t nid = cfg->inputs[i].pin;
6228 + int type = cfg->inputs[i].type;
6229 switch (nid) {
6230 case 0x15: /* port-C */
6231 snd_hda_codec_write(codec, 0x33, 0, AC_VERB_SET_CONNECT_SEL, 0x0);
6232 @@ -3079,7 +3080,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec)
6233 break;
6234 }
6235 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
6236 - i == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN);
6237 + type == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN);
6238 if (nid != AD1988_PIN_CD_NID)
6239 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
6240 AMP_OUT_MUTE);
6241 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6242 index ad97d93..15b9d16 100644
6243 --- a/sound/pci/hda/patch_conexant.c
6244 +++ b/sound/pci/hda/patch_conexant.c
6245 @@ -3036,6 +3036,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
6246 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
6247 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
6248 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
6249 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
6250 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
6251 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
6252 {}
6253 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
6254 index 94d19c0..1e32235 100644
6255 --- a/sound/pci/hda/patch_sigmatel.c
6256 +++ b/sound/pci/hda/patch_sigmatel.c
6257 @@ -1600,7 +1600,7 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
6258 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
6259 "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
6260 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
6261 - "Dell Studio 1558", STAC_DELL_M6_BOTH),
6262 + "Dell Studio 1558", STAC_DELL_M6_DMIC),
6263 {} /* terminator */
6264 };
6265
6266 diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
6267 index 4005e9a..e55b298 100644
6268 --- a/sound/soc/codecs/wm_hubs.c
6269 +++ b/sound/soc/codecs/wm_hubs.c
6270 @@ -787,17 +787,17 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
6271 static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
6272 { "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
6273 { "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
6274 - { "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
6275 + { "LINEOUT1 Mixer", "Output Switch", "Left Output PGA" },
6276
6277 { "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
6278 { "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
6279 };
6280
6281 static const struct snd_soc_dapm_route lineout1_se_routes[] = {
6282 - { "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
6283 - { "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
6284 + { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
6285 + { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
6286
6287 - { "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
6288 + { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
6289
6290 { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
6291 { "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
6292 @@ -806,17 +806,17 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
6293 static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
6294 { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
6295 { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
6296 - { "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
6297 + { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
6298
6299 { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
6300 { "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
6301 };
6302
6303 static const struct snd_soc_dapm_route lineout2_se_routes[] = {
6304 - { "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
6305 - { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
6306 + { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
6307 + { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
6308
6309 - { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
6310 + { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
6311
6312 { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
6313 { "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
6314 @@ -836,17 +836,21 @@ int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
6315 snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
6316 WM8993_IN2_VU, WM8993_IN2_VU);
6317
6318 + snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_LEFT,
6319 + WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
6320 snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
6321 WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
6322
6323 snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
6324 - WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
6325 + WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC,
6326 + WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC);
6327 snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
6328 WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
6329 WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
6330
6331 snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
6332 - WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
6333 + WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU,
6334 + WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU);
6335 snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
6336 WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
6337 WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);
6338 diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
6339 index 2afabaf..1a591f1 100644
6340 --- a/sound/soc/pxa/raumfeld.c
6341 +++ b/sound/soc/pxa/raumfeld.c
6342 @@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
6343 .hw_params = raumfeld_cs4270_hw_params,
6344 };
6345
6346 -static int raumfeld_line_suspend(struct snd_soc_card *card)
6347 +static int raumfeld_analog_suspend(struct snd_soc_card *card)
6348 {
6349 raumfeld_enable_audio(false);
6350 return 0;
6351 }
6352
6353 -static int raumfeld_line_resume(struct snd_soc_card *card)
6354 +static int raumfeld_analog_resume(struct snd_soc_card *card)
6355 {
6356 raumfeld_enable_audio(true);
6357 return 0;
6358 @@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
6359 .hw_params = raumfeld_ak4104_hw_params,
6360 };
6361
6362 -static struct snd_soc_dai_link raumfeld_dai[] = {
6363 +#define DAI_LINK_CS4270 \
6364 +{ \
6365 + .name = "CS4270", \
6366 + .stream_name = "CS4270", \
6367 + .cpu_dai_name = "pxa-ssp-dai.0", \
6368 + .platform_name = "pxa-pcm-audio", \
6369 + .codec_dai_name = "cs4270-hifi", \
6370 + .codec_name = "cs4270-codec.0-0048", \
6371 + .ops = &raumfeld_cs4270_ops, \
6372 +}
6373 +
6374 +#define DAI_LINK_AK4104 \
6375 +{ \
6376 + .name = "ak4104", \
6377 + .stream_name = "Playback", \
6378 + .cpu_dai_name = "pxa-ssp-dai.1", \
6379 + .codec_dai_name = "ak4104-hifi", \
6380 + .platform_name = "pxa-pcm-audio", \
6381 + .ops = &raumfeld_ak4104_ops, \
6382 + .codec_name = "spi0.0", \
6383 +}
6384 +
6385 +static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
6386 {
6387 - .name = "ak4104",
6388 - .stream_name = "Playback",
6389 - .cpu_dai_name = "pxa-ssp-dai.1",
6390 - .codec_dai_name = "ak4104-hifi",
6391 - .platform_name = "pxa-pcm-audio",
6392 - .ops = &raumfeld_ak4104_ops,
6393 - .codec_name = "ak4104-codec.0",
6394 -},
6395 + DAI_LINK_CS4270,
6396 + DAI_LINK_AK4104,
6397 +};
6398 +
6399 +static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
6400 {
6401 - .name = "CS4270",
6402 - .stream_name = "CS4270",
6403 - .cpu_dai_name = "pxa-ssp-dai.0",
6404 - .platform_name = "pxa-pcm-audio",
6405 - .codec_dai_name = "cs4270-hifi",
6406 - .codec_name = "cs4270-codec.0-0048",
6407 - .ops = &raumfeld_cs4270_ops,
6408 -},};
6409 -
6410 -static struct snd_soc_card snd_soc_raumfeld = {
6411 - .name = "Raumfeld",
6412 - .dai_link = raumfeld_dai,
6413 - .suspend_post = raumfeld_line_suspend,
6414 - .resume_pre = raumfeld_line_resume,
6415 - .num_links = ARRAY_SIZE(raumfeld_dai),
6416 + DAI_LINK_CS4270,
6417 +};
6418 +
6419 +static struct snd_soc_card snd_soc_raumfeld_connector = {
6420 + .name = "Raumfeld Connector",
6421 + .dai_link = snd_soc_raumfeld_connector_dai,
6422 + .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
6423 + .suspend_post = raumfeld_analog_suspend,
6424 + .resume_pre = raumfeld_analog_resume,
6425 +};
6426 +
6427 +static struct snd_soc_card snd_soc_raumfeld_speaker = {
6428 + .name = "Raumfeld Speaker",
6429 + .dai_link = snd_soc_raumfeld_speaker_dai,
6430 + .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
6431 + .suspend_post = raumfeld_analog_suspend,
6432 + .resume_pre = raumfeld_analog_resume,
6433 };
6434
6435 static struct platform_device *raumfeld_audio_device;
6436 @@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)
6437
6438 set_max9485_clk(MAX9485_MCLK_FREQ_122880);
6439
6440 - /* Register LINE and SPDIF */
6441 + /* Register analog device */
6442 raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
6443 if (!raumfeld_audio_device)
6444 return -ENOMEM;
6445
6446 - platform_set_drvdata(raumfeld_audio_device,
6447 - &snd_soc_raumfeld);
6448 - ret = platform_device_add(raumfeld_audio_device);
6449 -
6450 - /* no S/PDIF on Speakers */
6451 if (machine_is_raumfeld_speaker())
6452 + platform_set_drvdata(raumfeld_audio_device,
6453 + &snd_soc_raumfeld_speaker);
6454 +
6455 + if (machine_is_raumfeld_connector())
6456 + platform_set_drvdata(raumfeld_audio_device,
6457 + &snd_soc_raumfeld_connector);
6458 +
6459 + ret = platform_device_add(raumfeld_audio_device);
6460 + if (ret < 0)
6461 return ret;
6462
6463 raumfeld_enable_audio(true);
6464 -
6465 - return ret;
6466 + return 0;
6467 }
6468
6469 static void __exit raumfeld_audio_exit(void)