Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.7/0102-3.7.3-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2049 - (show annotations) (download)
Mon Jan 28 09:30:34 2013 UTC (11 years, 3 months ago) by niro
File size: 329182 byte(s)
-fixed patch
1 diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
2 index 4abe83e..03591a7 100644
3 --- a/Documentation/power/runtime_pm.txt
4 +++ b/Documentation/power/runtime_pm.txt
5 @@ -642,12 +642,13 @@ out the following operations:
6 * During system suspend it calls pm_runtime_get_noresume() and
7 pm_runtime_barrier() for every device right before executing the
8 subsystem-level .suspend() callback for it. In addition to that it calls
9 - pm_runtime_disable() for every device right after executing the
10 - subsystem-level .suspend() callback for it.
11 + __pm_runtime_disable() with 'false' as the second argument for every device
12 + right before executing the subsystem-level .suspend_late() callback for it.
13
14 * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
15 - for every device right before and right after executing the subsystem-level
16 - .resume() callback for it, respectively.
17 + for every device right after executing the subsystem-level .resume_early()
18 + callback and right after executing the subsystem-level .resume() callback
19 + for it, respectively.
20
21 7. Generic subsystem callbacks
22
23 diff --git a/Makefile b/Makefile
24 index 23807aa..51a9bda 100644
25 --- a/Makefile
26 +++ b/Makefile
27 @@ -1021,11 +1021,14 @@ clean: rm-dirs := $(CLEAN_DIRS)
28 clean: rm-files := $(CLEAN_FILES)
29 clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
30
31 -PHONY += $(clean-dirs) clean archclean
32 +PHONY += $(clean-dirs) clean archclean vmlinuxclean
33 $(clean-dirs):
34 $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
35
36 -clean: archclean
37 +vmlinuxclean:
38 + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
39 +
40 +clean: archclean vmlinuxclean
41
42 # mrproper - Delete all generated files, including .config
43 #
44 @@ -1252,7 +1255,6 @@ scripts: ;
45 endif # KBUILD_EXTMOD
46
47 clean: $(clean-dirs)
48 - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
49 $(call cmd,rmdirs)
50 $(call cmd,rmfiles)
51 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
52 diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
53 index 96cd369..09e1790 100644
54 --- a/arch/arm/mach-omap2/board-3430sdp.c
55 +++ b/arch/arm/mach-omap2/board-3430sdp.c
56 @@ -157,6 +157,7 @@ static struct omap_dss_device sdp3430_lcd_device = {
57
58 static struct tfp410_platform_data dvi_panel = {
59 .power_down_gpio = -1,
60 + .i2c_bus_num = -1,
61 };
62
63 static struct omap_dss_device sdp3430_dvi_device = {
64 diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
65 index e162897..f2a920a 100644
66 --- a/arch/arm/mach-omap2/board-am3517evm.c
67 +++ b/arch/arm/mach-omap2/board-am3517evm.c
68 @@ -208,6 +208,7 @@ static struct omap_dss_device am3517_evm_tv_device = {
69
70 static struct tfp410_platform_data dvi_panel = {
71 .power_down_gpio = -1,
72 + .i2c_bus_num = -1,
73 };
74
75 static struct omap_dss_device am3517_evm_dvi_device = {
76 diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
77 index 376d26e..7ed0270 100644
78 --- a/arch/arm/mach-omap2/board-cm-t35.c
79 +++ b/arch/arm/mach-omap2/board-cm-t35.c
80 @@ -243,6 +243,7 @@ static struct omap_dss_device cm_t35_lcd_device = {
81
82 static struct tfp410_platform_data dvi_panel = {
83 .power_down_gpio = CM_T35_DVI_EN_GPIO,
84 + .i2c_bus_num = -1,
85 };
86
87 static struct omap_dss_device cm_t35_dvi_device = {
88 diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
89 index 1fd161e..6f04f0f 100644
90 --- a/arch/arm/mach-omap2/board-devkit8000.c
91 +++ b/arch/arm/mach-omap2/board-devkit8000.c
92 @@ -139,6 +139,7 @@ static struct omap_dss_device devkit8000_lcd_device = {
93
94 static struct tfp410_platform_data dvi_panel = {
95 .power_down_gpio = -1,
96 + .i2c_bus_num = 1,
97 };
98
99 static struct omap_dss_device devkit8000_dvi_device = {
100 diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
101 index b9b776b..5631eb9 100644
102 --- a/arch/arm/mach-omap2/board-omap3evm.c
103 +++ b/arch/arm/mach-omap2/board-omap3evm.c
104 @@ -236,6 +236,7 @@ static struct omap_dss_device omap3_evm_tv_device = {
105
106 static struct tfp410_platform_data dvi_panel = {
107 .power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
108 + .i2c_bus_num = -1,
109 };
110
111 static struct omap_dss_device omap3_evm_dvi_device = {
112 diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
113 index 731235e..797be22 100644
114 --- a/arch/arm/mach-omap2/board-omap3stalker.c
115 +++ b/arch/arm/mach-omap2/board-omap3stalker.c
116 @@ -119,6 +119,7 @@ static struct omap_dss_device omap3_stalker_tv_device = {
117
118 static struct tfp410_platform_data dvi_panel = {
119 .power_down_gpio = DSS_ENABLE_GPIO,
120 + .i2c_bus_num = -1,
121 };
122
123 static struct omap_dss_device omap3_stalker_dvi_device = {
124 diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
125 index a611ad3..b6132aa 100644
126 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
127 +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
128 @@ -463,6 +463,9 @@
129 GPIO76_LCD_PCLK, \
130 GPIO77_LCD_BIAS
131
132 +/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
133 +#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
134 +#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
135
136 extern int keypad_set_wake(unsigned int on);
137 #endif /* __ASM_ARCH_MFP_PXA27X_H */
138 diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
139 index 8047ee0..616cb87 100644
140 --- a/arch/arm/mach-pxa/pxa27x.c
141 +++ b/arch/arm/mach-pxa/pxa27x.c
142 @@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
143 EXPORT_SYMBOL(pxa27x_clear_otgph);
144
145 static unsigned long ac97_reset_config[] = {
146 - GPIO113_GPIO,
147 + GPIO113_AC97_nRESET_GPIO_HIGH,
148 GPIO113_AC97_nRESET,
149 - GPIO95_GPIO,
150 + GPIO95_AC97_nRESET_GPIO_HIGH,
151 GPIO95_AC97_nRESET,
152 };
153
154 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
155 index e9a5fd7..69b17a9 100644
156 --- a/arch/mips/kernel/process.c
157 +++ b/arch/mips/kernel/process.c
158 @@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
159 }
160 }
161 #ifdef CONFIG_HOTPLUG_CPU
162 - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
163 - (system_state == SYSTEM_RUNNING ||
164 - system_state == SYSTEM_BOOTING))
165 + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
166 play_dead();
167 #endif
168 rcu_idle_exit();
169 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
170 index 2833dcb..e7c383b 100644
171 --- a/arch/mips/mm/tlbex.c
172 +++ b/arch/mips/mm/tlbex.c
173 @@ -952,13 +952,6 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
174 #endif
175 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
176 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
177 -
178 - if (cpu_has_mips_r2) {
179 - uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
180 - uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
181 - return;
182 - }
183 -
184 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
185 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
186 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
187 @@ -994,15 +987,6 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
188
189 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
190 {
191 - if (cpu_has_mips_r2) {
192 - /* PTE ptr offset is obtained from BadVAddr */
193 - UASM_i_MFC0(p, tmp, C0_BADVADDR);
194 - UASM_i_LW(p, ptr, 0, ptr);
195 - uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
196 - uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
197 - return;
198 - }
199 -
200 /*
201 * Bug workaround for the Nevada. It seems as if under certain
202 * circumstances the move from cp0_context might produce a
203 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
204 index 58bddee..9e07bd0 100644
205 --- a/arch/powerpc/kernel/head_64.S
206 +++ b/arch/powerpc/kernel/head_64.S
207 @@ -422,7 +422,7 @@ _STATIC(__after_prom_start)
208 tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
209 #endif
210
211 -#ifdef CONFIG_CRASH_DUMP
212 +#ifdef CONFIG_RELOCATABLE
213 /*
214 * Check if the kernel has to be running as relocatable kernel based on the
215 * variable __run_at_load, if it is set the kernel is treated as relocatable
216 diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
217 index ce4cb77..ba48a88 100644
218 --- a/arch/powerpc/kernel/time.c
219 +++ b/arch/powerpc/kernel/time.c
220 @@ -774,13 +774,8 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
221
222 void update_vsyscall_tz(void)
223 {
224 - /* Make userspace gettimeofday spin until we're done. */
225 - ++vdso_data->tb_update_count;
226 - smp_mb();
227 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
228 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
229 - smp_mb();
230 - ++vdso_data->tb_update_count;
231 }
232
233 static void __init clocksource_init(void)
234 diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
235 index c8c6157..c39cd0b 100644
236 --- a/arch/powerpc/kvm/44x_emulate.c
237 +++ b/arch/powerpc/kvm/44x_emulate.c
238 @@ -76,6 +76,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
239 run->dcr.dcrn = dcrn;
240 run->dcr.data = 0;
241 run->dcr.is_write = 0;
242 + vcpu->arch.dcr_is_write = 0;
243 vcpu->arch.io_gpr = rt;
244 vcpu->arch.dcr_needed = 1;
245 kvmppc_account_exit(vcpu, DCR_EXITS);
246 @@ -94,6 +95,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
247 run->dcr.dcrn = dcrn;
248 run->dcr.data = kvmppc_get_gpr(vcpu, rs);
249 run->dcr.is_write = 1;
250 + vcpu->arch.dcr_is_write = 1;
251 vcpu->arch.dcr_needed = 1;
252 kvmppc_account_exit(vcpu, DCR_EXITS);
253 emulated = EMULATE_DO_DCR;
254 diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
255 index ff38b66..ea30a90 100644
256 --- a/arch/powerpc/kvm/e500_tlb.c
257 +++ b/arch/powerpc/kvm/e500_tlb.c
258 @@ -1332,7 +1332,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
259 if (!vcpu_e500->gtlb_priv[1])
260 goto err;
261
262 - vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
263 + vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
264 vcpu_e500->gtlb_params[1].entries,
265 GFP_KERNEL);
266 if (!vcpu_e500->g2h_tlb1_map)
267 diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
268 index 969dddc..8f3920e 100644
269 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c
270 +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
271 @@ -57,7 +57,8 @@ static const char * const board[] __initconst = {
272 "amcc,makalu",
273 "apm,klondike",
274 "est,hotfoot",
275 - "plathome,obs600"
276 + "plathome,obs600",
277 + NULL
278 };
279
280 static int __init ppc40x_probe(void)
281 diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
282 index 07d8de3..19b6080 100644
283 --- a/arch/s390/kernel/entry64.S
284 +++ b/arch/s390/kernel/entry64.S
285 @@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
286 #endif
287 .endm
288
289 - .macro HANDLE_SIE_INTERCEPT scratch
290 + .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
291 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
292 tmhh %r8,0x0001 # interrupting from user ?
293 jnz .+42
294 lgr \scratch,%r9
295 slg \scratch,BASED(.Lsie_loop)
296 clg \scratch,BASED(.Lsie_length)
297 + .if \pgmcheck
298 + # Some program interrupts are suppressing (e.g. protection).
299 + # We must also check the instruction after SIE in that case.
300 + # do_protection_exception will rewind to rewind_pad
301 + jh .+22
302 + .else
303 jhe .+22
304 + .endif
305 lg %r9,BASED(.Lsie_loop)
306 SPP BASED(.Lhost_id) # set host id
307 #endif
308 @@ -391,7 +398,7 @@ ENTRY(pgm_check_handler)
309 lg %r12,__LC_THREAD_INFO
310 larl %r13,system_call
311 lmg %r8,%r9,__LC_PGM_OLD_PSW
312 - HANDLE_SIE_INTERCEPT %r14
313 + HANDLE_SIE_INTERCEPT %r14,1
314 tmhh %r8,0x0001 # test problem state bit
315 jnz 1f # -> fault in user space
316 tmhh %r8,0x4000 # PER bit set in old PSW ?
317 @@ -467,7 +474,7 @@ ENTRY(io_int_handler)
318 lg %r12,__LC_THREAD_INFO
319 larl %r13,system_call
320 lmg %r8,%r9,__LC_IO_OLD_PSW
321 - HANDLE_SIE_INTERCEPT %r14
322 + HANDLE_SIE_INTERCEPT %r14,0
323 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
324 tmhh %r8,0x0001 # interrupting from user?
325 jz io_skip
326 @@ -613,7 +620,7 @@ ENTRY(ext_int_handler)
327 lg %r12,__LC_THREAD_INFO
328 larl %r13,system_call
329 lmg %r8,%r9,__LC_EXT_OLD_PSW
330 - HANDLE_SIE_INTERCEPT %r14
331 + HANDLE_SIE_INTERCEPT %r14,0
332 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
333 tmhh %r8,0x0001 # interrupting from user ?
334 jz ext_skip
335 @@ -661,7 +668,7 @@ ENTRY(mcck_int_handler)
336 lg %r12,__LC_THREAD_INFO
337 larl %r13,system_call
338 lmg %r8,%r9,__LC_MCK_OLD_PSW
339 - HANDLE_SIE_INTERCEPT %r14
340 + HANDLE_SIE_INTERCEPT %r14,0
341 tm __LC_MCCK_CODE,0x80 # system damage?
342 jo mcck_panic # yes -> rest of mcck code invalid
343 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
344 @@ -960,6 +967,13 @@ ENTRY(sie64a)
345 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
346 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
347 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
348 +# some program checks are suppressing. C code (e.g. do_protection_exception)
349 +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
350 +# instructions in the sie_loop should not cause program interrupts. So
351 +# lets use a nop (47 00 00 00) as a landing pad.
352 +# See also HANDLE_SIE_INTERCEPT
353 +rewind_pad:
354 + nop 0
355 sie_loop:
356 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
357 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
358 @@ -999,6 +1013,7 @@ sie_fault:
359 .Lhost_id:
360 .quad 0
361
362 + EX_TABLE(rewind_pad,sie_fault)
363 EX_TABLE(sie_loop,sie_fault)
364 #endif
365
366 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
367 index ecced9d..38883f0 100644
368 --- a/arch/s390/kvm/kvm-s390.c
369 +++ b/arch/s390/kvm/kvm-s390.c
370 @@ -997,7 +997,7 @@ static int __init kvm_s390_init(void)
371 }
372 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
373 facilities[0] &= 0xff00fff3f47c0000ULL;
374 - facilities[1] &= 0x201c000000000000ULL;
375 + facilities[1] &= 0x001c000000000000ULL;
376 return 0;
377 }
378
379 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
380 index c441834..1b888e8 100644
381 --- a/drivers/acpi/scan.c
382 +++ b/drivers/acpi/scan.c
383 @@ -859,8 +859,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
384 static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
385 {
386 struct acpi_device_id button_device_ids[] = {
387 - {"PNP0C0D", 0},
388 {"PNP0C0C", 0},
389 + {"PNP0C0D", 0},
390 {"PNP0C0E", 0},
391 {"", 0},
392 };
393 @@ -872,6 +872,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
394 /* Power button, Lid switch always enable wakeup */
395 if (!acpi_match_device_ids(device, button_device_ids)) {
396 device->wakeup.flags.run_wake = 1;
397 + if (!acpi_match_device_ids(device, &button_device_ids[1])) {
398 + /* Do not use Lid/sleep button for S5 wakeup */
399 + if (device->wakeup.sleep_state == ACPI_STATE_S5)
400 + device->wakeup.sleep_state = ACPI_STATE_S4;
401 + }
402 device_set_wakeup_capable(&device->dev, true);
403 return;
404 }
405 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
406 index f46fbd3..586362e 100644
407 --- a/drivers/ata/libata-core.c
408 +++ b/drivers/ata/libata-core.c
409 @@ -2560,6 +2560,7 @@ int ata_bus_probe(struct ata_port *ap)
410 * bus as we may be talking too fast.
411 */
412 dev->pio_mode = XFER_PIO_0;
413 + dev->dma_mode = 0xff;
414
415 /* If the controller has a pio mode setup function
416 * then use it to set the chipset to rights. Don't
417 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
418 index e60437c..bf039b0 100644
419 --- a/drivers/ata/libata-eh.c
420 +++ b/drivers/ata/libata-eh.c
421 @@ -2657,6 +2657,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
422 * bus as we may be talking too fast.
423 */
424 dev->pio_mode = XFER_PIO_0;
425 + dev->dma_mode = 0xff;
426
427 /* If the controller has a pio mode setup function
428 * then use it to set the chipset to rights. Don't
429 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
430 index a6df6a3..7c337e7 100644
431 --- a/drivers/ata/libata-scsi.c
432 +++ b/drivers/ata/libata-scsi.c
433 @@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
434 struct ata_port *ap = ata_shost_to_port(sdev->host);
435 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
436
437 - if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
438 + if (atadev && ap->ops->sw_activity_show &&
439 + (ap->flags & ATA_FLAG_SW_ACTIVITY))
440 return ap->ops->sw_activity_show(atadev, buf);
441 return -EINVAL;
442 }
443 @@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
444 enum sw_activity val;
445 int rc;
446
447 - if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
448 + if (atadev && ap->ops->sw_activity_store &&
449 + (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
450 val = simple_strtoul(buf, NULL, 0);
451 switch (val) {
452 case OFF: case BLINK_ON: case BLINK_OFF:
453 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
454 index 489c817..fb0dd87 100644
455 --- a/drivers/ata/sata_promise.c
456 +++ b/drivers/ata/sata_promise.c
457 @@ -147,6 +147,10 @@ struct pdc_port_priv {
458 dma_addr_t pkt_dma;
459 };
460
461 +struct pdc_host_priv {
462 + spinlock_t hard_reset_lock;
463 +};
464 +
465 static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
466 static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
467 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
468 @@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
469 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
470 void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
471 unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
472 + struct pdc_host_priv *hpriv = ap->host->private_data;
473 u8 tmp;
474
475 - spin_lock(&ap->host->lock);
476 + spin_lock(&hpriv->hard_reset_lock);
477
478 tmp = readb(pcictl_b1_mmio);
479 tmp &= ~(0x10 << ata_no);
480 @@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
481 writeb(tmp, pcictl_b1_mmio);
482 readb(pcictl_b1_mmio); /* flush */
483
484 - spin_unlock(&ap->host->lock);
485 + spin_unlock(&hpriv->hard_reset_lock);
486 }
487
488 static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
489 @@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
490 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
491 const struct ata_port_info *ppi[PDC_MAX_PORTS];
492 struct ata_host *host;
493 + struct pdc_host_priv *hpriv;
494 void __iomem *host_mmio;
495 int n_ports, i, rc;
496 int is_sataii_tx4;
497 @@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
498 dev_err(&pdev->dev, "failed to allocate host\n");
499 return -ENOMEM;
500 }
501 + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
502 + if (!hpriv)
503 + return -ENOMEM;
504 + spin_lock_init(&hpriv->hard_reset_lock);
505 + host->private_data = hpriv;
506 host->iomap = pcim_iomap_table(pdev);
507
508 is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
509 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
510 index a3c1404..2b7f77d 100644
511 --- a/drivers/base/power/main.c
512 +++ b/drivers/base/power/main.c
513 @@ -513,6 +513,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
514
515 Out:
516 TRACE_RESUME(error);
517 +
518 + pm_runtime_enable(dev);
519 return error;
520 }
521
522 @@ -589,8 +591,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
523 if (!dev->power.is_suspended)
524 goto Unlock;
525
526 - pm_runtime_enable(dev);
527 -
528 if (dev->pm_domain) {
529 info = "power domain ";
530 callback = pm_op(&dev->pm_domain->ops, state);
531 @@ -930,6 +930,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
532 pm_callback_t callback = NULL;
533 char *info = NULL;
534
535 + __pm_runtime_disable(dev, false);
536 +
537 if (dev->power.syscore)
538 return 0;
539
540 @@ -1133,11 +1135,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
541
542 Complete:
543 complete_all(&dev->power.completion);
544 -
545 if (error)
546 async_error = error;
547 - else if (dev->power.is_suspended)
548 - __pm_runtime_disable(dev, false);
549
550 return error;
551 }
552 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
553 index bb1ff17..c394041 100644
554 --- a/drivers/base/regmap/regmap-debugfs.c
555 +++ b/drivers/base/regmap/regmap-debugfs.c
556 @@ -90,7 +90,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
557 /* If we're in the region the user is trying to read */
558 if (p >= *ppos) {
559 /* ...but not beyond it */
560 - if (buf_pos >= count - 1 - tot_len)
561 + if (buf_pos + 1 + tot_len >= count)
562 break;
563
564 /* Format the register */
565 diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
566 index cc65b45..b4e83b8 100644
567 --- a/drivers/bcma/driver_mips.c
568 +++ b/drivers/bcma/driver_mips.c
569 @@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
570 bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
571 ~(1 << irqflag));
572 else
573 - bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
574 + bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
575
576 /* assign the new one */
577 if (irq == 0) {
578 diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
579 index 00dfc50..047baf0 100644
580 --- a/drivers/block/aoe/aoeblk.c
581 +++ b/drivers/block/aoe/aoeblk.c
582 @@ -231,18 +231,12 @@ aoeblk_gdalloc(void *vp)
583 if (q == NULL) {
584 pr_err("aoe: cannot allocate block queue for %ld.%d\n",
585 d->aoemajor, d->aoeminor);
586 - mempool_destroy(mp);
587 - goto err_disk;
588 + goto err_mempool;
589 }
590
591 - d->blkq = blk_alloc_queue(GFP_KERNEL);
592 - if (!d->blkq)
593 - goto err_mempool;
594 - d->blkq->backing_dev_info.name = "aoe";
595 - if (bdi_init(&d->blkq->backing_dev_info))
596 - goto err_blkq;
597 spin_lock_irqsave(&d->lock, flags);
598 - blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS);
599 + blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
600 + q->backing_dev_info.name = "aoe";
601 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
602 d->bufpool = mp;
603 d->blkq = gd->queue = q;
604 @@ -265,11 +259,8 @@ aoeblk_gdalloc(void *vp)
605 aoedisk_add_sysfs(d);
606 return;
607
608 -err_blkq:
609 - blk_cleanup_queue(d->blkq);
610 - d->blkq = NULL;
611 err_mempool:
612 - mempool_destroy(d->bufpool);
613 + mempool_destroy(mp);
614 err_disk:
615 put_disk(gd);
616 err:
617 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
618 index bb3d9be..67de124 100644
619 --- a/drivers/block/rbd.c
620 +++ b/drivers/block/rbd.c
621 @@ -61,7 +61,10 @@
622
623 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
624
625 -#define RBD_MAX_SNAP_NAME_LEN 32
626 +#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
627 +#define RBD_MAX_SNAP_NAME_LEN \
628 + (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
629 +
630 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
631 #define RBD_MAX_OPT_LEN 1024
632
633 @@ -204,6 +207,7 @@ struct rbd_device {
634
635 /* sysfs related */
636 struct device dev;
637 + unsigned long open_count;
638 };
639
640 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
641 @@ -218,7 +222,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
642 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
643
644 static void rbd_dev_release(struct device *dev);
645 -static void __rbd_remove_snap_dev(struct rbd_snap *snap);
646 +static void rbd_remove_snap_dev(struct rbd_snap *snap);
647
648 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
649 size_t count);
650 @@ -277,8 +281,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
651 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
652 return -EROFS;
653
654 + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
655 rbd_get_dev(rbd_dev);
656 set_device_ro(bdev, rbd_dev->mapping.read_only);
657 + rbd_dev->open_count++;
658 + mutex_unlock(&ctl_mutex);
659
660 return 0;
661 }
662 @@ -287,7 +294,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
663 {
664 struct rbd_device *rbd_dev = disk->private_data;
665
666 + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
667 + rbd_assert(rbd_dev->open_count > 0);
668 + rbd_dev->open_count--;
669 rbd_put_dev(rbd_dev);
670 + mutex_unlock(&ctl_mutex);
671
672 return 0;
673 }
674 @@ -388,7 +399,7 @@ enum {
675 static match_table_t rbd_opts_tokens = {
676 /* int args above */
677 /* string args above */
678 - {Opt_read_only, "mapping.read_only"},
679 + {Opt_read_only, "read_only"},
680 {Opt_read_only, "ro"}, /* Alternate spelling */
681 {Opt_read_write, "read_write"},
682 {Opt_read_write, "rw"}, /* Alternate spelling */
683 @@ -695,13 +706,13 @@ static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
684 u64 segment;
685 int ret;
686
687 - name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
688 + name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
689 if (!name)
690 return NULL;
691 segment = offset >> rbd_dev->header.obj_order;
692 - ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
693 + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
694 rbd_dev->header.object_prefix, segment);
695 - if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
696 + if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
697 pr_err("error formatting segment name for #%llu (%d)\n",
698 segment, ret);
699 kfree(name);
700 @@ -1707,13 +1718,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
701 return ret;
702 }
703
704 -static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
705 +static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
706 {
707 struct rbd_snap *snap;
708 struct rbd_snap *next;
709
710 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
711 - __rbd_remove_snap_dev(snap);
712 + rbd_remove_snap_dev(snap);
713 }
714
715 /*
716 @@ -2057,7 +2068,7 @@ static bool rbd_snap_registered(struct rbd_snap *snap)
717 return ret;
718 }
719
720 -static void __rbd_remove_snap_dev(struct rbd_snap *snap)
721 +static void rbd_remove_snap_dev(struct rbd_snap *snap)
722 {
723 list_del(&snap->node);
724 if (device_is_registered(&snap->dev))
725 @@ -2073,7 +2084,7 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
726 dev->type = &rbd_snap_device_type;
727 dev->parent = parent;
728 dev->release = rbd_snap_dev_release;
729 - dev_set_name(dev, "snap_%s", snap->name);
730 + dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
731 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
732
733 ret = device_register(dev);
734 @@ -2189,6 +2200,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
735 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
736 if (ret < 0)
737 goto out;
738 + ret = 0; /* rbd_req_sync_exec() can return positive */
739
740 p = reply_buf;
741 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
742 @@ -2438,7 +2450,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
743
744 if (rbd_dev->mapping.snap_id == snap->id)
745 rbd_dev->mapping.snap_exists = false;
746 - __rbd_remove_snap_dev(snap);
747 + rbd_remove_snap_dev(snap);
748 dout("%ssnap id %llu has been removed\n",
749 rbd_dev->mapping.snap_id == snap->id ?
750 "mapped " : "",
751 @@ -2621,8 +2633,8 @@ static void rbd_dev_id_put(struct rbd_device *rbd_dev)
752 struct rbd_device *rbd_dev;
753
754 rbd_dev = list_entry(tmp, struct rbd_device, node);
755 - if (rbd_id > max_id)
756 - max_id = rbd_id;
757 + if (rbd_dev->dev_id > max_id)
758 + max_id = rbd_dev->dev_id;
759 }
760 spin_unlock(&rbd_dev_list_lock);
761
762 @@ -2765,8 +2777,13 @@ static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
763 if (!rbd_dev->image_name)
764 goto out_err;
765
766 - /* Snapshot name is optional */
767 + /* Snapshot name is optional; default is to use "head" */
768 +
769 len = next_token(&buf);
770 + if (len > RBD_MAX_SNAP_NAME_LEN) {
771 + err_ptr = ERR_PTR(-ENAMETOOLONG);
772 + goto out_err;
773 + }
774 if (!len) {
775 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
776 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
777 @@ -2777,8 +2794,6 @@ static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
778 memcpy(snap_name, buf, len);
779 *(snap_name + len) = '\0';
780
781 -dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
782 -
783 return snap_name;
784
785 out_err:
786 @@ -2841,6 +2856,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
787 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
788 if (ret < 0)
789 goto out;
790 + ret = 0; /* rbd_req_sync_exec() can return positive */
791
792 p = response;
793 rbd_dev->image_id = ceph_extract_encoded_string(&p,
794 @@ -3045,11 +3061,11 @@ static ssize_t rbd_add(struct bus_type *bus,
795 /* no need to lock here, as rbd_dev is not registered yet */
796 rc = rbd_dev_snaps_update(rbd_dev);
797 if (rc)
798 - goto err_out_header;
799 + goto err_out_probe;
800
801 rc = rbd_dev_set_mapping(rbd_dev, snap_name);
802 if (rc)
803 - goto err_out_header;
804 + goto err_out_snaps;
805
806 /* generate unique id: find highest unique id, add one */
807 rbd_dev_id_get(rbd_dev);
808 @@ -3113,7 +3129,9 @@ err_out_blkdev:
809 unregister_blkdev(rbd_dev->major, rbd_dev->name);
810 err_out_id:
811 rbd_dev_id_put(rbd_dev);
812 -err_out_header:
813 +err_out_snaps:
814 + rbd_remove_all_snaps(rbd_dev);
815 +err_out_probe:
816 rbd_header_free(&rbd_dev->header);
817 err_out_client:
818 kfree(rbd_dev->header_name);
819 @@ -3211,7 +3229,12 @@ static ssize_t rbd_remove(struct bus_type *bus,
820 goto done;
821 }
822
823 - __rbd_remove_all_snaps(rbd_dev);
824 + if (rbd_dev->open_count) {
825 + ret = -EBUSY;
826 + goto done;
827 + }
828 +
829 + rbd_remove_all_snaps(rbd_dev);
830 rbd_bus_del_dev(rbd_dev);
831
832 done:
833 diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
834 index cbe77fa..49d77cb 100644
835 --- a/drivers/block/rbd_types.h
836 +++ b/drivers/block/rbd_types.h
837 @@ -46,8 +46,6 @@
838 #define RBD_MIN_OBJ_ORDER 16
839 #define RBD_MAX_OBJ_ORDER 30
840
841 -#define RBD_MAX_SEG_NAME_LEN 128
842 -
843 #define RBD_COMP_NONE 0
844 #define RBD_CRYPT_NONE 0
845
846 diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
847 index 3265844..2a297f8 100644
848 --- a/drivers/cpuidle/coupled.c
849 +++ b/drivers/cpuidle/coupled.c
850 @@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
851 int all;
852 int ret;
853
854 - all = coupled->online_count || (coupled->online_count << WAITING_BITS);
855 + all = coupled->online_count | (coupled->online_count << WAITING_BITS);
856 ret = atomic_add_unless(&coupled->ready_waiting_counts,
857 -MAX_WAITING_CPUS, all);
858
859 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
860 index ed0bc07..fe4fa1c 100644
861 --- a/drivers/edac/edac_mc_sysfs.c
862 +++ b/drivers/edac/edac_mc_sysfs.c
863 @@ -1145,7 +1145,7 @@ int __init edac_mc_sysfs_init(void)
864
865 void __exit edac_mc_sysfs_exit(void)
866 {
867 - put_device(mci_pdev);
868 device_del(mci_pdev);
869 + put_device(mci_pdev);
870 edac_put_sysfs_subsys();
871 }
872 diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
873 index 08c6749..638e1f7 100644
874 --- a/drivers/firewire/net.c
875 +++ b/drivers/firewire/net.c
876 @@ -861,8 +861,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
877 if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
878 buf_ptr += 2;
879 length -= IEEE1394_GASP_HDR_SIZE;
880 - fwnet_incoming_packet(dev, buf_ptr, length,
881 - source_node_id, -1, true);
882 + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
883 + context->card->generation, true);
884 }
885
886 packet.payload_length = dev->rcv_buffer_size;
887 @@ -958,7 +958,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
888 break;
889 }
890
891 - skb_pull(skb, ptask->max_payload);
892 + if (ptask->dest_node == IEEE1394_ALL_NODES) {
893 + skb_pull(skb,
894 + ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
895 + } else {
896 + skb_pull(skb, ptask->max_payload);
897 + }
898 if (ptask->outstanding_pkts > 1) {
899 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
900 dg_size, fg_off, datagram_label);
901 @@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
902 smp_rmb();
903 node_id = dev->card->node_id;
904
905 - p = skb_push(ptask->skb, 8);
906 + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
907 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
908 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
909 | RFC2734_SW_VERSION, &p[4]);
910 diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
911 index 0761a03..665553c 100644
912 --- a/drivers/gpu/drm/drm_mm.c
913 +++ b/drivers/gpu/drm/drm_mm.c
914 @@ -213,11 +213,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
915
916 BUG_ON(!hole_node->hole_follows || node->allocated);
917
918 - if (mm->color_adjust)
919 - mm->color_adjust(hole_node, color, &adj_start, &adj_end);
920 -
921 if (adj_start < start)
922 adj_start = start;
923 + if (adj_end > end)
924 + adj_end = end;
925 +
926 + if (mm->color_adjust)
927 + mm->color_adjust(hole_node, color, &adj_start, &adj_end);
928
929 if (alignment) {
930 unsigned tmp = adj_start % alignment;
931 @@ -489,7 +491,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
932 mm->scan_size = size;
933 mm->scanned_blocks = 0;
934 mm->scan_hit_start = 0;
935 - mm->scan_hit_size = 0;
936 + mm->scan_hit_end = 0;
937 mm->scan_check_range = 0;
938 mm->prev_scanned_node = NULL;
939 }
940 @@ -516,7 +518,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
941 mm->scan_size = size;
942 mm->scanned_blocks = 0;
943 mm->scan_hit_start = 0;
944 - mm->scan_hit_size = 0;
945 + mm->scan_hit_end = 0;
946 mm->scan_start = start;
947 mm->scan_end = end;
948 mm->scan_check_range = 1;
949 @@ -535,8 +537,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
950 struct drm_mm *mm = node->mm;
951 struct drm_mm_node *prev_node;
952 unsigned long hole_start, hole_end;
953 - unsigned long adj_start;
954 - unsigned long adj_end;
955 + unsigned long adj_start, adj_end;
956
957 mm->scanned_blocks++;
958
959 @@ -553,14 +554,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
960 node->node_list.next = &mm->prev_scanned_node->node_list;
961 mm->prev_scanned_node = node;
962
963 - hole_start = drm_mm_hole_node_start(prev_node);
964 - hole_end = drm_mm_hole_node_end(prev_node);
965 -
966 - adj_start = hole_start;
967 - adj_end = hole_end;
968 -
969 - if (mm->color_adjust)
970 - mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
971 + adj_start = hole_start = drm_mm_hole_node_start(prev_node);
972 + adj_end = hole_end = drm_mm_hole_node_end(prev_node);
973
974 if (mm->scan_check_range) {
975 if (adj_start < mm->scan_start)
976 @@ -569,11 +564,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
977 adj_end = mm->scan_end;
978 }
979
980 + if (mm->color_adjust)
981 + mm->color_adjust(prev_node, mm->scan_color,
982 + &adj_start, &adj_end);
983 +
984 if (check_free_hole(adj_start, adj_end,
985 mm->scan_size, mm->scan_alignment)) {
986 mm->scan_hit_start = hole_start;
987 - mm->scan_hit_size = hole_end;
988 -
989 + mm->scan_hit_end = hole_end;
990 return 1;
991 }
992
993 @@ -609,19 +607,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
994 node_list);
995
996 prev_node->hole_follows = node->scanned_preceeds_hole;
997 - INIT_LIST_HEAD(&node->node_list);
998 list_add(&node->node_list, &prev_node->node_list);
999
1000 - /* Only need to check for containement because start&size for the
1001 - * complete resulting free block (not just the desired part) is
1002 - * stored. */
1003 - if (node->start >= mm->scan_hit_start &&
1004 - node->start + node->size
1005 - <= mm->scan_hit_start + mm->scan_hit_size) {
1006 - return 1;
1007 - }
1008 -
1009 - return 0;
1010 + return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
1011 + node->start < mm->scan_hit_end);
1012 }
1013 EXPORT_SYMBOL(drm_mm_scan_remove_block);
1014
1015 diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1016 index fae1f2e..f2b2f01 100644
1017 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1018 +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1019 @@ -210,7 +210,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
1020
1021 /* is it from our device? */
1022 if (obj->dev == drm_dev) {
1023 + /*
1024 + * Importing dmabuf exported from out own gem increases
1025 + * refcount on gem itself instead of f_count of dmabuf.
1026 + */
1027 drm_gem_object_reference(obj);
1028 + dma_buf_put(dma_buf);
1029 return obj;
1030 }
1031 }
1032 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
1033 index dde8b50..da21b11 100644
1034 --- a/drivers/gpu/drm/i915/i915_debugfs.c
1035 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
1036 @@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
1037 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
1038 pipe, plane);
1039 } else {
1040 - if (!work->pending) {
1041 + if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
1042 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
1043 pipe, plane);
1044 } else {
1045 @@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
1046 seq_printf(m, "Stall check enabled, ");
1047 else
1048 seq_printf(m, "Stall check waiting for page flip ioctl, ");
1049 - seq_printf(m, "%d prepares\n", work->pending);
1050 + seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
1051
1052 if (work->old_fb_obj) {
1053 struct drm_i915_gem_object *obj = work->old_fb_obj;
1054 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1055 index 6770ee6..1f20ead 100644
1056 --- a/drivers/gpu/drm/i915/i915_drv.c
1057 +++ b/drivers/gpu/drm/i915/i915_drv.c
1058 @@ -552,7 +552,7 @@ static int i915_drm_thaw(struct drm_device *dev)
1059 mutex_unlock(&dev->struct_mutex);
1060
1061 intel_modeset_init_hw(dev);
1062 - intel_modeset_setup_hw_state(dev);
1063 + intel_modeset_setup_hw_state(dev, false);
1064 drm_mode_config_reset(dev);
1065 drm_irq_install(dev);
1066 }
1067 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1068 index f511fa2..92f1750 100644
1069 --- a/drivers/gpu/drm/i915/i915_drv.h
1070 +++ b/drivers/gpu/drm/i915/i915_drv.h
1071 @@ -1595,7 +1595,8 @@ extern void intel_modeset_init(struct drm_device *dev);
1072 extern void intel_modeset_gem_init(struct drm_device *dev);
1073 extern void intel_modeset_cleanup(struct drm_device *dev);
1074 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1075 -extern void intel_modeset_setup_hw_state(struct drm_device *dev);
1076 +extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1077 + bool force_restore);
1078 extern bool intel_fbc_enabled(struct drm_device *dev);
1079 extern void intel_disable_fbc(struct drm_device *dev);
1080 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1081 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1082 index 9b285da..fe3a778 100644
1083 --- a/drivers/gpu/drm/i915/i915_gem.c
1084 +++ b/drivers/gpu/drm/i915/i915_gem.c
1085 @@ -1718,7 +1718,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1086 }
1087
1088 static long
1089 -i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1090 +__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1091 + bool purgeable_only)
1092 {
1093 struct drm_i915_gem_object *obj, *next;
1094 long count = 0;
1095 @@ -1726,7 +1727,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1096 list_for_each_entry_safe(obj, next,
1097 &dev_priv->mm.unbound_list,
1098 gtt_list) {
1099 - if (i915_gem_object_is_purgeable(obj) &&
1100 + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1101 i915_gem_object_put_pages(obj) == 0) {
1102 count += obj->base.size >> PAGE_SHIFT;
1103 if (count >= target)
1104 @@ -1737,7 +1738,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1105 list_for_each_entry_safe(obj, next,
1106 &dev_priv->mm.inactive_list,
1107 mm_list) {
1108 - if (i915_gem_object_is_purgeable(obj) &&
1109 + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1110 i915_gem_object_unbind(obj) == 0 &&
1111 i915_gem_object_put_pages(obj) == 0) {
1112 count += obj->base.size >> PAGE_SHIFT;
1113 @@ -1749,6 +1750,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1114 return count;
1115 }
1116
1117 +static long
1118 +i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1119 +{
1120 + return __i915_gem_shrink(dev_priv, target, true);
1121 +}
1122 +
1123 static void
1124 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1125 {
1126 @@ -3511,14 +3518,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1127 goto out;
1128 }
1129
1130 - obj->user_pin_count++;
1131 - obj->pin_filp = file;
1132 - if (obj->user_pin_count == 1) {
1133 + if (obj->user_pin_count == 0) {
1134 ret = i915_gem_object_pin(obj, args->alignment, true, false);
1135 if (ret)
1136 goto out;
1137 }
1138
1139 + obj->user_pin_count++;
1140 + obj->pin_filp = file;
1141 +
1142 /* XXX - flush the CPU caches for pinned objects
1143 * as the X server doesn't manage domains yet
1144 */
1145 @@ -4425,6 +4433,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
1146 if (nr_to_scan) {
1147 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
1148 if (nr_to_scan > 0)
1149 + nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
1150 + false);
1151 + if (nr_to_scan > 0)
1152 i915_gem_shrink_all(dev_priv);
1153 }
1154
1155 @@ -4432,7 +4443,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
1156 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
1157 if (obj->pages_pin_count == 0)
1158 cnt += obj->base.size >> PAGE_SHIFT;
1159 - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1160 + list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
1161 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
1162 cnt += obj->base.size >> PAGE_SHIFT;
1163
1164 diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1165 index 773ef77..abeaafe 100644
1166 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1167 +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1168 @@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1169 {
1170 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1171
1172 - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
1173 + return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
1174 }
1175
1176 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
1177 @@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1178 obj = dma_buf->priv;
1179 /* is it from our device? */
1180 if (obj->base.dev == dev) {
1181 + /*
1182 + * Importing dmabuf exported from out own gem increases
1183 + * refcount on gem itself instead of f_count of dmabuf.
1184 + */
1185 drm_gem_object_reference(&obj->base);
1186 + dma_buf_put(dma_buf);
1187 return &obj->base;
1188 }
1189 }
1190 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1191 index 32e1bda..dc29ace 100644
1192 --- a/drivers/gpu/drm/i915/i915_irq.c
1193 +++ b/drivers/gpu/drm/i915/i915_irq.c
1194 @@ -1464,7 +1464,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1195 spin_lock_irqsave(&dev->event_lock, flags);
1196 work = intel_crtc->unpin_work;
1197
1198 - if (work == NULL || work->pending || !work->enable_stall_check) {
1199 + if (work == NULL ||
1200 + atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1201 + !work->enable_stall_check) {
1202 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1203 spin_unlock_irqrestore(&dev->event_lock, flags);
1204 return;
1205 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1206 index a4162dd..09ae4b0 100644
1207 --- a/drivers/gpu/drm/i915/i915_reg.h
1208 +++ b/drivers/gpu/drm/i915/i915_reg.h
1209 @@ -3315,6 +3315,8 @@
1210 #define _PFA_CTL_1 0x68080
1211 #define _PFB_CTL_1 0x68880
1212 #define PF_ENABLE (1<<31)
1213 +#define PF_PIPE_SEL_MASK_IVB (3<<29)
1214 +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
1215 #define PF_FILTER_MASK (3<<23)
1216 #define PF_FILTER_PROGRAMMED (0<<23)
1217 #define PF_FILTER_MED_3x3 (1<<23)
1218 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1219 index b426d44..4d3c7c6 100644
1220 --- a/drivers/gpu/drm/i915/intel_display.c
1221 +++ b/drivers/gpu/drm/i915/intel_display.c
1222 @@ -2302,18 +2302,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
1223 FDI_FE_ERRC_ENABLE);
1224 }
1225
1226 -static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
1227 -{
1228 - struct drm_i915_private *dev_priv = dev->dev_private;
1229 - u32 flags = I915_READ(SOUTH_CHICKEN1);
1230 -
1231 - flags |= FDI_PHASE_SYNC_OVR(pipe);
1232 - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
1233 - flags |= FDI_PHASE_SYNC_EN(pipe);
1234 - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
1235 - POSTING_READ(SOUTH_CHICKEN1);
1236 -}
1237 -
1238 /* The FDI link training functions for ILK/Ibexpeak. */
1239 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1240 {
1241 @@ -2464,9 +2452,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1242 POSTING_READ(reg);
1243 udelay(150);
1244
1245 - if (HAS_PCH_CPT(dev))
1246 - cpt_phase_pointer_enable(dev, pipe);
1247 -
1248 for (i = 0; i < 4; i++) {
1249 reg = FDI_TX_CTL(pipe);
1250 temp = I915_READ(reg);
1251 @@ -2593,9 +2578,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
1252 POSTING_READ(reg);
1253 udelay(150);
1254
1255 - if (HAS_PCH_CPT(dev))
1256 - cpt_phase_pointer_enable(dev, pipe);
1257 -
1258 for (i = 0; i < 4; i++) {
1259 reg = FDI_TX_CTL(pipe);
1260 temp = I915_READ(reg);
1261 @@ -2737,17 +2719,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
1262 udelay(100);
1263 }
1264
1265 -static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
1266 -{
1267 - struct drm_i915_private *dev_priv = dev->dev_private;
1268 - u32 flags = I915_READ(SOUTH_CHICKEN1);
1269 -
1270 - flags &= ~(FDI_PHASE_SYNC_EN(pipe));
1271 - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
1272 - flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
1273 - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
1274 - POSTING_READ(SOUTH_CHICKEN1);
1275 -}
1276 static void ironlake_fdi_disable(struct drm_crtc *crtc)
1277 {
1278 struct drm_device *dev = crtc->dev;
1279 @@ -2777,8 +2748,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
1280 I915_WRITE(FDI_RX_CHICKEN(pipe),
1281 I915_READ(FDI_RX_CHICKEN(pipe) &
1282 ~FDI_RX_PHASE_SYNC_POINTER_EN));
1283 - } else if (HAS_PCH_CPT(dev)) {
1284 - cpt_phase_pointer_disable(dev, pipe);
1285 }
1286
1287 /* still set train pattern 1 */
1288 @@ -3225,7 +3194,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
1289 * as some pre-programmed values are broken,
1290 * e.g. x201.
1291 */
1292 - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
1293 + if (IS_IVYBRIDGE(dev))
1294 + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
1295 + PF_PIPE_SEL_IVB(pipe));
1296 + else
1297 + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
1298 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
1299 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
1300 }
1301 @@ -6183,14 +6156,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
1302 {
1303 struct intel_unpin_work *work =
1304 container_of(__work, struct intel_unpin_work, work);
1305 + struct drm_device *dev = work->crtc->dev;
1306
1307 - mutex_lock(&work->dev->struct_mutex);
1308 + mutex_lock(&dev->struct_mutex);
1309 intel_unpin_fb_obj(work->old_fb_obj);
1310 drm_gem_object_unreference(&work->pending_flip_obj->base);
1311 drm_gem_object_unreference(&work->old_fb_obj->base);
1312
1313 - intel_update_fbc(work->dev);
1314 - mutex_unlock(&work->dev->struct_mutex);
1315 + intel_update_fbc(dev);
1316 + mutex_unlock(&dev->struct_mutex);
1317 +
1318 + BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
1319 + atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
1320 +
1321 kfree(work);
1322 }
1323
1324 @@ -6211,11 +6189,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
1325
1326 spin_lock_irqsave(&dev->event_lock, flags);
1327 work = intel_crtc->unpin_work;
1328 - if (work == NULL || !work->pending) {
1329 +
1330 + /* Ensure we don't miss a work->pending update ... */
1331 + smp_rmb();
1332 +
1333 + if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
1334 spin_unlock_irqrestore(&dev->event_lock, flags);
1335 return;
1336 }
1337
1338 + /* and that the unpin work is consistent wrt ->pending. */
1339 + smp_rmb();
1340 +
1341 intel_crtc->unpin_work = NULL;
1342
1343 if (work->event) {
1344 @@ -6238,9 +6223,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
1345
1346 atomic_clear_mask(1 << intel_crtc->plane,
1347 &obj->pending_flip.counter);
1348 -
1349 wake_up(&dev_priv->pending_flip_queue);
1350 - schedule_work(&work->work);
1351 +
1352 + queue_work(dev_priv->wq, &work->work);
1353
1354 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
1355 }
1356 @@ -6268,16 +6253,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
1357 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
1358 unsigned long flags;
1359
1360 + /* NB: An MMIO update of the plane base pointer will also
1361 + * generate a page-flip completion irq, i.e. every modeset
1362 + * is also accompanied by a spurious intel_prepare_page_flip().
1363 + */
1364 spin_lock_irqsave(&dev->event_lock, flags);
1365 - if (intel_crtc->unpin_work) {
1366 - if ((++intel_crtc->unpin_work->pending) > 1)
1367 - DRM_ERROR("Prepared flip multiple times\n");
1368 - } else {
1369 - DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
1370 - }
1371 + if (intel_crtc->unpin_work)
1372 + atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
1373 spin_unlock_irqrestore(&dev->event_lock, flags);
1374 }
1375
1376 +inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
1377 +{
1378 + /* Ensure that the work item is consistent when activating it ... */
1379 + smp_wmb();
1380 + atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
1381 + /* and that it is marked active as soon as the irq could fire. */
1382 + smp_wmb();
1383 +}
1384 +
1385 static int intel_gen2_queue_flip(struct drm_device *dev,
1386 struct drm_crtc *crtc,
1387 struct drm_framebuffer *fb,
1388 @@ -6311,6 +6305,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
1389 intel_ring_emit(ring, fb->pitches[0]);
1390 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1391 intel_ring_emit(ring, 0); /* aux display base address, unused */
1392 +
1393 + intel_mark_page_flip_active(intel_crtc);
1394 intel_ring_advance(ring);
1395 return 0;
1396
1397 @@ -6351,6 +6347,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
1398 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1399 intel_ring_emit(ring, MI_NOOP);
1400
1401 + intel_mark_page_flip_active(intel_crtc);
1402 intel_ring_advance(ring);
1403 return 0;
1404
1405 @@ -6397,6 +6394,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
1406 pf = 0;
1407 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
1408 intel_ring_emit(ring, pf | pipesrc);
1409 +
1410 + intel_mark_page_flip_active(intel_crtc);
1411 intel_ring_advance(ring);
1412 return 0;
1413
1414 @@ -6439,6 +6438,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
1415 pf = 0;
1416 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
1417 intel_ring_emit(ring, pf | pipesrc);
1418 +
1419 + intel_mark_page_flip_active(intel_crtc);
1420 intel_ring_advance(ring);
1421 return 0;
1422
1423 @@ -6493,6 +6494,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
1424 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
1425 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1426 intel_ring_emit(ring, (MI_NOOP));
1427 +
1428 + intel_mark_page_flip_active(intel_crtc);
1429 intel_ring_advance(ring);
1430 return 0;
1431
1432 @@ -6541,7 +6544,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1433 return -ENOMEM;
1434
1435 work->event = event;
1436 - work->dev = crtc->dev;
1437 + work->crtc = crtc;
1438 intel_fb = to_intel_framebuffer(crtc->fb);
1439 work->old_fb_obj = intel_fb->obj;
1440 INIT_WORK(&work->work, intel_unpin_work_fn);
1441 @@ -6566,6 +6569,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1442 intel_fb = to_intel_framebuffer(fb);
1443 obj = intel_fb->obj;
1444
1445 + if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
1446 + flush_workqueue(dev_priv->wq);
1447 +
1448 ret = i915_mutex_lock_interruptible(dev);
1449 if (ret)
1450 goto cleanup;
1451 @@ -6584,6 +6590,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1452 * the flip occurs and the object is no longer visible.
1453 */
1454 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
1455 + atomic_inc(&intel_crtc->unpin_work_count);
1456
1457 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
1458 if (ret)
1459 @@ -6598,6 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1460 return 0;
1461
1462 cleanup_pending:
1463 + atomic_dec(&intel_crtc->unpin_work_count);
1464 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
1465 drm_gem_object_unreference(&work->old_fb_obj->base);
1466 drm_gem_object_unreference(&obj->base);
1467 @@ -7259,10 +7267,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
1468 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
1469 config->mode_changed = true;
1470 }
1471 -
1472 - /* Disable all disconnected encoders. */
1473 - if (connector->base.status == connector_status_disconnected)
1474 - connector->new_encoder = NULL;
1475 }
1476 /* connector->new_encoder is now updated for all connectors. */
1477
1478 @@ -8244,9 +8248,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
1479 * the crtc fixup. */
1480 }
1481
1482 +static void i915_redisable_vga(struct drm_device *dev)
1483 +{
1484 + struct drm_i915_private *dev_priv = dev->dev_private;
1485 + u32 vga_reg;
1486 +
1487 + if (HAS_PCH_SPLIT(dev))
1488 + vga_reg = CPU_VGACNTRL;
1489 + else
1490 + vga_reg = VGACNTRL;
1491 +
1492 + if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
1493 + DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
1494 + I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1495 + POSTING_READ(vga_reg);
1496 + }
1497 +}
1498 +
1499 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
1500 * and i915 state tracking structures. */
1501 -void intel_modeset_setup_hw_state(struct drm_device *dev)
1502 +void intel_modeset_setup_hw_state(struct drm_device *dev,
1503 + bool force_restore)
1504 {
1505 struct drm_i915_private *dev_priv = dev->dev_private;
1506 enum pipe pipe;
1507 @@ -8317,7 +8339,17 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
1508 intel_sanitize_crtc(crtc);
1509 }
1510
1511 - intel_modeset_update_staged_output_state(dev);
1512 + if (force_restore) {
1513 + for_each_pipe(pipe) {
1514 + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1515 + intel_set_mode(&crtc->base, &crtc->base.mode,
1516 + crtc->base.x, crtc->base.y, crtc->base.fb);
1517 + }
1518 +
1519 + i915_redisable_vga(dev);
1520 + } else {
1521 + intel_modeset_update_staged_output_state(dev);
1522 + }
1523
1524 intel_modeset_check_state(dev);
1525 }
1526 @@ -8328,7 +8360,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
1527
1528 intel_setup_overlay(dev);
1529
1530 - intel_modeset_setup_hw_state(dev);
1531 + intel_modeset_setup_hw_state(dev, false);
1532 }
1533
1534 void intel_modeset_cleanup(struct drm_device *dev)
1535 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1536 index fe71425..016e375 100644
1537 --- a/drivers/gpu/drm/i915/intel_drv.h
1538 +++ b/drivers/gpu/drm/i915/intel_drv.h
1539 @@ -198,6 +198,8 @@ struct intel_crtc {
1540 struct intel_unpin_work *unpin_work;
1541 int fdi_lanes;
1542
1543 + atomic_t unpin_work_count;
1544 +
1545 /* Display surface base address adjustement for pageflips. Note that on
1546 * gen4+ this only adjusts up to a tile, offsets within a tile are
1547 * handled in the hw itself (with the TILEOFF register). */
1548 @@ -380,11 +382,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
1549
1550 struct intel_unpin_work {
1551 struct work_struct work;
1552 - struct drm_device *dev;
1553 + struct drm_crtc *crtc;
1554 struct drm_i915_gem_object *old_fb_obj;
1555 struct drm_i915_gem_object *pending_flip_obj;
1556 struct drm_pending_vblank_event *event;
1557 - int pending;
1558 + atomic_t pending;
1559 +#define INTEL_FLIP_INACTIVE 0
1560 +#define INTEL_FLIP_PENDING 1
1561 +#define INTEL_FLIP_COMPLETE 2
1562 bool enable_stall_check;
1563 };
1564
1565 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1566 index edba93b..d4d9a6f 100644
1567 --- a/drivers/gpu/drm/i915/intel_lvds.c
1568 +++ b/drivers/gpu/drm/i915/intel_lvds.c
1569 @@ -526,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
1570 dev_priv->modeset_on_lid = 0;
1571
1572 mutex_lock(&dev->mode_config.mutex);
1573 - intel_modeset_check_state(dev);
1574 + intel_modeset_setup_hw_state(dev, true);
1575 mutex_unlock(&dev->mode_config.mutex);
1576
1577 return NOTIFY_OK;
1578 @@ -763,14 +763,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
1579 },
1580 {
1581 .callback = intel_no_lvds_dmi_callback,
1582 - .ident = "ZOTAC ZBOXSD-ID12/ID13",
1583 - .matches = {
1584 - DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
1585 - DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
1586 - },
1587 - },
1588 - {
1589 - .callback = intel_no_lvds_dmi_callback,
1590 .ident = "Gigabyte GA-D525TUD",
1591 .matches = {
1592 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
1593 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1594 index 442968f..eaaff3c 100644
1595 --- a/drivers/gpu/drm/i915/intel_pm.c
1596 +++ b/drivers/gpu/drm/i915/intel_pm.c
1597 @@ -44,6 +44,14 @@
1598 * i915.i915_enable_fbc parameter
1599 */
1600
1601 +static bool intel_crtc_active(struct drm_crtc *crtc)
1602 +{
1603 + /* Be paranoid as we can arrive here with only partial
1604 + * state retrieved from the hardware during setup.
1605 + */
1606 + return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
1607 +}
1608 +
1609 static void i8xx_disable_fbc(struct drm_device *dev)
1610 {
1611 struct drm_i915_private *dev_priv = dev->dev_private;
1612 @@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
1613 * - going to an unsupported config (interlace, pixel multiply, etc.)
1614 */
1615 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1616 - if (tmp_crtc->enabled &&
1617 - !to_intel_crtc(tmp_crtc)->primary_disabled &&
1618 - tmp_crtc->fb) {
1619 + if (intel_crtc_active(tmp_crtc) &&
1620 + !to_intel_crtc(tmp_crtc)->primary_disabled) {
1621 if (crtc) {
1622 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1623 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1624 @@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1625 struct drm_crtc *crtc, *enabled = NULL;
1626
1627 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1628 - if (crtc->enabled && crtc->fb) {
1629 + if (intel_crtc_active(crtc)) {
1630 if (enabled)
1631 return NULL;
1632 enabled = crtc;
1633 @@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1634 int entries, tlb_miss;
1635
1636 crtc = intel_get_crtc_for_plane(dev, plane);
1637 - if (crtc->fb == NULL || !crtc->enabled) {
1638 + if (!intel_crtc_active(crtc)) {
1639 *cursor_wm = cursor->guard_size;
1640 *plane_wm = display->guard_size;
1641 return false;
1642 @@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1643 int entries;
1644
1645 crtc = intel_get_crtc_for_plane(dev, plane);
1646 - if (crtc->fb == NULL || !crtc->enabled)
1647 + if (!intel_crtc_active(crtc))
1648 return false;
1649
1650 clock = crtc->mode.clock; /* VESA DOT Clock */
1651 @@ -1478,7 +1485,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1652
1653 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1654 crtc = intel_get_crtc_for_plane(dev, 1);
1655 - if (crtc->enabled && crtc->fb) {
1656 + if (intel_crtc_active(crtc)) {
1657 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1658 wm_info, fifo_size,
1659 crtc->fb->bits_per_pixel / 8,
1660 @@ -1923,7 +1930,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1661 int entries, tlb_miss;
1662
1663 crtc = intel_get_crtc_for_plane(dev, plane);
1664 - if (crtc->fb == NULL || !crtc->enabled) {
1665 + if (!intel_crtc_active(crtc)) {
1666 *sprite_wm = display->guard_size;
1667 return false;
1668 }
1669 diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1670 index c345097..b2f3d4d 100644
1671 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1672 +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1673 @@ -38,6 +38,8 @@ enum nvbios_pll_type {
1674 PLL_UNK42 = 0x42,
1675 PLL_VPLL0 = 0x80,
1676 PLL_VPLL1 = 0x81,
1677 + PLL_VPLL2 = 0x82,
1678 + PLL_VPLL3 = 0x83,
1679 PLL_MAX = 0xff
1680 };
1681
1682 diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1683 index f6962c9..7c96262 100644
1684 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1685 +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1686 @@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
1687 switch (info.type) {
1688 case PLL_VPLL0:
1689 case PLL_VPLL1:
1690 + case PLL_VPLL2:
1691 + case PLL_VPLL3:
1692 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
1693 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
1694 nv_wr32(priv, info.reg + 0x10, fN << 16);
1695 diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1696 index 9f59f2b..73bedff 100644
1697 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1698 +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1699 @@ -86,14 +86,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
1700 mem->memtype = type;
1701 mem->size = size;
1702
1703 - mutex_lock(&mm->mutex);
1704 + mutex_lock(&pfb->base.mutex);
1705 do {
1706 if (back)
1707 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
1708 else
1709 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
1710 if (ret) {
1711 - mutex_unlock(&mm->mutex);
1712 + mutex_unlock(&pfb->base.mutex);
1713 pfb->ram.put(pfb, &mem);
1714 return ret;
1715 }
1716 @@ -101,7 +101,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
1717 list_add_tail(&r->rl_entry, &mem->regions);
1718 size -= r->length;
1719 } while (size);
1720 - mutex_unlock(&mm->mutex);
1721 + mutex_unlock(&pfb->base.mutex);
1722
1723 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
1724 mem->offset = (u64)r->offset << 12;
1725 diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1726 index 1188227..6565f3d 100644
1727 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1728 +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1729 @@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
1730 if (ret)
1731 return ret;
1732
1733 + mutex_lock(&imem->base.mutex);
1734 list_add(&iobj->head, &imem->list);
1735 + mutex_unlock(&imem->base.mutex);
1736 return 0;
1737 }
1738
1739 void
1740 nouveau_instobj_destroy(struct nouveau_instobj *iobj)
1741 {
1742 - if (iobj->head.prev)
1743 - list_del(&iobj->head);
1744 + struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
1745 +
1746 + mutex_lock(&subdev->mutex);
1747 + list_del(&iobj->head);
1748 + mutex_unlock(&subdev->mutex);
1749 +
1750 return nouveau_object_destroy(&iobj->base);
1751 }
1752
1753 @@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
1754 if (ret)
1755 return ret;
1756
1757 + mutex_lock(&imem->base.mutex);
1758 +
1759 list_for_each_entry(iobj, &imem->list, head) {
1760 if (iobj->suspend) {
1761 for (i = 0; i < iobj->size; i += 4)
1762 @@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
1763 }
1764 }
1765
1766 + mutex_unlock(&imem->base.mutex);
1767 +
1768 return 0;
1769 }
1770
1771 @@ -104,17 +114,26 @@ int
1772 nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
1773 {
1774 struct nouveau_instobj *iobj;
1775 - int i;
1776 + int i, ret = 0;
1777
1778 if (suspend) {
1779 + mutex_lock(&imem->base.mutex);
1780 +
1781 list_for_each_entry(iobj, &imem->list, head) {
1782 iobj->suspend = vmalloc(iobj->size);
1783 - if (iobj->suspend) {
1784 - for (i = 0; i < iobj->size; i += 4)
1785 - iobj->suspend[i / 4] = nv_ro32(iobj, i);
1786 - } else
1787 - return -ENOMEM;
1788 + if (!iobj->suspend) {
1789 + ret = -ENOMEM;
1790 + break;
1791 + }
1792 +
1793 + for (i = 0; i < iobj->size; i += 4)
1794 + iobj->suspend[i / 4] = nv_ro32(iobj, i);
1795 }
1796 +
1797 + mutex_unlock(&imem->base.mutex);
1798 +
1799 + if (ret)
1800 + return ret;
1801 }
1802
1803 return nouveau_subdev_fini(&imem->base, suspend);
1804 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
1805 index 35ac57f..5f0e7ef 100644
1806 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
1807 +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
1808 @@ -1279,7 +1279,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1809 if (drm->agp.stat == ENABLED) {
1810 mem->bus.offset = mem->start << PAGE_SHIFT;
1811 mem->bus.base = drm->agp.base;
1812 - mem->bus.is_iomem = true;
1813 + mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1814 }
1815 #endif
1816 break;
1817 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
1818 index bedafd1..cdb83ac 100644
1819 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h
1820 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
1821 @@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
1822 void nv10_fence_context_del(struct nouveau_channel *);
1823 void nv10_fence_destroy(struct nouveau_drm *);
1824 int nv10_fence_create(struct nouveau_drm *);
1825 +void nv17_fence_resume(struct nouveau_drm *drm);
1826
1827 int nv50_fence_create(struct nouveau_drm *);
1828 int nv84_fence_create(struct nouveau_drm *);
1829 diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
1830 index 366462c..4f604cd 100644
1831 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
1832 +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
1833 @@ -197,6 +197,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
1834 if (nvbo->gem) {
1835 if (nvbo->gem->dev == dev) {
1836 drm_gem_object_reference(nvbo->gem);
1837 + dma_buf_put(dma_buf);
1838 return nvbo->gem;
1839 }
1840 }
1841 diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
1842 index 184cdf8..39ffc07 100644
1843 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c
1844 +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
1845 @@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
1846
1847 static inline bool is_powersaving_dpms(int mode)
1848 {
1849 - return (mode != DRM_MODE_DPMS_ON);
1850 + return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
1851 }
1852
1853 static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
1854 diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
1855 index ce752bf..0b34d23 100644
1856 --- a/drivers/gpu/drm/nouveau/nv10_fence.c
1857 +++ b/drivers/gpu/drm/nouveau/nv10_fence.c
1858 @@ -160,6 +160,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
1859 kfree(priv);
1860 }
1861
1862 +void nv17_fence_resume(struct nouveau_drm *drm)
1863 +{
1864 + struct nv10_fence_priv *priv = drm->fence;
1865 +
1866 + nouveau_bo_wr32(priv->bo, 0, priv->sequence);
1867 +}
1868 +
1869 int
1870 nv10_fence_create(struct nouveau_drm *drm)
1871 {
1872 @@ -192,6 +199,7 @@ nv10_fence_create(struct nouveau_drm *drm)
1873 if (ret == 0) {
1874 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
1875 priv->base.sync = nv17_fence_sync;
1876 + priv->base.resume = nv17_fence_resume;
1877 }
1878 }
1879
1880 diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
1881 index e0763ea..ecd22f5 100644
1882 --- a/drivers/gpu/drm/nouveau/nv50_fence.c
1883 +++ b/drivers/gpu/drm/nouveau/nv50_fence.c
1884 @@ -119,6 +119,7 @@ nv50_fence_create(struct nouveau_drm *drm)
1885 if (ret == 0) {
1886 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
1887 priv->base.sync = nv17_fence_sync;
1888 + priv->base.resume = nv17_fence_resume;
1889 }
1890
1891 if (ret)
1892 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1893 index 24d932f..9175615 100644
1894 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1895 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1896 @@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1897 /* use frac fb div on APUs */
1898 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1899 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1900 + if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
1901 + radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1902 } else {
1903 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
1904
1905 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1906 index 010bae1..4552d4a 100644
1907 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1908 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1909 @@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
1910 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
1911 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
1912 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1913 - radeon_dp_set_link_config(connector, mode);
1914 + radeon_dp_set_link_config(connector, adjusted_mode);
1915 }
1916
1917 return true;
1918 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1919 index 219942c..18a5382 100644
1920 --- a/drivers/gpu/drm/radeon/evergreen.c
1921 +++ b/drivers/gpu/drm/radeon/evergreen.c
1922 @@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1923 case CHIP_SUMO:
1924 rdev->config.evergreen.num_ses = 1;
1925 rdev->config.evergreen.max_pipes = 4;
1926 - rdev->config.evergreen.max_tile_pipes = 2;
1927 + rdev->config.evergreen.max_tile_pipes = 4;
1928 if (rdev->pdev->device == 0x9648)
1929 rdev->config.evergreen.max_simds = 3;
1930 else if ((rdev->pdev->device == 0x9647) ||
1931 @@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1932 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1933 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1934 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1935 - gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1936 + gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
1937 break;
1938 case CHIP_SUMO2:
1939 rdev->config.evergreen.num_ses = 1;
1940 @@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1941 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1942 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1943 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1944 - gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1945 + gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
1946 break;
1947 case CHIP_BARTS:
1948 rdev->config.evergreen.num_ses = 2;
1949 @@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1950 break;
1951 case CHIP_CAICOS:
1952 rdev->config.evergreen.num_ses = 1;
1953 - rdev->config.evergreen.max_pipes = 4;
1954 + rdev->config.evergreen.max_pipes = 2;
1955 rdev->config.evergreen.max_tile_pipes = 2;
1956 rdev->config.evergreen.max_simds = 2;
1957 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1958 @@ -3093,6 +3093,16 @@ restart_ih:
1959 break;
1960 }
1961 break;
1962 + case 146:
1963 + case 147:
1964 + dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
1965 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1966 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
1967 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1968 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
1969 + /* reset addr and status */
1970 + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
1971 + break;
1972 case 176: /* CP_INT in ring buffer */
1973 case 177: /* CP_INT in IB1 */
1974 case 178: /* CP_INT in IB2 */
1975 diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
1976 index c042e49..69ffae2 100644
1977 --- a/drivers/gpu/drm/radeon/evergreen_cs.c
1978 +++ b/drivers/gpu/drm/radeon/evergreen_cs.c
1979 @@ -2724,6 +2724,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
1980
1981 /* check config regs */
1982 switch (reg) {
1983 + case WAIT_UNTIL:
1984 case GRBM_GFX_INDEX:
1985 case CP_STRMOUT_CNTL:
1986 case CP_COHER_CNTL:
1987 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
1988 index 2bc0f6a..442732f 100644
1989 --- a/drivers/gpu/drm/radeon/evergreend.h
1990 +++ b/drivers/gpu/drm/radeon/evergreend.h
1991 @@ -45,6 +45,8 @@
1992 #define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
1993 #define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
1994 #define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
1995 +#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
1996 +#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
1997
1998 /* Registers */
1999
2000 @@ -651,6 +653,7 @@
2001 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
2002 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
2003 #define VM_CONTEXT1_CNTL 0x1414
2004 +#define VM_CONTEXT1_CNTL2 0x1434
2005 #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
2006 #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
2007 #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
2008 @@ -672,6 +675,8 @@
2009 #define CACHE_UPDATE_MODE(x) ((x) << 6)
2010 #define VM_L2_STATUS 0x140C
2011 #define L2_BUSY (1 << 0)
2012 +#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
2013 +#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
2014
2015 #define WAIT_UNTIL 0x8040
2016
2017 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
2018 index 81e6a56..30c18a6 100644
2019 --- a/drivers/gpu/drm/radeon/ni.c
2020 +++ b/drivers/gpu/drm/radeon/ni.c
2021 @@ -784,10 +784,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
2022 /* enable context1-7 */
2023 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2024 (u32)(rdev->dummy_page.addr >> 12));
2025 - WREG32(VM_CONTEXT1_CNTL2, 0);
2026 - WREG32(VM_CONTEXT1_CNTL, 0);
2027 + WREG32(VM_CONTEXT1_CNTL2, 4);
2028 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2029 - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2030 + RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2031 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2032 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2033 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2034 + PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2035 + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2036 + VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2037 + VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2038 + READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2039 + READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2040 + WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2041 + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2042
2043 cayman_pcie_gart_tlb_flush(rdev);
2044 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2045 diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
2046 index cbef681..f5e54a7 100644
2047 --- a/drivers/gpu/drm/radeon/nid.h
2048 +++ b/drivers/gpu/drm/radeon/nid.h
2049 @@ -80,7 +80,18 @@
2050 #define VM_CONTEXT0_CNTL 0x1410
2051 #define ENABLE_CONTEXT (1 << 0)
2052 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
2053 +#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
2054 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
2055 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
2056 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
2057 +#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
2058 +#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
2059 +#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
2060 +#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
2061 +#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
2062 +#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
2063 +#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
2064 +#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
2065 #define VM_CONTEXT1_CNTL 0x1414
2066 #define VM_CONTEXT0_CNTL2 0x1430
2067 #define VM_CONTEXT1_CNTL2 0x1434
2068 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2069 index 8c42d54..b3f1459 100644
2070 --- a/drivers/gpu/drm/radeon/radeon.h
2071 +++ b/drivers/gpu/drm/radeon/radeon.h
2072 @@ -220,12 +220,13 @@ struct radeon_fence {
2073 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
2074 int radeon_fence_driver_init(struct radeon_device *rdev);
2075 void radeon_fence_driver_fini(struct radeon_device *rdev);
2076 +void radeon_fence_driver_force_completion(struct radeon_device *rdev);
2077 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
2078 void radeon_fence_process(struct radeon_device *rdev, int ring);
2079 bool radeon_fence_signaled(struct radeon_fence *fence);
2080 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
2081 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
2082 -void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
2083 +int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
2084 int radeon_fence_wait_any(struct radeon_device *rdev,
2085 struct radeon_fence **fences,
2086 bool intr);
2087 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2088 index 45b660b..ced9a81 100644
2089 --- a/drivers/gpu/drm/radeon/radeon_combios.c
2090 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
2091 @@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2092 of_machine_is_compatible("PowerBook6,7")) {
2093 /* ibook */
2094 rdev->mode_info.connector_table = CT_IBOOK;
2095 + } else if (of_machine_is_compatible("PowerMac3,5")) {
2096 + /* PowerMac G4 Silver radeon 7500 */
2097 + rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
2098 } else if (of_machine_is_compatible("PowerMac4,4")) {
2099 /* emac */
2100 rdev->mode_info.connector_table = CT_EMAC;
2101 @@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2102 CONNECTOR_OBJECT_ID_SVIDEO,
2103 &hpd);
2104 break;
2105 + case CT_MAC_G4_SILVER:
2106 + DRM_INFO("Connector Table: %d (mac g4 silver)\n",
2107 + rdev->mode_info.connector_table);
2108 + /* DVI-I - tv dac, int tmds */
2109 + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2110 + hpd.hpd = RADEON_HPD_1; /* ??? */
2111 + radeon_add_legacy_encoder(dev,
2112 + radeon_get_encoder_enum(dev,
2113 + ATOM_DEVICE_DFP1_SUPPORT,
2114 + 0),
2115 + ATOM_DEVICE_DFP1_SUPPORT);
2116 + radeon_add_legacy_encoder(dev,
2117 + radeon_get_encoder_enum(dev,
2118 + ATOM_DEVICE_CRT2_SUPPORT,
2119 + 2),
2120 + ATOM_DEVICE_CRT2_SUPPORT);
2121 + radeon_add_legacy_connector(dev, 0,
2122 + ATOM_DEVICE_DFP1_SUPPORT |
2123 + ATOM_DEVICE_CRT2_SUPPORT,
2124 + DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2125 + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2126 + &hpd);
2127 + /* VGA - primary dac */
2128 + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2129 + hpd.hpd = RADEON_HPD_NONE;
2130 + radeon_add_legacy_encoder(dev,
2131 + radeon_get_encoder_enum(dev,
2132 + ATOM_DEVICE_CRT1_SUPPORT,
2133 + 1),
2134 + ATOM_DEVICE_CRT1_SUPPORT);
2135 + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
2136 + DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
2137 + CONNECTOR_OBJECT_ID_VGA,
2138 + &hpd);
2139 + /* TV - TV DAC */
2140 + ddc_i2c.valid = false;
2141 + hpd.hpd = RADEON_HPD_NONE;
2142 + radeon_add_legacy_encoder(dev,
2143 + radeon_get_encoder_enum(dev,
2144 + ATOM_DEVICE_TV1_SUPPORT,
2145 + 2),
2146 + ATOM_DEVICE_TV1_SUPPORT);
2147 + radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
2148 + DRM_MODE_CONNECTOR_SVIDEO,
2149 + &ddc_i2c,
2150 + CONNECTOR_OBJECT_ID_SVIDEO,
2151 + &hpd);
2152 + break;
2153 default:
2154 DRM_INFO("Connector table: %d (invalid)\n",
2155 rdev->mode_info.connector_table);
2156 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2157 index b884c36..810268b 100644
2158 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2159 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2160 @@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2161 ret = connector_status_disconnected;
2162
2163 if (radeon_connector->ddc_bus)
2164 - dret = radeon_ddc_probe(radeon_connector);
2165 + dret = radeon_ddc_probe(radeon_connector, false);
2166 if (dret) {
2167 radeon_connector->detected_by_load = false;
2168 if (radeon_connector->edid) {
2169 @@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2170 return connector->status;
2171
2172 if (radeon_connector->ddc_bus)
2173 - dret = radeon_ddc_probe(radeon_connector);
2174 + dret = radeon_ddc_probe(radeon_connector, false);
2175 if (dret) {
2176 radeon_connector->detected_by_load = false;
2177 if (radeon_connector->edid) {
2178 @@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2179 if (encoder) {
2180 /* setup ddc on the bridge */
2181 radeon_atom_ext_encoder_setup_ddc(encoder);
2182 - if (radeon_ddc_probe(radeon_connector)) /* try DDC */
2183 + /* bridge chips are always aux */
2184 + if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
2185 ret = connector_status_connected;
2186 else if (radeon_connector->dac_load_detect) { /* try load detection */
2187 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
2188 @@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2189 if (radeon_dp_getdpcd(radeon_connector))
2190 ret = connector_status_connected;
2191 } else {
2192 - if (radeon_ddc_probe(radeon_connector))
2193 + /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
2194 + if (radeon_ddc_probe(radeon_connector, false))
2195 ret = connector_status_connected;
2196 }
2197 }
2198 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2199 index e2f5f88..ad4c973 100644
2200 --- a/drivers/gpu/drm/radeon/radeon_device.c
2201 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2202 @@ -1163,6 +1163,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2203 struct drm_crtc *crtc;
2204 struct drm_connector *connector;
2205 int i, r;
2206 + bool force_completion = false;
2207
2208 if (dev == NULL || dev->dev_private == NULL) {
2209 return -ENODEV;
2210 @@ -1205,8 +1206,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2211
2212 mutex_lock(&rdev->ring_lock);
2213 /* wait for gpu to finish processing current batch */
2214 - for (i = 0; i < RADEON_NUM_RINGS; i++)
2215 - radeon_fence_wait_empty_locked(rdev, i);
2216 + for (i = 0; i < RADEON_NUM_RINGS; i++) {
2217 + r = radeon_fence_wait_empty_locked(rdev, i);
2218 + if (r) {
2219 + /* delay GPU reset to resume */
2220 + force_completion = true;
2221 + }
2222 + }
2223 + if (force_completion) {
2224 + radeon_fence_driver_force_completion(rdev);
2225 + }
2226 mutex_unlock(&rdev->ring_lock);
2227
2228 radeon_save_bios_scratch_regs(rdev);
2229 @@ -1337,7 +1346,6 @@ retry:
2230 }
2231
2232 radeon_restore_bios_scratch_regs(rdev);
2233 - drm_helper_resume_force_mode(rdev->ddev);
2234
2235 if (!r) {
2236 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
2237 @@ -1357,11 +1365,14 @@ retry:
2238 }
2239 }
2240 } else {
2241 + radeon_fence_driver_force_completion(rdev);
2242 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
2243 kfree(ring_data[i]);
2244 }
2245 }
2246
2247 + drm_helper_resume_force_mode(rdev->ddev);
2248 +
2249 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
2250 if (r) {
2251 /* bad news, how to tell it to userspace ? */
2252 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
2253 index bfa2a60..2bddddd 100644
2254 --- a/drivers/gpu/drm/radeon/radeon_display.c
2255 +++ b/drivers/gpu/drm/radeon/radeon_display.c
2256 @@ -695,10 +695,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
2257 if (radeon_connector->router.ddc_valid)
2258 radeon_router_select_ddc_port(radeon_connector);
2259
2260 - if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
2261 - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
2262 - (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
2263 - ENCODER_OBJECT_ID_NONE)) {
2264 + if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
2265 + ENCODER_OBJECT_ID_NONE) {
2266 + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2267 +
2268 + if (dig->dp_i2c_bus)
2269 + radeon_connector->edid = drm_get_edid(&radeon_connector->base,
2270 + &dig->dp_i2c_bus->adapter);
2271 + } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
2272 + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
2273 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2274
2275 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
2276 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
2277 index 22bd6c2..28c09b6 100644
2278 --- a/drivers/gpu/drm/radeon/radeon_fence.c
2279 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
2280 @@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
2281 * Returns 0 if the fences have passed, error for all other cases.
2282 * Caller must hold ring lock.
2283 */
2284 -void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
2285 +int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
2286 {
2287 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
2288 + int r;
2289
2290 - while(1) {
2291 - int r;
2292 - r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
2293 + r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
2294 + if (r) {
2295 if (r == -EDEADLK) {
2296 - mutex_unlock(&rdev->ring_lock);
2297 - r = radeon_gpu_reset(rdev);
2298 - mutex_lock(&rdev->ring_lock);
2299 - if (!r)
2300 - continue;
2301 - }
2302 - if (r) {
2303 - dev_err(rdev->dev, "error waiting for ring to become"
2304 - " idle (%d)\n", r);
2305 + return -EDEADLK;
2306 }
2307 - return;
2308 + dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
2309 + ring, r);
2310 }
2311 + return 0;
2312 }
2313
2314 /**
2315 @@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
2316 */
2317 void radeon_fence_driver_fini(struct radeon_device *rdev)
2318 {
2319 - int ring;
2320 + int ring, r;
2321
2322 mutex_lock(&rdev->ring_lock);
2323 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
2324 if (!rdev->fence_drv[ring].initialized)
2325 continue;
2326 - radeon_fence_wait_empty_locked(rdev, ring);
2327 + r = radeon_fence_wait_empty_locked(rdev, ring);
2328 + if (r) {
2329 + /* no need to trigger GPU reset as we are unloading */
2330 + radeon_fence_driver_force_completion(rdev);
2331 + }
2332 wake_up_all(&rdev->fence_queue);
2333 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
2334 rdev->fence_drv[ring].initialized = false;
2335 @@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
2336 mutex_unlock(&rdev->ring_lock);
2337 }
2338
2339 +/**
2340 + * radeon_fence_driver_force_completion - force all fence waiter to complete
2341 + *
2342 + * @rdev: radeon device pointer
2343 + *
2344 + * In case of GPU reset failure make sure no process keep waiting on fence
2345 + * that will never complete.
2346 + */
2347 +void radeon_fence_driver_force_completion(struct radeon_device *rdev)
2348 +{
2349 + int ring;
2350 +
2351 + for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
2352 + if (!rdev->fence_drv[ring].initialized)
2353 + continue;
2354 + radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
2355 + }
2356 +}
2357 +
2358
2359 /*
2360 * Fence debugfs
2361 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
2362 index c5bddd6..fc60b74 100644
2363 --- a/drivers/gpu/drm/radeon/radeon_i2c.c
2364 +++ b/drivers/gpu/drm/radeon/radeon_i2c.c
2365 @@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
2366 * radeon_ddc_probe
2367 *
2368 */
2369 -bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
2370 +bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
2371 {
2372 u8 out = 0x0;
2373 u8 buf[8];
2374 @@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
2375 if (radeon_connector->router.ddc_valid)
2376 radeon_router_select_ddc_port(radeon_connector);
2377
2378 - ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
2379 + if (use_aux) {
2380 + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2381 + ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
2382 + } else {
2383 + ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
2384 + }
2385 +
2386 if (ret != 2)
2387 /* Couldn't find an accessible DDC on this connector */
2388 return false;
2389 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2390 index f5ba224..62cd512 100644
2391 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2392 +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2393 @@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
2394 enum drm_connector_status found = connector_status_disconnected;
2395 bool color = true;
2396
2397 + /* just don't bother on RN50 those chip are often connected to remoting
2398 + * console hw and often we get failure to load detect those. So to make
2399 + * everyone happy report the encoder as always connected.
2400 + */
2401 + if (ASIC_IS_RN50(rdev)) {
2402 + return connector_status_connected;
2403 + }
2404 +
2405 /* save the regs we need */
2406 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
2407 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
2408 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
2409 index 92c5f47..a9c3f06 100644
2410 --- a/drivers/gpu/drm/radeon/radeon_mode.h
2411 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
2412 @@ -209,7 +209,8 @@ enum radeon_connector_table {
2413 CT_RN50_POWER,
2414 CT_MAC_X800,
2415 CT_MAC_G5_9600,
2416 - CT_SAM440EP
2417 + CT_SAM440EP,
2418 + CT_MAC_G4_SILVER
2419 };
2420
2421 enum radeon_dvo_chip {
2422 @@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
2423 u8 val);
2424 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
2425 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
2426 -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
2427 +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
2428 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
2429
2430 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
2431 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
2432 index aa14dbb..0bfa656 100644
2433 --- a/drivers/gpu/drm/radeon/radeon_pm.c
2434 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
2435 @@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
2436
2437 static void radeon_pm_set_clocks(struct radeon_device *rdev)
2438 {
2439 - int i;
2440 + int i, r;
2441
2442 /* no need to take locks, etc. if nothing's going to change */
2443 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
2444 @@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
2445 /* wait for the rings to drain */
2446 for (i = 0; i < RADEON_NUM_RINGS; i++) {
2447 struct radeon_ring *ring = &rdev->ring[i];
2448 - if (ring->ready)
2449 - radeon_fence_wait_empty_locked(rdev, i);
2450 + if (!ring->ready) {
2451 + continue;
2452 + }
2453 + r = radeon_fence_wait_empty_locked(rdev, i);
2454 + if (r) {
2455 + /* needs a GPU reset dont reset here */
2456 + mutex_unlock(&rdev->ring_lock);
2457 + up_write(&rdev->pm.mclk_lock);
2458 + mutex_unlock(&rdev->ddev->struct_mutex);
2459 + return;
2460 + }
2461 }
2462
2463 radeon_unmap_vram_bos(rdev);
2464 diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
2465 index e095218..26c23bb 100644
2466 --- a/drivers/gpu/drm/radeon/radeon_prime.c
2467 +++ b/drivers/gpu/drm/radeon/radeon_prime.c
2468 @@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
2469 bo = dma_buf->priv;
2470 if (bo->gem_base.dev == dev) {
2471 drm_gem_object_reference(&bo->gem_base);
2472 + dma_buf_put(dma_buf);
2473 return &bo->gem_base;
2474 }
2475 }
2476 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
2477 index 4422d63..c4d9eb6 100644
2478 --- a/drivers/gpu/drm/radeon/si.c
2479 +++ b/drivers/gpu/drm/radeon/si.c
2480 @@ -2426,9 +2426,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2481 /* enable context1-15 */
2482 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2483 (u32)(rdev->dummy_page.addr >> 12));
2484 - WREG32(VM_CONTEXT1_CNTL2, 0);
2485 + WREG32(VM_CONTEXT1_CNTL2, 4);
2486 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2487 - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2488 + RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2489 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2490 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2491 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2492 + PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2493 + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2494 + VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2495 + VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2496 + READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2497 + READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2498 + WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2499 + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2500
2501 si_pcie_gart_tlb_flush(rdev);
2502 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2503 @@ -3684,6 +3695,16 @@ restart_ih:
2504 break;
2505 }
2506 break;
2507 + case 146:
2508 + case 147:
2509 + dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
2510 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
2511 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
2512 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
2513 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
2514 + /* reset addr and status */
2515 + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
2516 + break;
2517 case 176: /* RINGID0 CP_INT */
2518 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
2519 break;
2520 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
2521 index a8871af..53b4d45 100644
2522 --- a/drivers/gpu/drm/radeon/sid.h
2523 +++ b/drivers/gpu/drm/radeon/sid.h
2524 @@ -91,7 +91,18 @@
2525 #define VM_CONTEXT0_CNTL 0x1410
2526 #define ENABLE_CONTEXT (1 << 0)
2527 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
2528 +#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
2529 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
2530 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
2531 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
2532 +#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
2533 +#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
2534 +#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
2535 +#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
2536 +#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
2537 +#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
2538 +#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
2539 +#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
2540 #define VM_CONTEXT1_CNTL 0x1414
2541 #define VM_CONTEXT0_CNTL2 0x1430
2542 #define VM_CONTEXT1_CNTL2 0x1434
2543 @@ -104,6 +115,9 @@
2544 #define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
2545 #define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
2546
2547 +#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
2548 +#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
2549 +
2550 #define VM_INVALIDATE_REQUEST 0x1478
2551 #define VM_INVALIDATE_RESPONSE 0x147c
2552
2553 diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
2554 index b3b2ced..6d7acf4 100644
2555 --- a/drivers/gpu/drm/udl/udl_connector.c
2556 +++ b/drivers/gpu/drm/udl/udl_connector.c
2557 @@ -22,13 +22,17 @@
2558 static u8 *udl_get_edid(struct udl_device *udl)
2559 {
2560 u8 *block;
2561 - char rbuf[3];
2562 + char *rbuf;
2563 int ret, i;
2564
2565 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
2566 if (block == NULL)
2567 return NULL;
2568
2569 + rbuf = kmalloc(2, GFP_KERNEL);
2570 + if (rbuf == NULL)
2571 + goto error;
2572 +
2573 for (i = 0; i < EDID_LENGTH; i++) {
2574 ret = usb_control_msg(udl->ddev->usbdev,
2575 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
2576 @@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
2577 HZ);
2578 if (ret < 1) {
2579 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
2580 - i--;
2581 goto error;
2582 }
2583 block[i] = rbuf[1];
2584 }
2585
2586 + kfree(rbuf);
2587 return block;
2588
2589 error:
2590 kfree(block);
2591 + kfree(rbuf);
2592 return NULL;
2593 }
2594
2595 @@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
2596
2597 edid = (struct edid *)udl_get_edid(udl);
2598
2599 + /*
2600 + * We only read the main block, but if the monitor reports extension
2601 + * blocks then the drm edid code expects them to be present, so patch
2602 + * the extension count to 0.
2603 + */
2604 + edid->checksum += edid->extensions;
2605 + edid->extensions = 0;
2606 +
2607 drm_mode_connector_update_edid_property(connector, edid);
2608 ret = drm_add_edid_modes(connector, edid);
2609 kfree(edid);
2610 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2611 index 9d7a428..1ef9a9e 100644
2612 --- a/drivers/hid/hid-ids.h
2613 +++ b/drivers/hid/hid-ids.h
2614 @@ -696,6 +696,9 @@
2615 #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
2616 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
2617
2618 +#define USB_VENDOR_ID_SIGMATEL 0x066F
2619 +#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
2620 +
2621 #define USB_VENDOR_ID_SKYCABLE 0x1223
2622 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
2623
2624 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2625 index 11c7932..0a1429f 100644
2626 --- a/drivers/hid/usbhid/hid-quirks.c
2627 +++ b/drivers/hid/usbhid/hid-quirks.c
2628 @@ -79,6 +79,7 @@ static const struct hid_blacklist {
2629 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
2630 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
2631 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
2632 + { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
2633 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
2634 { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
2635 { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
2636 diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
2637 index 8fa2632..7272176 100644
2638 --- a/drivers/hwmon/lm73.c
2639 +++ b/drivers/hwmon/lm73.c
2640 @@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
2641 struct i2c_client *client = to_i2c_client(dev);
2642 long temp;
2643 short value;
2644 + s32 err;
2645
2646 int status = kstrtol(buf, 10, &temp);
2647 if (status < 0)
2648 @@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
2649 /* Write value */
2650 value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
2651 (LM73_TEMP_MAX*4)) << 5;
2652 - i2c_smbus_write_word_swapped(client, attr->index, value);
2653 - return count;
2654 + err = i2c_smbus_write_word_swapped(client, attr->index, value);
2655 + return (err < 0) ? err : count;
2656 }
2657
2658 static ssize_t show_temp(struct device *dev, struct device_attribute *da,
2659 @@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
2660 {
2661 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
2662 struct i2c_client *client = to_i2c_client(dev);
2663 + int temp;
2664 +
2665 + s32 err = i2c_smbus_read_word_swapped(client, attr->index);
2666 + if (err < 0)
2667 + return err;
2668 +
2669 /* use integer division instead of equivalent right shift to
2670 guarantee arithmetic shift and preserve the sign */
2671 - int temp = ((s16) (i2c_smbus_read_word_swapped(client,
2672 - attr->index))*250) / 32;
2673 - return sprintf(buf, "%d\n", temp);
2674 + temp = (((s16) err) * 250) / 32;
2675 + return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
2676 }
2677
2678
2679 diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
2680 index 80079e5..dbc99d4 100644
2681 --- a/drivers/infiniband/hw/mlx4/cm.c
2682 +++ b/drivers/infiniband/hw/mlx4/cm.c
2683 @@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
2684 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
2685 unsigned long flags;
2686
2687 - spin_lock_irqsave(&sriov->going_down_lock, flags);
2688 spin_lock(&sriov->id_map_lock);
2689 + spin_lock_irqsave(&sriov->going_down_lock, flags);
2690 /*make sure that there is no schedule inside the scheduled work.*/
2691 if (!sriov->is_going_down) {
2692 id->scheduled_delete = 1;
2693 schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
2694 }
2695 - spin_unlock(&sriov->id_map_lock);
2696 spin_unlock_irqrestore(&sriov->going_down_lock, flags);
2697 + spin_unlock(&sriov->id_map_lock);
2698 }
2699
2700 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
2701 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
2702 index 5cac29e..33cc589 100644
2703 --- a/drivers/infiniband/hw/nes/nes.h
2704 +++ b/drivers/infiniband/hw/nes/nes.h
2705 @@ -532,6 +532,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
2706 int nes_destroy_cqp(struct nes_device *);
2707 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
2708 void nes_recheck_link_status(struct work_struct *work);
2709 +void nes_terminate_timeout(unsigned long context);
2710
2711 /* nes_nic.c */
2712 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
2713 diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
2714 index fe7965e..67647e2 100644
2715 --- a/drivers/infiniband/hw/nes/nes_hw.c
2716 +++ b/drivers/infiniband/hw/nes/nes_hw.c
2717 @@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2718 static void process_critical_error(struct nes_device *nesdev);
2719 static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
2720 static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
2721 -static void nes_terminate_timeout(unsigned long context);
2722 static void nes_terminate_start_timer(struct nes_qp *nesqp);
2723
2724 #ifdef CONFIG_INFINIBAND_NES_DEBUG
2725 @@ -3520,7 +3519,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
2726 }
2727
2728 /* Timeout routine in case terminate fails to complete */
2729 -static void nes_terminate_timeout(unsigned long context)
2730 +void nes_terminate_timeout(unsigned long context)
2731 {
2732 struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
2733
2734 @@ -3530,11 +3529,7 @@ static void nes_terminate_timeout(unsigned long context)
2735 /* Set a timer in case hw cannot complete the terminate sequence */
2736 static void nes_terminate_start_timer(struct nes_qp *nesqp)
2737 {
2738 - init_timer(&nesqp->terminate_timer);
2739 - nesqp->terminate_timer.function = nes_terminate_timeout;
2740 - nesqp->terminate_timer.expires = jiffies + HZ;
2741 - nesqp->terminate_timer.data = (unsigned long)nesqp;
2742 - add_timer(&nesqp->terminate_timer);
2743 + mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
2744 }
2745
2746 /**
2747 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
2748 index cd0ecb2..07e4fba 100644
2749 --- a/drivers/infiniband/hw/nes/nes_verbs.c
2750 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
2751 @@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
2752 }
2753
2754 nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
2755 + init_timer(&nesqp->terminate_timer);
2756 + nesqp->terminate_timer.function = nes_terminate_timeout;
2757 + nesqp->terminate_timer.data = (unsigned long)nesqp;
2758
2759 /* update the QP table */
2760 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
2761 @@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
2762 return &nesqp->ibqp;
2763 }
2764
2765 -
2766 /**
2767 * nes_clean_cq
2768 */
2769 @@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2770 return ibmr;
2771 case IWNES_MEMREG_TYPE_QP:
2772 case IWNES_MEMREG_TYPE_CQ:
2773 + if (!region->length) {
2774 + nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
2775 + ib_umem_release(region);
2776 + return ERR_PTR(-EINVAL);
2777 + }
2778 nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
2779 if (!nespbl) {
2780 nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
2781 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2782 index 0badfa4..9476c1b 100644
2783 --- a/drivers/iommu/intel-iommu.c
2784 +++ b/drivers/iommu/intel-iommu.c
2785 @@ -1827,10 +1827,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2786 if (!pte)
2787 return -ENOMEM;
2788 /* It is large page*/
2789 - if (largepage_lvl > 1)
2790 + if (largepage_lvl > 1) {
2791 pteval |= DMA_PTE_LARGE_PAGE;
2792 - else
2793 + /* Ensure that old small page tables are removed to make room
2794 + for superpage, if they exist. */
2795 + dma_pte_clear_range(domain, iov_pfn,
2796 + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2797 + dma_pte_free_pagetable(domain, iov_pfn,
2798 + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2799 + } else {
2800 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2801 + }
2802
2803 }
2804 /* We don't need lock here, nobody else
2805 diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
2806 index e4e8415..aefb78e 100644
2807 --- a/drivers/md/dm-bio-prison.c
2808 +++ b/drivers/md/dm-bio-prison.c
2809 @@ -208,31 +208,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
2810 EXPORT_SYMBOL_GPL(dm_cell_release);
2811
2812 /*
2813 - * There are a couple of places where we put a bio into a cell briefly
2814 - * before taking it out again. In these situations we know that no other
2815 - * bio may be in the cell. This function releases the cell, and also does
2816 - * a sanity check.
2817 - */
2818 -static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
2819 -{
2820 - BUG_ON(cell->holder != bio);
2821 - BUG_ON(!bio_list_empty(&cell->bios));
2822 -
2823 - __cell_release(cell, NULL);
2824 -}
2825 -
2826 -void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
2827 -{
2828 - unsigned long flags;
2829 - struct dm_bio_prison *prison = cell->prison;
2830 -
2831 - spin_lock_irqsave(&prison->lock, flags);
2832 - __cell_release_singleton(cell, bio);
2833 - spin_unlock_irqrestore(&prison->lock, flags);
2834 -}
2835 -EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
2836 -
2837 -/*
2838 * Sometimes we don't want the holder, just the additional bios.
2839 */
2840 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
2841 diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
2842 index 4e0ac37..53d1a7a 100644
2843 --- a/drivers/md/dm-bio-prison.h
2844 +++ b/drivers/md/dm-bio-prison.h
2845 @@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
2846 struct bio *inmate, struct dm_bio_prison_cell **ref);
2847
2848 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
2849 -void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
2850 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
2851 void dm_cell_error(struct dm_bio_prison_cell *cell);
2852
2853 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
2854 index afd9598..a651d52 100644
2855 --- a/drivers/md/dm-ioctl.c
2856 +++ b/drivers/md/dm-ioctl.c
2857 @@ -1566,6 +1566,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
2858 if (copy_from_user(dmi, user, tmp.data_size))
2859 goto bad;
2860
2861 + /*
2862 + * Abort if something changed the ioctl data while it was being copied.
2863 + */
2864 + if (dmi->data_size != tmp.data_size) {
2865 + DMERR("rejecting ioctl: data size modified while processing parameters");
2866 + goto bad;
2867 + }
2868 +
2869 /* Wipe the user buffer so we do not return it to userspace */
2870 if (secure_data && clear_user(user, tmp.data_size))
2871 goto bad;
2872 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
2873 index 100368e..fa29557 100644
2874 --- a/drivers/md/dm-table.c
2875 +++ b/drivers/md/dm-table.c
2876 @@ -1445,6 +1445,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
2877 else
2878 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
2879
2880 + q->limits.max_write_same_sectors = 0;
2881 +
2882 dm_table_set_integrity(t);
2883
2884 /*
2885 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
2886 index 058acf3..41c9e81 100644
2887 --- a/drivers/md/dm-thin.c
2888 +++ b/drivers/md/dm-thin.c
2889 @@ -368,6 +368,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
2890 dm_thin_changed_this_transaction(tc->td);
2891 }
2892
2893 +static void inc_all_io_entry(struct pool *pool, struct bio *bio)
2894 +{
2895 + struct dm_thin_endio_hook *h;
2896 +
2897 + if (bio->bi_rw & REQ_DISCARD)
2898 + return;
2899 +
2900 + h = dm_get_mapinfo(bio)->ptr;
2901 + h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
2902 +}
2903 +
2904 static void issue(struct thin_c *tc, struct bio *bio)
2905 {
2906 struct pool *pool = tc->pool;
2907 @@ -513,8 +524,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
2908 }
2909
2910 /*
2911 - * Same as cell_defer above, except it omits one particular detainee,
2912 - * a write bio that covers the block and has already been processed.
2913 + * Same as cell_defer except it omits the original holder of the cell.
2914 */
2915 static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2916 {
2917 @@ -597,13 +607,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
2918 {
2919 struct thin_c *tc = m->tc;
2920
2921 + inc_all_io_entry(tc->pool, m->bio);
2922 + cell_defer_except(tc, m->cell);
2923 + cell_defer_except(tc, m->cell2);
2924 +
2925 if (m->pass_discard)
2926 remap_and_issue(tc, m->bio, m->data_block);
2927 else
2928 bio_endio(m->bio, 0);
2929
2930 - cell_defer_except(tc, m->cell);
2931 - cell_defer_except(tc, m->cell2);
2932 mempool_free(m, tc->pool->mapping_pool);
2933 }
2934
2935 @@ -711,6 +723,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
2936 h->overwrite_mapping = m;
2937 m->bio = bio;
2938 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
2939 + inc_all_io_entry(pool, bio);
2940 remap_and_issue(tc, bio, data_dest);
2941 } else {
2942 struct dm_io_region from, to;
2943 @@ -780,6 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
2944 h->overwrite_mapping = m;
2945 m->bio = bio;
2946 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
2947 + inc_all_io_entry(pool, bio);
2948 remap_and_issue(tc, bio, data_block);
2949 } else {
2950 int r;
2951 @@ -936,7 +950,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2952 */
2953 build_data_key(tc->td, lookup_result.block, &key2);
2954 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
2955 - dm_cell_release_singleton(cell, bio);
2956 + cell_defer_except(tc, cell);
2957 break;
2958 }
2959
2960 @@ -962,13 +976,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2961 wake_worker(pool);
2962 }
2963 } else {
2964 + inc_all_io_entry(pool, bio);
2965 + cell_defer_except(tc, cell);
2966 + cell_defer_except(tc, cell2);
2967 +
2968 /*
2969 * The DM core makes sure that the discard doesn't span
2970 * a block boundary. So we submit the discard of a
2971 * partial block appropriately.
2972 */
2973 - dm_cell_release_singleton(cell, bio);
2974 - dm_cell_release_singleton(cell2, bio);
2975 if ((!lookup_result.shared) && pool->pf.discard_passdown)
2976 remap_and_issue(tc, bio, lookup_result.block);
2977 else
2978 @@ -980,13 +996,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2979 /*
2980 * It isn't provisioned, just forget it.
2981 */
2982 - dm_cell_release_singleton(cell, bio);
2983 + cell_defer_except(tc, cell);
2984 bio_endio(bio, 0);
2985 break;
2986
2987 default:
2988 DMERR("discard: find block unexpectedly returned %d", r);
2989 - dm_cell_release_singleton(cell, bio);
2990 + cell_defer_except(tc, cell);
2991 bio_io_error(bio);
2992 break;
2993 }
2994 @@ -1040,8 +1056,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
2995 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
2996
2997 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
2998 + inc_all_io_entry(pool, bio);
2999 + cell_defer_except(tc, cell);
3000
3001 - dm_cell_release_singleton(cell, bio);
3002 remap_and_issue(tc, bio, lookup_result->block);
3003 }
3004 }
3005 @@ -1056,7 +1073,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
3006 * Remap empty bios (flushes) immediately, without provisioning.
3007 */
3008 if (!bio->bi_size) {
3009 - dm_cell_release_singleton(cell, bio);
3010 + inc_all_io_entry(tc->pool, bio);
3011 + cell_defer_except(tc, cell);
3012 +
3013 remap_and_issue(tc, bio, 0);
3014 return;
3015 }
3016 @@ -1066,7 +1085,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
3017 */
3018 if (bio_data_dir(bio) == READ) {
3019 zero_fill_bio(bio);
3020 - dm_cell_release_singleton(cell, bio);
3021 + cell_defer_except(tc, cell);
3022 bio_endio(bio, 0);
3023 return;
3024 }
3025 @@ -1111,26 +1130,22 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
3026 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
3027 switch (r) {
3028 case 0:
3029 - /*
3030 - * We can release this cell now. This thread is the only
3031 - * one that puts bios into a cell, and we know there were
3032 - * no preceding bios.
3033 - */
3034 - /*
3035 - * TODO: this will probably have to change when discard goes
3036 - * back in.
3037 - */
3038 - dm_cell_release_singleton(cell, bio);
3039 -
3040 - if (lookup_result.shared)
3041 + if (lookup_result.shared) {
3042 process_shared_bio(tc, bio, block, &lookup_result);
3043 - else
3044 + cell_defer_except(tc, cell);
3045 + } else {
3046 + inc_all_io_entry(tc->pool, bio);
3047 + cell_defer_except(tc, cell);
3048 +
3049 remap_and_issue(tc, bio, lookup_result.block);
3050 + }
3051 break;
3052
3053 case -ENODATA:
3054 if (bio_data_dir(bio) == READ && tc->origin_dev) {
3055 - dm_cell_release_singleton(cell, bio);
3056 + inc_all_io_entry(tc->pool, bio);
3057 + cell_defer_except(tc, cell);
3058 +
3059 remap_to_origin_and_issue(tc, bio);
3060 } else
3061 provision_block(tc, bio, block, cell);
3062 @@ -1138,7 +1153,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
3063
3064 default:
3065 DMERR("dm_thin_find_block() failed, error = %d", r);
3066 - dm_cell_release_singleton(cell, bio);
3067 + cell_defer_except(tc, cell);
3068 bio_io_error(bio);
3069 break;
3070 }
3071 @@ -1156,8 +1171,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
3072 case 0:
3073 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
3074 bio_io_error(bio);
3075 - else
3076 + else {
3077 + inc_all_io_entry(tc->pool, bio);
3078 remap_and_issue(tc, bio, lookup_result.block);
3079 + }
3080 break;
3081
3082 case -ENODATA:
3083 @@ -1167,6 +1184,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
3084 }
3085
3086 if (tc->origin_dev) {
3087 + inc_all_io_entry(tc->pool, bio);
3088 remap_to_origin_and_issue(tc, bio);
3089 break;
3090 }
3091 @@ -1347,7 +1365,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
3092
3093 h->tc = tc;
3094 h->shared_read_entry = NULL;
3095 - h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
3096 + h->all_io_entry = NULL;
3097 h->overwrite_mapping = NULL;
3098
3099 return h;
3100 @@ -1364,6 +1382,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
3101 dm_block_t block = get_bio_block(tc, bio);
3102 struct dm_thin_device *td = tc->td;
3103 struct dm_thin_lookup_result result;
3104 + struct dm_bio_prison_cell *cell1, *cell2;
3105 + struct dm_cell_key key;
3106
3107 map_context->ptr = thin_hook_bio(tc, bio);
3108
3109 @@ -1400,12 +1420,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
3110 * shared flag will be set in their case.
3111 */
3112 thin_defer_bio(tc, bio);
3113 - r = DM_MAPIO_SUBMITTED;
3114 - } else {
3115 - remap(tc, bio, result.block);
3116 - r = DM_MAPIO_REMAPPED;
3117 + return DM_MAPIO_SUBMITTED;
3118 }
3119 - break;
3120 +
3121 + build_virtual_key(tc->td, block, &key);
3122 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
3123 + return DM_MAPIO_SUBMITTED;
3124 +
3125 + build_data_key(tc->td, result.block, &key);
3126 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
3127 + cell_defer_except(tc, cell1);
3128 + return DM_MAPIO_SUBMITTED;
3129 + }
3130 +
3131 + inc_all_io_entry(tc->pool, bio);
3132 + cell_defer_except(tc, cell2);
3133 + cell_defer_except(tc, cell1);
3134 +
3135 + remap(tc, bio, result.block);
3136 + return DM_MAPIO_REMAPPED;
3137
3138 case -ENODATA:
3139 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
3140 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
3141 index 5709bfe..accbb05 100644
3142 --- a/drivers/md/persistent-data/dm-btree-internal.h
3143 +++ b/drivers/md/persistent-data/dm-btree-internal.h
3144 @@ -36,13 +36,13 @@ struct node_header {
3145 __le32 padding;
3146 } __packed;
3147
3148 -struct node {
3149 +struct btree_node {
3150 struct node_header header;
3151 __le64 keys[0];
3152 } __packed;
3153
3154
3155 -void inc_children(struct dm_transaction_manager *tm, struct node *n,
3156 +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
3157 struct dm_btree_value_type *vt);
3158
3159 int new_block(struct dm_btree_info *info, struct dm_block **result);
3160 @@ -64,7 +64,7 @@ struct ro_spine {
3161 void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
3162 int exit_ro_spine(struct ro_spine *s);
3163 int ro_step(struct ro_spine *s, dm_block_t new_child);
3164 -struct node *ro_node(struct ro_spine *s);
3165 +struct btree_node *ro_node(struct ro_spine *s);
3166
3167 struct shadow_spine {
3168 struct dm_btree_info *info;
3169 @@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
3170 /*
3171 * Some inlines.
3172 */
3173 -static inline __le64 *key_ptr(struct node *n, uint32_t index)
3174 +static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
3175 {
3176 return n->keys + index;
3177 }
3178
3179 -static inline void *value_base(struct node *n)
3180 +static inline void *value_base(struct btree_node *n)
3181 {
3182 return &n->keys[le32_to_cpu(n->header.max_entries)];
3183 }
3184
3185 -static inline void *value_ptr(struct node *n, uint32_t index)
3186 +static inline void *value_ptr(struct btree_node *n, uint32_t index)
3187 {
3188 uint32_t value_size = le32_to_cpu(n->header.value_size);
3189 return value_base(n) + (value_size * index);
3190 @@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
3191 /*
3192 * Assumes the values are suitably-aligned and converts to core format.
3193 */
3194 -static inline uint64_t value64(struct node *n, uint32_t index)
3195 +static inline uint64_t value64(struct btree_node *n, uint32_t index)
3196 {
3197 __le64 *values_le = value_base(n);
3198
3199 @@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
3200 /*
3201 * Searching for a key within a single node.
3202 */
3203 -int lower_bound(struct node *n, uint64_t key);
3204 +int lower_bound(struct btree_node *n, uint64_t key);
3205
3206 extern struct dm_block_validator btree_node_validator;
3207
3208 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
3209 index aa71e23..c4f2813 100644
3210 --- a/drivers/md/persistent-data/dm-btree-remove.c
3211 +++ b/drivers/md/persistent-data/dm-btree-remove.c
3212 @@ -53,7 +53,7 @@
3213 /*
3214 * Some little utilities for moving node data around.
3215 */
3216 -static void node_shift(struct node *n, int shift)
3217 +static void node_shift(struct btree_node *n, int shift)
3218 {
3219 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
3220 uint32_t value_size = le32_to_cpu(n->header.value_size);
3221 @@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
3222 }
3223 }
3224
3225 -static void node_copy(struct node *left, struct node *right, int shift)
3226 +static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
3227 {
3228 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3229 uint32_t value_size = le32_to_cpu(left->header.value_size);
3230 @@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
3231 /*
3232 * Delete a specific entry from a leaf node.
3233 */
3234 -static void delete_at(struct node *n, unsigned index)
3235 +static void delete_at(struct btree_node *n, unsigned index)
3236 {
3237 unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
3238 unsigned nr_to_copy = nr_entries - (index + 1);
3239 @@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
3240 n->header.nr_entries = cpu_to_le32(nr_entries - 1);
3241 }
3242
3243 -static unsigned merge_threshold(struct node *n)
3244 +static unsigned merge_threshold(struct btree_node *n)
3245 {
3246 return le32_to_cpu(n->header.max_entries) / 3;
3247 }
3248 @@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
3249 struct child {
3250 unsigned index;
3251 struct dm_block *block;
3252 - struct node *n;
3253 + struct btree_node *n;
3254 };
3255
3256 static struct dm_btree_value_type le64_type = {
3257 @@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
3258 .equal = NULL
3259 };
3260
3261 -static int init_child(struct dm_btree_info *info, struct node *parent,
3262 +static int init_child(struct dm_btree_info *info, struct btree_node *parent,
3263 unsigned index, struct child *result)
3264 {
3265 int r, inc;
3266 @@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
3267 return dm_tm_unlock(info->tm, c->block);
3268 }
3269
3270 -static void shift(struct node *left, struct node *right, int count)
3271 +static void shift(struct btree_node *left, struct btree_node *right, int count)
3272 {
3273 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3274 uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
3275 @@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
3276 right->header.nr_entries = cpu_to_le32(nr_right + count);
3277 }
3278
3279 -static void __rebalance2(struct dm_btree_info *info, struct node *parent,
3280 +static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
3281 struct child *l, struct child *r)
3282 {
3283 - struct node *left = l->n;
3284 - struct node *right = r->n;
3285 + struct btree_node *left = l->n;
3286 + struct btree_node *right = r->n;
3287 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3288 uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
3289 unsigned threshold = 2 * merge_threshold(left) + 1;
3290 @@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
3291 unsigned left_index)
3292 {
3293 int r;
3294 - struct node *parent;
3295 + struct btree_node *parent;
3296 struct child left, right;
3297
3298 parent = dm_block_data(shadow_current(s));
3299 @@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
3300 * in right, then rebalance2. This wastes some cpu, but I want something
3301 * simple atm.
3302 */
3303 -static void delete_center_node(struct dm_btree_info *info, struct node *parent,
3304 +static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
3305 struct child *l, struct child *c, struct child *r,
3306 - struct node *left, struct node *center, struct node *right,
3307 + struct btree_node *left, struct btree_node *center, struct btree_node *right,
3308 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
3309 {
3310 uint32_t max_entries = le32_to_cpu(left->header.max_entries);
3311 @@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
3312 /*
3313 * Redistributes entries among 3 sibling nodes.
3314 */
3315 -static void redistribute3(struct dm_btree_info *info, struct node *parent,
3316 +static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
3317 struct child *l, struct child *c, struct child *r,
3318 - struct node *left, struct node *center, struct node *right,
3319 + struct btree_node *left, struct btree_node *center, struct btree_node *right,
3320 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
3321 {
3322 int s;
3323 @@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
3324 *key_ptr(parent, r->index) = right->keys[0];
3325 }
3326
3327 -static void __rebalance3(struct dm_btree_info *info, struct node *parent,
3328 +static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
3329 struct child *l, struct child *c, struct child *r)
3330 {
3331 - struct node *left = l->n;
3332 - struct node *center = c->n;
3333 - struct node *right = r->n;
3334 + struct btree_node *left = l->n;
3335 + struct btree_node *center = c->n;
3336 + struct btree_node *right = r->n;
3337
3338 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3339 uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
3340 @@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
3341 unsigned left_index)
3342 {
3343 int r;
3344 - struct node *parent = dm_block_data(shadow_current(s));
3345 + struct btree_node *parent = dm_block_data(shadow_current(s));
3346 struct child left, center, right;
3347
3348 /*
3349 @@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
3350 {
3351 int r;
3352 struct dm_block *block;
3353 - struct node *n;
3354 + struct btree_node *n;
3355
3356 r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
3357 if (r)
3358 @@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
3359 {
3360 int i, r, has_left_sibling, has_right_sibling;
3361 uint32_t child_entries;
3362 - struct node *n;
3363 + struct btree_node *n;
3364
3365 n = dm_block_data(shadow_current(s));
3366
3367 @@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
3368 return r;
3369 }
3370
3371 -static int do_leaf(struct node *n, uint64_t key, unsigned *index)
3372 +static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
3373 {
3374 int i = lower_bound(n, key);
3375
3376 @@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
3377 uint64_t key, unsigned *index)
3378 {
3379 int i = *index, r;
3380 - struct node *n;
3381 + struct btree_node *n;
3382
3383 for (;;) {
3384 r = shadow_step(s, root, vt);
3385 @@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
3386 unsigned level, last_level = info->levels - 1;
3387 int index = 0, r = 0;
3388 struct shadow_spine spine;
3389 - struct node *n;
3390 + struct btree_node *n;
3391
3392 init_shadow_spine(&spine, info);
3393 for (level = 0; level < info->levels; level++) {
3394 diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
3395 index d9a7912..2f0805c 100644
3396 --- a/drivers/md/persistent-data/dm-btree-spine.c
3397 +++ b/drivers/md/persistent-data/dm-btree-spine.c
3398 @@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
3399 struct dm_block *b,
3400 size_t block_size)
3401 {
3402 - struct node *n = dm_block_data(b);
3403 + struct btree_node *n = dm_block_data(b);
3404 struct node_header *h = &n->header;
3405
3406 h->blocknr = cpu_to_le64(dm_block_location(b));
3407 @@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v,
3408 struct dm_block *b,
3409 size_t block_size)
3410 {
3411 - struct node *n = dm_block_data(b);
3412 + struct btree_node *n = dm_block_data(b);
3413 struct node_header *h = &n->header;
3414 size_t value_size;
3415 __le32 csum_disk;
3416 @@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
3417 return r;
3418 }
3419
3420 -struct node *ro_node(struct ro_spine *s)
3421 +struct btree_node *ro_node(struct ro_spine *s)
3422 {
3423 struct dm_block *block;
3424
3425 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
3426 index d12b2cc..371f3d4 100644
3427 --- a/drivers/md/persistent-data/dm-btree.c
3428 +++ b/drivers/md/persistent-data/dm-btree.c
3429 @@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
3430 /*----------------------------------------------------------------*/
3431
3432 /* makes the assumption that no two keys are the same. */
3433 -static int bsearch(struct node *n, uint64_t key, int want_hi)
3434 +static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
3435 {
3436 int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
3437
3438 @@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
3439 return want_hi ? hi : lo;
3440 }
3441
3442 -int lower_bound(struct node *n, uint64_t key)
3443 +int lower_bound(struct btree_node *n, uint64_t key)
3444 {
3445 return bsearch(n, key, 0);
3446 }
3447
3448 -void inc_children(struct dm_transaction_manager *tm, struct node *n,
3449 +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
3450 struct dm_btree_value_type *vt)
3451 {
3452 unsigned i;
3453 @@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
3454 vt->inc(vt->context, value_ptr(n, i));
3455 }
3456
3457 -static int insert_at(size_t value_size, struct node *node, unsigned index,
3458 +static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
3459 uint64_t key, void *value)
3460 __dm_written_to_disk(value)
3461 {
3462 @@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
3463 {
3464 int r;
3465 struct dm_block *b;
3466 - struct node *n;
3467 + struct btree_node *n;
3468 size_t block_size;
3469 uint32_t max_entries;
3470
3471 @@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
3472 #define MAX_SPINE_DEPTH 64
3473 struct frame {
3474 struct dm_block *b;
3475 - struct node *n;
3476 + struct btree_node *n;
3477 unsigned level;
3478 unsigned nr_children;
3479 unsigned current_child;
3480 @@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
3481 /*----------------------------------------------------------------*/
3482
3483 static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
3484 - int (*search_fn)(struct node *, uint64_t),
3485 + int (*search_fn)(struct btree_node *, uint64_t),
3486 uint64_t *result_key, void *v, size_t value_size)
3487 {
3488 int i, r;
3489 @@ -406,7 +406,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
3490 size_t size;
3491 unsigned nr_left, nr_right;
3492 struct dm_block *left, *right, *parent;
3493 - struct node *ln, *rn, *pn;
3494 + struct btree_node *ln, *rn, *pn;
3495 __le64 location;
3496
3497 left = shadow_current(s);
3498 @@ -491,7 +491,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
3499 size_t size;
3500 unsigned nr_left, nr_right;
3501 struct dm_block *left, *right, *new_parent;
3502 - struct node *pn, *ln, *rn;
3503 + struct btree_node *pn, *ln, *rn;
3504 __le64 val;
3505
3506 new_parent = shadow_current(s);
3507 @@ -576,7 +576,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
3508 uint64_t key, unsigned *index)
3509 {
3510 int r, i = *index, top = 1;
3511 - struct node *node;
3512 + struct btree_node *node;
3513
3514 for (;;) {
3515 r = shadow_step(s, root, vt);
3516 @@ -643,7 +643,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
3517 unsigned level, index = -1, last_level = info->levels - 1;
3518 dm_block_t block = root;
3519 struct shadow_spine spine;
3520 - struct node *n;
3521 + struct btree_node *n;
3522 struct dm_btree_value_type le64_type;
3523
3524 le64_type.context = NULL;
3525 diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
3526 index f8b7771..7604f4e 100644
3527 --- a/drivers/mfd/mfd-core.c
3528 +++ b/drivers/mfd/mfd-core.c
3529 @@ -21,6 +21,10 @@
3530 #include <linux/irqdomain.h>
3531 #include <linux/of.h>
3532
3533 +static struct device_type mfd_dev_type = {
3534 + .name = "mfd_device",
3535 +};
3536 +
3537 int mfd_cell_enable(struct platform_device *pdev)
3538 {
3539 const struct mfd_cell *cell = mfd_get_cell(pdev);
3540 @@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
3541 goto fail_device;
3542
3543 pdev->dev.parent = parent;
3544 + pdev->dev.type = &mfd_dev_type;
3545
3546 if (parent->of_node && cell->of_compatible) {
3547 for_each_child_of_node(parent->of_node, np) {
3548 @@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
3549
3550 static int mfd_remove_devices_fn(struct device *dev, void *c)
3551 {
3552 - struct platform_device *pdev = to_platform_device(dev);
3553 - const struct mfd_cell *cell = mfd_get_cell(pdev);
3554 + struct platform_device *pdev;
3555 + const struct mfd_cell *cell;
3556 atomic_t **usage_count = c;
3557
3558 + if (dev->type != &mfd_dev_type)
3559 + return 0;
3560 +
3561 + pdev = to_platform_device(dev);
3562 + cell = mfd_get_cell(pdev);
3563 +
3564 /* find the base address of usage_count pointers (for freeing) */
3565 if (!*usage_count || (cell->usage_count < *usage_count))
3566 *usage_count = cell->usage_count;
3567 diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
3568 index 8fefc96..f1ac288 100644
3569 --- a/drivers/mfd/wm8994-core.c
3570 +++ b/drivers/mfd/wm8994-core.c
3571 @@ -557,6 +557,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
3572 case 1:
3573 case 2:
3574 case 3:
3575 + case 4:
3576 regmap_patch = wm1811_reva_patch;
3577 patch_regs = ARRAY_SIZE(wm1811_reva_patch);
3578 break;
3579 diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
3580 index adb6c3e..2cdeab8 100644
3581 --- a/drivers/mtd/nand/cs553x_nand.c
3582 +++ b/drivers/mtd/nand/cs553x_nand.c
3583 @@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
3584 this->ecc.hwctl = cs_enable_hwecc;
3585 this->ecc.calculate = cs_calculate_ecc;
3586 this->ecc.correct = nand_correct_data;
3587 + this->ecc.strength = 1;
3588
3589 /* Enable the following for a flash based bad block table */
3590 this->bbt_options = NAND_BBT_USE_FLASH;
3591 @@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
3592 goto out_ior;
3593 }
3594
3595 - this->ecc.strength = 1;
3596 -
3597 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
3598
3599 cs553x_mtd[cs] = new_mtd;
3600 diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3601 index 3502acc..84f0526 100644
3602 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3603 +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3604 @@ -166,6 +166,15 @@ int gpmi_init(struct gpmi_nand_data *this)
3605 if (ret)
3606 goto err_out;
3607
3608 + /*
3609 + * Reset BCH here, too. We got failures otherwise :(
3610 + * See later BCH reset for explanation of MX23 handling
3611 + */
3612 + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
3613 + if (ret)
3614 + goto err_out;
3615 +
3616 +
3617 /* Choose NAND mode. */
3618 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
3619
3620 diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
3621 index 3d1899f..c4c80f6 100644
3622 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
3623 +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
3624 @@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3625 u32 reply;
3626 u8 is_going_down = 0;
3627 int i;
3628 + unsigned long flags;
3629
3630 slave_state[slave].comm_toggle ^= 1;
3631 reply = (u32) slave_state[slave].comm_toggle << 31;
3632 @@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3633 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
3634 goto reset_slave;
3635 }
3636 - spin_lock(&priv->mfunc.master.slave_state_lock);
3637 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3638 if (!slave_state[slave].is_slave_going_down)
3639 slave_state[slave].last_cmd = cmd;
3640 else
3641 is_going_down = 1;
3642 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3643 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3644 if (is_going_down) {
3645 mlx4_warn(dev, "Slave is going down aborting command(%d)"
3646 " executing from slave:%d\n",
3647 @@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3648 reset_slave:
3649 /* cleanup any slave resources */
3650 mlx4_delete_all_resources_for_slave(dev, slave);
3651 - spin_lock(&priv->mfunc.master.slave_state_lock);
3652 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3653 if (!slave_state[slave].is_slave_going_down)
3654 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
3655 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3656 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3657 /*with slave in the middle of flr, no need to clean resources again.*/
3658 inform_slave_state:
3659 memset(&slave_state[slave].event_eq, 0,
3660 diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
3661 index b84a88b..cda430b 100644
3662 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
3663 +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
3664 @@ -401,6 +401,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
3665 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
3666 int i;
3667 int err;
3668 + unsigned long flags;
3669
3670 mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
3671
3672 @@ -412,10 +413,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
3673
3674 mlx4_delete_all_resources_for_slave(dev, i);
3675 /*return the slave to running mode*/
3676 - spin_lock(&priv->mfunc.master.slave_state_lock);
3677 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3678 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
3679 slave_state[i].is_slave_going_down = 0;
3680 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3681 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3682 /*notify the FW:*/
3683 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
3684 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
3685 @@ -440,6 +441,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
3686 u8 update_slave_state;
3687 int i;
3688 enum slave_port_gen_event gen_event;
3689 + unsigned long flags;
3690
3691 while ((eqe = next_eqe_sw(eq))) {
3692 /*
3693 @@ -647,13 +649,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
3694 } else
3695 update_slave_state = 1;
3696
3697 - spin_lock(&priv->mfunc.master.slave_state_lock);
3698 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3699 if (update_slave_state) {
3700 priv->mfunc.master.slave_state[flr_slave].active = false;
3701 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
3702 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
3703 }
3704 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3705 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3706 queue_work(priv->mfunc.master.comm_wq,
3707 &priv->mfunc.master.slave_flr_event_work);
3708 break;
3709 diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
3710 index 6650fde..9f1e947 100644
3711 --- a/drivers/net/wimax/i2400m/i2400m-usb.h
3712 +++ b/drivers/net/wimax/i2400m/i2400m-usb.h
3713 @@ -152,6 +152,9 @@ enum {
3714 /* Device IDs */
3715 USB_DEVICE_ID_I6050 = 0x0186,
3716 USB_DEVICE_ID_I6050_2 = 0x0188,
3717 + USB_DEVICE_ID_I6150 = 0x07d6,
3718 + USB_DEVICE_ID_I6150_2 = 0x07d7,
3719 + USB_DEVICE_ID_I6150_3 = 0x07d9,
3720 USB_DEVICE_ID_I6250 = 0x0187,
3721 };
3722
3723 diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
3724 index 713d033..080f363 100644
3725 --- a/drivers/net/wimax/i2400m/usb.c
3726 +++ b/drivers/net/wimax/i2400m/usb.c
3727 @@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
3728 switch (id->idProduct) {
3729 case USB_DEVICE_ID_I6050:
3730 case USB_DEVICE_ID_I6050_2:
3731 + case USB_DEVICE_ID_I6150:
3732 + case USB_DEVICE_ID_I6150_2:
3733 + case USB_DEVICE_ID_I6150_3:
3734 case USB_DEVICE_ID_I6250:
3735 i2400mu->i6050 = 1;
3736 break;
3737 @@ -759,6 +762,9 @@ static
3738 struct usb_device_id i2400mu_id_table[] = {
3739 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
3740 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
3741 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
3742 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
3743 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
3744 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
3745 { USB_DEVICE(0x8086, 0x0181) },
3746 { USB_DEVICE(0x8086, 0x1403) },
3747 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
3748 index 9f31cfa..a4ee253 100644
3749 --- a/drivers/net/wireless/ath/ath5k/base.c
3750 +++ b/drivers/net/wireless/ath/ath5k/base.c
3751 @@ -848,7 +848,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
3752 return;
3753 dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
3754 DMA_TO_DEVICE);
3755 - dev_kfree_skb_any(bf->skb);
3756 + ieee80211_free_txskb(ah->hw, bf->skb);
3757 bf->skb = NULL;
3758 bf->skbaddr = 0;
3759 bf->desc->ds_data = 0;
3760 @@ -1575,7 +1575,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
3761 return;
3762
3763 drop_packet:
3764 - dev_kfree_skb_any(skb);
3765 + ieee80211_free_txskb(hw, skb);
3766 }
3767
3768 static void
3769 diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3770 index 7a28538..c1369ff 100644
3771 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3772 +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3773 @@ -62,7 +62,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
3774 u16 qnum = skb_get_queue_mapping(skb);
3775
3776 if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
3777 - dev_kfree_skb_any(skb);
3778 + ieee80211_free_txskb(hw, skb);
3779 return;
3780 }
3781
3782 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3783 index 6f7cf49..262e1e0 100644
3784 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3785 +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3786 @@ -534,98 +534,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
3787
3788 static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
3789 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
3790 - {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3791 - {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3792 - {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3793 + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3794 + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3795 + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3796 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3797 - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
3798 - {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3799 - {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
3800 - {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
3801 - {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
3802 - {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
3803 - {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
3804 - {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
3805 - {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
3806 - {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
3807 - {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
3808 - {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
3809 - {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
3810 - {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
3811 - {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
3812 - {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
3813 - {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
3814 - {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
3815 - {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
3816 - {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
3817 - {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
3818 - {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
3819 - {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
3820 - {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
3821 - {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
3822 - {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
3823 - {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3824 - {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3825 - {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3826 - {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3827 - {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3828 - {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3829 - {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3830 - {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
3831 - {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
3832 - {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
3833 - {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
3834 - {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
3835 - {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
3836 - {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
3837 - {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
3838 - {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
3839 - {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
3840 - {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
3841 - {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
3842 - {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
3843 - {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
3844 - {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
3845 - {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
3846 - {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
3847 - {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
3848 - {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
3849 - {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
3850 - {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
3851 - {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
3852 - {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
3853 - {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
3854 - {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
3855 - {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3856 - {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3857 - {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3858 - {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3859 - {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3860 - {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3861 - {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3862 + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
3863 + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
3864 + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
3865 + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
3866 + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
3867 + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
3868 + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
3869 + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
3870 + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
3871 + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
3872 + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
3873 + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
3874 + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
3875 + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
3876 + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
3877 + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
3878 + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
3879 + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
3880 + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
3881 + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
3882 + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
3883 + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
3884 + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
3885 + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
3886 + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
3887 + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
3888 + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3889 + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3890 + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3891 + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3892 + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3893 + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3894 + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3895 + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
3896 + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
3897 + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
3898 + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
3899 + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
3900 + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
3901 + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
3902 + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
3903 + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
3904 + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
3905 + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
3906 + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
3907 + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
3908 + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
3909 + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
3910 + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
3911 + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
3912 + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
3913 + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
3914 + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
3915 + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
3916 + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
3917 + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
3918 + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
3919 + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
3920 + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3921 + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3922 + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3923 + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3924 + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3925 + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3926 + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3927 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3928 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3929 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3930 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3931 - {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3932 - {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
3933 - {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
3934 - {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
3935 - {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
3936 - {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
3937 - {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
3938 - {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3939 - {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3940 - {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3941 - {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3942 - {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3943 - {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3944 - {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3945 - {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3946 + {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
3947 + {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
3948 + {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
3949 + {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
3950 + {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
3951 + {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
3952 + {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
3953 + {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3954 + {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3955 + {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3956 + {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3957 + {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3958 + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3959 + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3960 + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3961 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3962 - {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3963 - {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3964 - {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3965 + {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3966 + {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3967 + {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3968 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3969 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
3970 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
3971 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3972 index 41b1a75..54ba42f 100644
3973 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3974 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3975 @@ -68,13 +68,13 @@
3976 #define AR9300_BASE_ADDR 0x3ff
3977 #define AR9300_BASE_ADDR_512 0x1ff
3978
3979 -#define AR9300_OTP_BASE 0x14000
3980 -#define AR9300_OTP_STATUS 0x15f18
3981 +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
3982 +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
3983 #define AR9300_OTP_STATUS_TYPE 0x7
3984 #define AR9300_OTP_STATUS_VALID 0x4
3985 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
3986 #define AR9300_OTP_STATUS_SM_BUSY 0x1
3987 -#define AR9300_OTP_READ_DATA 0x15f1c
3988 +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
3989
3990 enum targetPowerHTRates {
3991 HT_TARGET_RATE_0_8_16,
3992 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3993 index 1a36fa2..226dd13 100644
3994 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3995 +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3996 @@ -219,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
3997
3998 /* Awake -> Sleep Setting */
3999 INIT_INI_ARRAY(&ah->iniPcieSerdes,
4000 - ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
4001 + ar9462_pciephy_clkreq_disable_L1_2p0);
4002 /* Sleep -> Awake Setting */
4003 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
4004 - ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
4005 + ar9462_pciephy_clkreq_disable_L1_2p0);
4006
4007 /* Fast clock modal settings */
4008 INIT_INI_ARRAY(&ah->iniModesFastClock,
4009 @@ -540,7 +540,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
4010 ar9340Common_rx_gain_table_1p0);
4011 else if (AR_SREV_9485_11(ah))
4012 INIT_INI_ARRAY(&ah->iniModesRxGain,
4013 - ar9485Common_wo_xlna_rx_gain_1_1);
4014 + ar9485_common_rx_gain_1_1);
4015 else if (AR_SREV_9550(ah)) {
4016 INIT_INI_ARRAY(&ah->iniModesRxGain,
4017 ar955x_1p0_common_rx_gain_table);
4018 diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
4019 index e5cceb0..bbd249d 100644
4020 --- a/drivers/net/wireless/ath/ath9k/calib.c
4021 +++ b/drivers/net/wireless/ath/ath9k/calib.c
4022 @@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
4023
4024 if (chan && chan->noisefloor) {
4025 s8 delta = chan->noisefloor -
4026 + ATH9K_NF_CAL_NOISE_THRESH -
4027 ath9k_hw_get_default_nf(ah, chan);
4028 if (delta > 0)
4029 noise += delta;
4030 diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
4031 index 1060c19..60dcb6c 100644
4032 --- a/drivers/net/wireless/ath/ath9k/calib.h
4033 +++ b/drivers/net/wireless/ath/ath9k/calib.h
4034 @@ -21,6 +21,9 @@
4035
4036 #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
4037
4038 +/* Internal noise floor can vary by about 6db depending on the frequency */
4039 +#define ATH9K_NF_CAL_NOISE_THRESH 6
4040 +
4041 #define NUM_NF_READINGS 6
4042 #define ATH9K_NF_CAL_HIST_MAX 5
4043
4044 diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
4045 index 24ac287..98f4010 100644
4046 --- a/drivers/net/wireless/ath/carl9170/fw.c
4047 +++ b/drivers/net/wireless/ath/carl9170/fw.c
4048 @@ -341,8 +341,12 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
4049 if (SUPP(CARL9170FW_WLANTX_CAB)) {
4050 if_comb_types |=
4051 BIT(NL80211_IFTYPE_AP) |
4052 - BIT(NL80211_IFTYPE_MESH_POINT) |
4053 BIT(NL80211_IFTYPE_P2P_GO);
4054 +
4055 +#ifdef CONFIG_MAC80211_MESH
4056 + if_comb_types |=
4057 + BIT(NL80211_IFTYPE_MESH_POINT);
4058 +#endif /* CONFIG_MAC80211_MESH */
4059 }
4060 }
4061
4062 diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
4063 index b298e5d..10e288d 100644
4064 --- a/drivers/net/wireless/b43/b43.h
4065 +++ b/drivers/net/wireless/b43/b43.h
4066 @@ -7,6 +7,7 @@
4067 #include <linux/hw_random.h>
4068 #include <linux/bcma/bcma.h>
4069 #include <linux/ssb/ssb.h>
4070 +#include <linux/completion.h>
4071 #include <net/mac80211.h>
4072
4073 #include "debugfs.h"
4074 @@ -722,6 +723,10 @@ enum b43_firmware_file_type {
4075 struct b43_request_fw_context {
4076 /* The device we are requesting the fw for. */
4077 struct b43_wldev *dev;
4078 + /* a completion event structure needed if this call is asynchronous */
4079 + struct completion fw_load_complete;
4080 + /* a pointer to the firmware object */
4081 + const struct firmware *blob;
4082 /* The type of firmware to request. */
4083 enum b43_firmware_file_type req_type;
4084 /* Error messages for each firmware type. */
4085 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
4086 index 2911e20..263667f 100644
4087 --- a/drivers/net/wireless/b43/main.c
4088 +++ b/drivers/net/wireless/b43/main.c
4089 @@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
4090 b43warn(wl, text);
4091 }
4092
4093 +static void b43_fw_cb(const struct firmware *firmware, void *context)
4094 +{
4095 + struct b43_request_fw_context *ctx = context;
4096 +
4097 + ctx->blob = firmware;
4098 + complete(&ctx->fw_load_complete);
4099 +}
4100 +
4101 int b43_do_request_fw(struct b43_request_fw_context *ctx,
4102 const char *name,
4103 - struct b43_firmware_file *fw)
4104 + struct b43_firmware_file *fw, bool async)
4105 {
4106 - const struct firmware *blob;
4107 struct b43_fw_header *hdr;
4108 u32 size;
4109 int err;
4110 @@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4111 B43_WARN_ON(1);
4112 return -ENOSYS;
4113 }
4114 - err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
4115 + if (async) {
4116 + /* do this part asynchronously */
4117 + init_completion(&ctx->fw_load_complete);
4118 + err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
4119 + ctx->dev->dev->dev, GFP_KERNEL,
4120 + ctx, b43_fw_cb);
4121 + if (err < 0) {
4122 + pr_err("Unable to load firmware\n");
4123 + return err;
4124 + }
4125 + /* stall here until fw ready */
4126 + wait_for_completion(&ctx->fw_load_complete);
4127 + if (ctx->blob)
4128 + goto fw_ready;
4129 + /* On some ARM systems, the async request will fail, but the next sync
4130 + * request works. For this reason, we fall through here
4131 + */
4132 + }
4133 + err = request_firmware(&ctx->blob, ctx->fwname,
4134 + ctx->dev->dev->dev);
4135 if (err == -ENOENT) {
4136 snprintf(ctx->errors[ctx->req_type],
4137 sizeof(ctx->errors[ctx->req_type]),
4138 - "Firmware file \"%s\" not found\n", ctx->fwname);
4139 + "Firmware file \"%s\" not found\n",
4140 + ctx->fwname);
4141 return err;
4142 } else if (err) {
4143 snprintf(ctx->errors[ctx->req_type],
4144 @@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4145 ctx->fwname, err);
4146 return err;
4147 }
4148 - if (blob->size < sizeof(struct b43_fw_header))
4149 +fw_ready:
4150 + if (ctx->blob->size < sizeof(struct b43_fw_header))
4151 goto err_format;
4152 - hdr = (struct b43_fw_header *)(blob->data);
4153 + hdr = (struct b43_fw_header *)(ctx->blob->data);
4154 switch (hdr->type) {
4155 case B43_FW_TYPE_UCODE:
4156 case B43_FW_TYPE_PCM:
4157 size = be32_to_cpu(hdr->size);
4158 - if (size != blob->size - sizeof(struct b43_fw_header))
4159 + if (size != ctx->blob->size - sizeof(struct b43_fw_header))
4160 goto err_format;
4161 /* fallthrough */
4162 case B43_FW_TYPE_IV:
4163 @@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4164 goto err_format;
4165 }
4166
4167 - fw->data = blob;
4168 + fw->data = ctx->blob;
4169 fw->filename = name;
4170 fw->type = ctx->req_type;
4171
4172 @@ -2172,7 +2200,7 @@ err_format:
4173 snprintf(ctx->errors[ctx->req_type],
4174 sizeof(ctx->errors[ctx->req_type]),
4175 "Firmware file \"%s\" format error.\n", ctx->fwname);
4176 - release_firmware(blob);
4177 + release_firmware(ctx->blob);
4178
4179 return -EPROTO;
4180 }
4181 @@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4182 goto err_no_ucode;
4183 }
4184 }
4185 - err = b43_do_request_fw(ctx, filename, &fw->ucode);
4186 + err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
4187 if (err)
4188 goto err_load;
4189
4190 @@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4191 else
4192 goto err_no_pcm;
4193 fw->pcm_request_failed = false;
4194 - err = b43_do_request_fw(ctx, filename, &fw->pcm);
4195 + err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
4196 if (err == -ENOENT) {
4197 /* We did not find a PCM file? Not fatal, but
4198 * core rev <= 10 must do without hwcrypto then. */
4199 @@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4200 default:
4201 goto err_no_initvals;
4202 }
4203 - err = b43_do_request_fw(ctx, filename, &fw->initvals);
4204 + err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
4205 if (err)
4206 goto err_load;
4207
4208 @@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4209 default:
4210 goto err_no_initvals;
4211 }
4212 - err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
4213 + err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
4214 if (err)
4215 goto err_load;
4216
4217 diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
4218 index 8c684cd..abac25e 100644
4219 --- a/drivers/net/wireless/b43/main.h
4220 +++ b/drivers/net/wireless/b43/main.h
4221 @@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
4222
4223
4224 struct b43_request_fw_context;
4225 -int b43_do_request_fw(struct b43_request_fw_context *ctx,
4226 - const char *name,
4227 - struct b43_firmware_file *fw);
4228 +int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
4229 + struct b43_firmware_file *fw, bool async);
4230 void b43_do_release_fw(struct b43_firmware_file *fw);
4231
4232 #endif /* B43_MAIN_H_ */
4233 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4234 index 481345c..0caa4c3 100644
4235 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4236 +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4237 @@ -3730,10 +3730,11 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
4238
4239 len = wpa_ie->len + TLV_HDR_LEN;
4240 data = (u8 *)wpa_ie;
4241 - offset = 0;
4242 + offset = TLV_HDR_LEN;
4243 if (!is_rsn_ie)
4244 offset += VS_IE_FIXED_HDR_LEN;
4245 - offset += WPA_IE_VERSION_LEN;
4246 + else
4247 + offset += WPA_IE_VERSION_LEN;
4248
4249 /* check for multicast cipher suite */
4250 if (offset + WPA_IE_MIN_OUI_LEN > len) {
4251 diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
4252 index f5ca73a..aecf1ce 100644
4253 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c
4254 +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
4255 @@ -1100,29 +1100,6 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
4256 }
4257 }
4258
4259 -static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
4260 - int txq_id, int ssn, struct sk_buff_head *skbs)
4261 -{
4262 - if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
4263 - tid != IWL_TID_NON_QOS &&
4264 - txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
4265 - /*
4266 - * FIXME: this is a uCode bug which need to be addressed,
4267 - * log the information and return for now.
4268 - * Since it is can possibly happen very often and in order
4269 - * not to fill the syslog, don't use IWL_ERR or IWL_WARN
4270 - */
4271 - IWL_DEBUG_TX_QUEUES(priv,
4272 - "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
4273 - txq_id, sta_id, tid,
4274 - priv->tid_data[sta_id][tid].agg.txq_id);
4275 - return 1;
4276 - }
4277 -
4278 - iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
4279 - return 0;
4280 -}
4281 -
4282 int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4283 struct iwl_device_cmd *cmd)
4284 {
4285 @@ -1177,16 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4286 next_reclaimed = ssn;
4287 }
4288
4289 - if (tid != IWL_TID_NON_QOS) {
4290 - priv->tid_data[sta_id][tid].next_reclaimed =
4291 - next_reclaimed;
4292 - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
4293 - next_reclaimed);
4294 - }
4295 + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
4296
4297 - /*we can free until ssn % q.n_bd not inclusive */
4298 - WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
4299 - txq_id, ssn, &skbs));
4300 iwlagn_check_ratid_empty(priv, sta_id, tid);
4301 freed = 0;
4302
4303 @@ -1235,11 +1204,28 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4304 if (!is_agg)
4305 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
4306
4307 + /*
4308 + * W/A for FW bug - the seq_ctl isn't updated when the
4309 + * queues are flushed. Fetch it from the packet itself
4310 + */
4311 + if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
4312 + next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
4313 + next_reclaimed =
4314 + SEQ_TO_SN(next_reclaimed + 0x10);
4315 + }
4316 +
4317 is_offchannel_skb =
4318 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
4319 freed++;
4320 }
4321
4322 + if (tid != IWL_TID_NON_QOS) {
4323 + priv->tid_data[sta_id][tid].next_reclaimed =
4324 + next_reclaimed;
4325 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
4326 + next_reclaimed);
4327 + }
4328 +
4329 WARN_ON(!is_agg && freed != 1);
4330
4331 /*
4332 @@ -1311,16 +1297,27 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
4333 return 0;
4334 }
4335
4336 + if (unlikely(scd_flow != agg->txq_id)) {
4337 + /*
4338 + * FIXME: this is a uCode bug which need to be addressed,
4339 + * log the information and return for now.
4340 + * Since it is can possibly happen very often and in order
4341 + * not to fill the syslog, don't use IWL_ERR or IWL_WARN
4342 + */
4343 + IWL_DEBUG_TX_QUEUES(priv,
4344 + "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
4345 + scd_flow, sta_id, tid, agg->txq_id);
4346 + spin_unlock(&priv->sta_lock);
4347 + return 0;
4348 + }
4349 +
4350 __skb_queue_head_init(&reclaimed_skbs);
4351
4352 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4353 * block-ack window (we assume that they've been successfully
4354 * transmitted ... if not, it's too late anyway). */
4355 - if (iwl_reclaim(priv, sta_id, tid, scd_flow,
4356 - ba_resp_scd_ssn, &reclaimed_skbs)) {
4357 - spin_unlock(&priv->sta_lock);
4358 - return 0;
4359 - }
4360 + iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
4361 + &reclaimed_skbs);
4362
4363 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
4364 "sta_id = %d\n",
4365 diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
4366 index bb69f8f..3654de2 100644
4367 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c
4368 +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
4369 @@ -927,12 +927,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
4370 * back-to-back ISRs and sporadic interrupts from our NIC.
4371 * If we have something to service, the tasklet will re-enable ints.
4372 * If we *don't* have something, we'll re-enable before leaving here. */
4373 - inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
4374 + inta_mask = iwl_read32(trans, CSR_INT_MASK);
4375 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
4376
4377 /* Discover which interrupts are active/pending */
4378 inta = iwl_read32(trans, CSR_INT);
4379
4380 + if (inta & (~inta_mask)) {
4381 + IWL_DEBUG_ISR(trans,
4382 + "We got a masked interrupt (0x%08x)...Ack and ignore\n",
4383 + inta & (~inta_mask));
4384 + iwl_write32(trans, CSR_INT, inta & (~inta_mask));
4385 + inta &= inta_mask;
4386 + }
4387 +
4388 /* Ignore interrupt if there's nothing in NIC to service.
4389 * This may be due to IRQ shared with another device,
4390 * or due to sporadic interrupts thrown from our NIC. */
4391 @@ -963,6 +971,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
4392 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
4393 !trans_pcie->inta)
4394 iwl_enable_interrupts(trans);
4395 + return IRQ_HANDLED;
4396
4397 none:
4398 /* re-enable interrupts here since we don't have anything to service. */
4399 @@ -1015,7 +1024,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
4400 * If we have something to service, the tasklet will re-enable ints.
4401 * If we *don't* have something, we'll re-enable before leaving here.
4402 */
4403 - inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
4404 + inta_mask = iwl_read32(trans, CSR_INT_MASK);
4405 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
4406
4407
4408 diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
4409 index 0c9f70b..786bc11 100644
4410 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c
4411 +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
4412 @@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
4413 */
4414 int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
4415 {
4416 - bool cancel_flag = false;
4417 int status;
4418 struct cmd_ctrl_node *cmd_queued;
4419
4420 @@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
4421 atomic_inc(&adapter->cmd_pending);
4422
4423 /* Wait for completion */
4424 - wait_event_interruptible(adapter->cmd_wait_q.wait,
4425 - *(cmd_queued->condition));
4426 - if (!*(cmd_queued->condition))
4427 - cancel_flag = true;
4428 -
4429 - if (cancel_flag) {
4430 - mwifiex_cancel_pending_ioctl(adapter);
4431 - dev_dbg(adapter->dev, "cmd cancel\n");
4432 + status = wait_event_interruptible(adapter->cmd_wait_q.wait,
4433 + *(cmd_queued->condition));
4434 + if (status) {
4435 + dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
4436 + return status;
4437 }
4438
4439 status = adapter->cmd_wait_q.status;
4440 @@ -480,8 +476,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
4441 return false;
4442 }
4443
4444 - wait_event_interruptible(adapter->hs_activate_wait_q,
4445 - adapter->hs_activate_wait_q_woken);
4446 + if (wait_event_interruptible(adapter->hs_activate_wait_q,
4447 + adapter->hs_activate_wait_q_woken)) {
4448 + dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
4449 + return false;
4450 + }
4451
4452 return true;
4453 }
4454 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
4455 index 59474ae..175a9b9 100644
4456 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
4457 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
4458 @@ -5036,7 +5036,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4459 IEEE80211_HW_SUPPORTS_PS |
4460 IEEE80211_HW_PS_NULLFUNC_STACK |
4461 IEEE80211_HW_AMPDU_AGGREGATION |
4462 - IEEE80211_HW_REPORTS_TX_ACK_STATUS;
4463 + IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4464 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
4465
4466 /*
4467 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
4468 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
4469 index 69097d1..ee38e4c 100644
4470 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
4471 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
4472 @@ -391,10 +391,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
4473 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
4474 tx_info->status.ampdu_len = 1;
4475 tx_info->status.ampdu_ack_len = success ? 1 : 0;
4476 - /*
4477 - * TODO: Need to tear down BA session here
4478 - * if not successful.
4479 - */
4480 +
4481 + if (!success)
4482 + tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
4483 }
4484
4485 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
4486 @@ -1123,6 +1122,9 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
4487 struct ieee80211_iface_limit *if_limit;
4488 struct ieee80211_iface_combination *if_combination;
4489
4490 + if (rt2x00dev->ops->max_ap_intf < 2)
4491 + return;
4492 +
4493 /*
4494 * Build up AP interface limits structure.
4495 */
4496 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
4497 index fc24eb9..472c5c1 100644
4498 --- a/drivers/net/xen-netfront.c
4499 +++ b/drivers/net/xen-netfront.c
4500 @@ -1015,29 +1015,10 @@ err:
4501 i = xennet_fill_frags(np, skb, &tmpq);
4502
4503 /*
4504 - * Truesize approximates the size of true data plus
4505 - * any supervisor overheads. Adding hypervisor
4506 - * overheads has been shown to significantly reduce
4507 - * achievable bandwidth with the default receive
4508 - * buffer size. It is therefore not wise to account
4509 - * for it here.
4510 - *
4511 - * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
4512 - * to RX_COPY_THRESHOLD + the supervisor
4513 - * overheads. Here, we add the size of the data pulled
4514 - * in xennet_fill_frags().
4515 - *
4516 - * We also adjust for any unused space in the main
4517 - * data area by subtracting (RX_COPY_THRESHOLD -
4518 - * len). This is especially important with drivers
4519 - * which split incoming packets into header and data,
4520 - * using only 66 bytes of the main data area (see the
4521 - * e1000 driver for example.) On such systems,
4522 - * without this last adjustement, our achievable
4523 - * receive throughout using the standard receive
4524 - * buffer size was cut by 25%(!!!).
4525 - */
4526 - skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
4527 + * Truesize is the actual allocation size, even if the
4528 + * allocation is only partially used.
4529 + */
4530 + skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
4531 skb->len += skb->data_len;
4532
4533 if (rx->flags & XEN_NETRXF_csum_blank)
4534 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
4535 index dd90d15..71623a2 100644
4536 --- a/drivers/platform/x86/samsung-laptop.c
4537 +++ b/drivers/platform/x86/samsung-laptop.c
4538 @@ -1523,6 +1523,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
4539 },
4540 .driver_data = &samsung_broken_acpi_video,
4541 },
4542 + {
4543 + .callback = samsung_dmi_matched,
4544 + .ident = "N250P",
4545 + .matches = {
4546 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4547 + DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
4548 + DMI_MATCH(DMI_BOARD_NAME, "N250P"),
4549 + },
4550 + .driver_data = &samsung_broken_acpi_video,
4551 + },
4552 { },
4553 };
4554 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
4555 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
4556 index daaddec..b8ad71f 100644
4557 --- a/drivers/platform/x86/sony-laptop.c
4558 +++ b/drivers/platform/x86/sony-laptop.c
4559 @@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
4560 static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
4561 void *buffer, size_t buflen)
4562 {
4563 + int ret = 0;
4564 size_t len = len;
4565 union acpi_object *object = __call_snc_method(handle, name, value);
4566
4567 if (!object)
4568 return -EINVAL;
4569
4570 - if (object->type == ACPI_TYPE_BUFFER)
4571 + if (object->type == ACPI_TYPE_BUFFER) {
4572 len = MIN(buflen, object->buffer.length);
4573 + memcpy(buffer, object->buffer.pointer, len);
4574
4575 - else if (object->type == ACPI_TYPE_INTEGER)
4576 + } else if (object->type == ACPI_TYPE_INTEGER) {
4577 len = MIN(buflen, sizeof(object->integer.value));
4578 + memcpy(buffer, &object->integer.value, len);
4579
4580 - else {
4581 + } else {
4582 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
4583 ACPI_TYPE_BUFFER, object->type);
4584 - kfree(object);
4585 - return -EINVAL;
4586 + ret = -EINVAL;
4587 }
4588
4589 - memcpy(buffer, object->buffer.pointer, len);
4590 kfree(object);
4591 - return 0;
4592 + return ret;
4593 }
4594
4595 struct sony_nc_handles {
4596 diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
4597 index e39a0c7..70cd467 100644
4598 --- a/drivers/regulator/max8997.c
4599 +++ b/drivers/regulator/max8997.c
4600 @@ -69,26 +69,26 @@ struct voltage_map_desc {
4601 int step;
4602 };
4603
4604 -/* Voltage maps in mV */
4605 +/* Voltage maps in uV */
4606 static const struct voltage_map_desc ldo_voltage_map_desc = {
4607 - .min = 800, .max = 3950, .step = 50,
4608 + .min = 800000, .max = 3950000, .step = 50000,
4609 }; /* LDO1 ~ 18, 21 all */
4610
4611 static const struct voltage_map_desc buck1245_voltage_map_desc = {
4612 - .min = 650, .max = 2225, .step = 25,
4613 + .min = 650000, .max = 2225000, .step = 25000,
4614 }; /* Buck1, 2, 4, 5 */
4615
4616 static const struct voltage_map_desc buck37_voltage_map_desc = {
4617 - .min = 750, .max = 3900, .step = 50,
4618 + .min = 750000, .max = 3900000, .step = 50000,
4619 }; /* Buck3, 7 */
4620
4621 -/* current map in mA */
4622 +/* current map in uA */
4623 static const struct voltage_map_desc charger_current_map_desc = {
4624 - .min = 200, .max = 950, .step = 50,
4625 + .min = 200000, .max = 950000, .step = 50000,
4626 };
4627
4628 static const struct voltage_map_desc topoff_current_map_desc = {
4629 - .min = 50, .max = 200, .step = 10,
4630 + .min = 50000, .max = 200000, .step = 10000,
4631 };
4632
4633 static const struct voltage_map_desc *reg_voltage_map[] = {
4634 @@ -192,7 +192,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
4635 if (val > desc->max)
4636 return -EINVAL;
4637
4638 - return val * 1000;
4639 + return val;
4640 }
4641
4642 static int max8997_get_enable_register(struct regulator_dev *rdev,
4643 @@ -483,7 +483,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
4644 {
4645 struct max8997_data *max8997 = rdev_get_drvdata(rdev);
4646 struct i2c_client *i2c = max8997->iodev->i2c;
4647 - int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
4648 const struct voltage_map_desc *desc;
4649 int rid = rdev_get_id(rdev);
4650 int i, reg, shift, mask, ret;
4651 @@ -507,7 +506,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
4652
4653 desc = reg_voltage_map[rid];
4654
4655 - i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
4656 + i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
4657 if (i < 0)
4658 return i;
4659
4660 @@ -555,7 +554,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
4661 case MAX8997_BUCK4:
4662 case MAX8997_BUCK5:
4663 return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
4664 - max8997->ramp_delay);
4665 + max8997->ramp_delay * 1000);
4666 }
4667
4668 return 0;
4669 @@ -654,7 +653,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
4670 const struct voltage_map_desc *desc;
4671 int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
4672 bool gpio_dvs_mode = false;
4673 - int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
4674
4675 if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
4676 return -EINVAL;
4677 @@ -679,7 +677,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
4678 selector);
4679
4680 desc = reg_voltage_map[rid];
4681 - new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
4682 + new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
4683 if (new_val < 0)
4684 return new_val;
4685
4686 @@ -977,8 +975,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4687 max8997->buck1_vol[i] = ret =
4688 max8997_get_voltage_proper_val(
4689 &buck1245_voltage_map_desc,
4690 - pdata->buck1_voltage[i] / 1000,
4691 - pdata->buck1_voltage[i] / 1000 +
4692 + pdata->buck1_voltage[i],
4693 + pdata->buck1_voltage[i] +
4694 buck1245_voltage_map_desc.step);
4695 if (ret < 0)
4696 goto err_out;
4697 @@ -986,8 +984,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4698 max8997->buck2_vol[i] = ret =
4699 max8997_get_voltage_proper_val(
4700 &buck1245_voltage_map_desc,
4701 - pdata->buck2_voltage[i] / 1000,
4702 - pdata->buck2_voltage[i] / 1000 +
4703 + pdata->buck2_voltage[i],
4704 + pdata->buck2_voltage[i] +
4705 buck1245_voltage_map_desc.step);
4706 if (ret < 0)
4707 goto err_out;
4708 @@ -995,8 +993,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4709 max8997->buck5_vol[i] = ret =
4710 max8997_get_voltage_proper_val(
4711 &buck1245_voltage_map_desc,
4712 - pdata->buck5_voltage[i] / 1000,
4713 - pdata->buck5_voltage[i] / 1000 +
4714 + pdata->buck5_voltage[i],
4715 + pdata->buck5_voltage[i] +
4716 buck1245_voltage_map_desc.step);
4717 if (ret < 0)
4718 goto err_out;
4719 diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
4720 index 5dfa920..6a20019 100644
4721 --- a/drivers/regulator/max8998.c
4722 +++ b/drivers/regulator/max8998.c
4723 @@ -51,39 +51,39 @@ struct voltage_map_desc {
4724 int step;
4725 };
4726
4727 -/* Voltage maps */
4728 +/* Voltage maps in uV*/
4729 static const struct voltage_map_desc ldo23_voltage_map_desc = {
4730 - .min = 800, .step = 50, .max = 1300,
4731 + .min = 800000, .step = 50000, .max = 1300000,
4732 };
4733 static const struct voltage_map_desc ldo456711_voltage_map_desc = {
4734 - .min = 1600, .step = 100, .max = 3600,
4735 + .min = 1600000, .step = 100000, .max = 3600000,
4736 };
4737 static const struct voltage_map_desc ldo8_voltage_map_desc = {
4738 - .min = 3000, .step = 100, .max = 3600,
4739 + .min = 3000000, .step = 100000, .max = 3600000,
4740 };
4741 static const struct voltage_map_desc ldo9_voltage_map_desc = {
4742 - .min = 2800, .step = 100, .max = 3100,
4743 + .min = 2800000, .step = 100000, .max = 3100000,
4744 };
4745 static const struct voltage_map_desc ldo10_voltage_map_desc = {
4746 - .min = 950, .step = 50, .max = 1300,
4747 + .min = 95000, .step = 50000, .max = 1300000,
4748 };
4749 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
4750 - .min = 800, .step = 100, .max = 3300,
4751 + .min = 800000, .step = 100000, .max = 3300000,
4752 };
4753 static const struct voltage_map_desc ldo1415_voltage_map_desc = {
4754 - .min = 1200, .step = 100, .max = 3300,
4755 + .min = 1200000, .step = 100000, .max = 3300000,
4756 };
4757 static const struct voltage_map_desc ldo1617_voltage_map_desc = {
4758 - .min = 1600, .step = 100, .max = 3600,
4759 + .min = 1600000, .step = 100000, .max = 3600000,
4760 };
4761 static const struct voltage_map_desc buck12_voltage_map_desc = {
4762 - .min = 750, .step = 25, .max = 1525,
4763 + .min = 750000, .step = 25000, .max = 1525000,
4764 };
4765 static const struct voltage_map_desc buck3_voltage_map_desc = {
4766 - .min = 1600, .step = 100, .max = 3600,
4767 + .min = 1600000, .step = 100000, .max = 3600000,
4768 };
4769 static const struct voltage_map_desc buck4_voltage_map_desc = {
4770 - .min = 800, .step = 100, .max = 2300,
4771 + .min = 800000, .step = 100000, .max = 2300000,
4772 };
4773
4774 static const struct voltage_map_desc *ldo_voltage_map[] = {
4775 @@ -445,9 +445,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
4776 if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
4777 return 0;
4778
4779 - difference = (new_selector - old_selector) * desc->step;
4780 + difference = (new_selector - old_selector) * desc->step / 1000;
4781 if (difference > 0)
4782 - return difference / ((val & 0x0f) + 1);
4783 + return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
4784
4785 return 0;
4786 }
4787 @@ -702,7 +702,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4788 i = 0;
4789 while (buck12_voltage_map_desc.min +
4790 buck12_voltage_map_desc.step*i
4791 - < (pdata->buck1_voltage1 / 1000))
4792 + < pdata->buck1_voltage1)
4793 i++;
4794 max8998->buck1_vol[0] = i;
4795 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
4796 @@ -713,7 +713,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4797 i = 0;
4798 while (buck12_voltage_map_desc.min +
4799 buck12_voltage_map_desc.step*i
4800 - < (pdata->buck1_voltage2 / 1000))
4801 + < pdata->buck1_voltage2)
4802 i++;
4803
4804 max8998->buck1_vol[1] = i;
4805 @@ -725,7 +725,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4806 i = 0;
4807 while (buck12_voltage_map_desc.min +
4808 buck12_voltage_map_desc.step*i
4809 - < (pdata->buck1_voltage3 / 1000))
4810 + < pdata->buck1_voltage3)
4811 i++;
4812
4813 max8998->buck1_vol[2] = i;
4814 @@ -737,7 +737,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4815 i = 0;
4816 while (buck12_voltage_map_desc.min +
4817 buck12_voltage_map_desc.step*i
4818 - < (pdata->buck1_voltage4 / 1000))
4819 + < pdata->buck1_voltage4)
4820 i++;
4821
4822 max8998->buck1_vol[3] = i;
4823 @@ -763,7 +763,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4824 i = 0;
4825 while (buck12_voltage_map_desc.min +
4826 buck12_voltage_map_desc.step*i
4827 - < (pdata->buck2_voltage1 / 1000))
4828 + < pdata->buck2_voltage1)
4829 i++;
4830 max8998->buck2_vol[0] = i;
4831 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
4832 @@ -774,7 +774,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4833 i = 0;
4834 while (buck12_voltage_map_desc.min +
4835 buck12_voltage_map_desc.step*i
4836 - < (pdata->buck2_voltage2 / 1000))
4837 + < pdata->buck2_voltage2)
4838 i++;
4839 max8998->buck2_vol[1] = i;
4840 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
4841 @@ -792,8 +792,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4842 int count = (desc->max - desc->min) / desc->step + 1;
4843
4844 regulators[index].n_voltages = count;
4845 - regulators[index].min_uV = desc->min * 1000;
4846 - regulators[index].uV_step = desc->step * 1000;
4847 + regulators[index].min_uV = desc->min;
4848 + regulators[index].uV_step = desc->step;
4849 }
4850
4851 config.dev = max8998->dev;
4852 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
4853 index 926f9c8..3fd1b88 100644
4854 --- a/drivers/regulator/s2mps11.c
4855 +++ b/drivers/regulator/s2mps11.c
4856 @@ -269,16 +269,16 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
4857
4858 if (ramp_enable) {
4859 if (s2mps11->buck2_ramp)
4860 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
4861 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) << 6;
4862 if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
4863 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
4864 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) << 4;
4865 sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
4866 }
4867
4868 ramp_reg &= 0x00;
4869 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
4870 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
4871 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
4872 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) << 6;
4873 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) << 4;
4874 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) << 2;
4875 ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
4876 sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
4877
4878 diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
4879 index 782c228..416fe0a 100644
4880 --- a/drivers/regulator/wm831x-dcdc.c
4881 +++ b/drivers/regulator/wm831x-dcdc.c
4882 @@ -290,7 +290,7 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
4883 if (vsel > dcdc->dvs_vsel) {
4884 ret = wm831x_set_bits(wm831x, dvs_reg,
4885 WM831X_DC1_DVS_VSEL_MASK,
4886 - dcdc->dvs_vsel);
4887 + vsel);
4888 if (ret == 0)
4889 dcdc->dvs_vsel = vsel;
4890 else
4891 diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
4892 index 368368f..908d287 100644
4893 --- a/drivers/s390/cio/device_pgid.c
4894 +++ b/drivers/s390/cio/device_pgid.c
4895 @@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
4896 * Determine pathgroup state from PGID data.
4897 */
4898 static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
4899 - int *mismatch, int *reserved, u8 *reset)
4900 + int *mismatch, u8 *reserved, u8 *reset)
4901 {
4902 struct pgid *pgid = &cdev->private->pgid[0];
4903 struct pgid *first = NULL;
4904 @@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
4905 if ((cdev->private->pgid_valid_mask & lpm) == 0)
4906 continue;
4907 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
4908 - *reserved = 1;
4909 + *reserved |= lpm;
4910 if (pgid_is_reset(pgid)) {
4911 *reset |= lpm;
4912 continue;
4913 @@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
4914 struct subchannel *sch = to_subchannel(cdev->dev.parent);
4915 struct pgid *pgid;
4916 int mismatch = 0;
4917 - int reserved = 0;
4918 + u8 reserved = 0;
4919 u8 reset = 0;
4920 u8 donepm;
4921
4922 if (rc)
4923 goto out;
4924 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
4925 - if (reserved)
4926 + if (reserved == cdev->private->pgid_valid_mask)
4927 rc = -EUSERS;
4928 else if (mismatch)
4929 rc = -EOPNOTSUPP;
4930 @@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
4931 }
4932 out:
4933 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
4934 - "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
4935 + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
4936 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
4937 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
4938 switch (rc) {
4939 diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
4940 index 8f7eb4f..487aa6f 100644
4941 --- a/drivers/scsi/mvsas/mv_94xx.h
4942 +++ b/drivers/scsi/mvsas/mv_94xx.h
4943 @@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
4944 #define SPI_ADDR_VLD_94XX (1U << 1)
4945 #define SPI_CTRL_SpiStart_94XX (1U << 0)
4946
4947 -#define mv_ffc(x) ffz(x)
4948 -
4949 static inline int
4950 mv_ffc64(u64 v)
4951 {
4952 - int i;
4953 - i = mv_ffc((u32)v);
4954 - if (i >= 0)
4955 - return i;
4956 - i = mv_ffc((u32)(v>>32));
4957 -
4958 - if (i != 0)
4959 - return 32 + i;
4960 -
4961 - return -1;
4962 + u64 x = ~v;
4963 + return x ? __ffs64(x) : -1;
4964 }
4965
4966 #define r_reg_set_enable(i) \
4967 diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
4968 index c04a4f5..da24955 100644
4969 --- a/drivers/scsi/mvsas/mv_sas.h
4970 +++ b/drivers/scsi/mvsas/mv_sas.h
4971 @@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
4972 #define DEV_IS_EXPANDER(type) \
4973 ((type == EDGE_DEV) || (type == FANOUT_DEV))
4974
4975 -#define bit(n) ((u32)1 << n)
4976 +#define bit(n) ((u64)1 << n)
4977
4978 #define for_each_phy(__lseq_mask, __mc, __lseq) \
4979 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
4980 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
4981 index 1c28215..83d7984 100644
4982 --- a/drivers/scsi/qla2xxx/qla_attr.c
4983 +++ b/drivers/scsi/qla2xxx/qla_attr.c
4984 @@ -1615,8 +1615,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
4985 * At this point all fcport's software-states are cleared. Perform any
4986 * final cleanup of firmware resources (PCBs and XCBs).
4987 */
4988 - if (fcport->loop_id != FC_NO_LOOP_ID &&
4989 - !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
4990 + if (fcport->loop_id != FC_NO_LOOP_ID) {
4991 if (IS_FWI2_CAPABLE(fcport->vha->hw))
4992 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
4993 fcport->loop_id, fcport->d_id.b.domain,
4994 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
4995 index 2f9bddd..9f34ded 100644
4996 --- a/drivers/scsi/qla2xxx/qla_bsg.c
4997 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
4998 @@ -219,7 +219,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
4999 break;
5000 }
5001 exit_fcp_prio_cfg:
5002 - bsg_job->job_done(bsg_job);
5003 + if (!ret)
5004 + bsg_job->job_done(bsg_job);
5005 return ret;
5006 }
5007
5008 @@ -741,9 +742,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5009 if (qla81xx_get_port_config(vha, config)) {
5010 ql_log(ql_log_warn, vha, 0x701f,
5011 "Get port config failed.\n");
5012 - bsg_job->reply->result = (DID_ERROR << 16);
5013 rval = -EPERM;
5014 - goto done_free_dma_req;
5015 + goto done_free_dma_rsp;
5016 }
5017
5018 ql_dbg(ql_dbg_user, vha, 0x70c0,
5019 @@ -761,9 +761,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5020 new_config, elreq.options);
5021
5022 if (rval) {
5023 - bsg_job->reply->result = (DID_ERROR << 16);
5024 rval = -EPERM;
5025 - goto done_free_dma_req;
5026 + goto done_free_dma_rsp;
5027 }
5028
5029 type = "FC_BSG_HST_VENDOR_LOOPBACK";
5030 @@ -795,9 +794,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5031 "MPI reset failed.\n");
5032 }
5033
5034 - bsg_job->reply->result = (DID_ERROR << 16);
5035 rval = -EIO;
5036 - goto done_free_dma_req;
5037 + goto done_free_dma_rsp;
5038 }
5039 } else {
5040 type = "FC_BSG_HST_VENDOR_LOOPBACK";
5041 @@ -812,34 +810,27 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5042 ql_log(ql_log_warn, vha, 0x702c,
5043 "Vendor request %s failed.\n", type);
5044
5045 - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5046 - sizeof(struct fc_bsg_reply);
5047 -
5048 - memcpy(fw_sts_ptr, response, sizeof(response));
5049 - fw_sts_ptr += sizeof(response);
5050 - *fw_sts_ptr = command_sent;
5051 rval = 0;
5052 bsg_job->reply->result = (DID_ERROR << 16);
5053 + bsg_job->reply->reply_payload_rcv_len = 0;
5054 } else {
5055 ql_dbg(ql_dbg_user, vha, 0x702d,
5056 "Vendor request %s completed.\n", type);
5057 -
5058 - bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5059 - sizeof(response) + sizeof(uint8_t);
5060 - bsg_job->reply->reply_payload_rcv_len =
5061 - bsg_job->reply_payload.payload_len;
5062 - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5063 - sizeof(struct fc_bsg_reply);
5064 - memcpy(fw_sts_ptr, response, sizeof(response));
5065 - fw_sts_ptr += sizeof(response);
5066 - *fw_sts_ptr = command_sent;
5067 - bsg_job->reply->result = DID_OK;
5068 + bsg_job->reply->result = (DID_OK << 16);
5069 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
5070 bsg_job->reply_payload.sg_cnt, rsp_data,
5071 rsp_data_len);
5072 }
5073 - bsg_job->job_done(bsg_job);
5074
5075 + bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5076 + sizeof(response) + sizeof(uint8_t);
5077 + fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5078 + sizeof(struct fc_bsg_reply);
5079 + memcpy(fw_sts_ptr, response, sizeof(response));
5080 + fw_sts_ptr += sizeof(response);
5081 + *fw_sts_ptr = command_sent;
5082 +
5083 +done_free_dma_rsp:
5084 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
5085 rsp_data, rsp_data_dma);
5086 done_free_dma_req:
5087 @@ -853,6 +844,8 @@ done_unmap_req_sg:
5088 dma_unmap_sg(&ha->pdev->dev,
5089 bsg_job->request_payload.sg_list,
5090 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
5091 + if (!rval)
5092 + bsg_job->job_done(bsg_job);
5093 return rval;
5094 }
5095
5096 @@ -877,16 +870,15 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
5097 if (rval) {
5098 ql_log(ql_log_warn, vha, 0x7030,
5099 "Vendor request 84xx reset failed.\n");
5100 - rval = 0;
5101 - bsg_job->reply->result = (DID_ERROR << 16);
5102 + rval = (DID_ERROR << 16);
5103
5104 } else {
5105 ql_dbg(ql_dbg_user, vha, 0x7031,
5106 "Vendor request 84xx reset completed.\n");
5107 bsg_job->reply->result = DID_OK;
5108 + bsg_job->job_done(bsg_job);
5109 }
5110
5111 - bsg_job->job_done(bsg_job);
5112 return rval;
5113 }
5114
5115 @@ -976,8 +968,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
5116 ql_log(ql_log_warn, vha, 0x7037,
5117 "Vendor request 84xx updatefw failed.\n");
5118
5119 - rval = 0;
5120 - bsg_job->reply->result = (DID_ERROR << 16);
5121 + rval = (DID_ERROR << 16);
5122 } else {
5123 ql_dbg(ql_dbg_user, vha, 0x7038,
5124 "Vendor request 84xx updatefw completed.\n");
5125 @@ -986,7 +977,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
5126 bsg_job->reply->result = DID_OK;
5127 }
5128
5129 - bsg_job->job_done(bsg_job);
5130 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
5131
5132 done_free_fw_buf:
5133 @@ -996,6 +986,8 @@ done_unmap_sg:
5134 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
5135 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
5136
5137 + if (!rval)
5138 + bsg_job->job_done(bsg_job);
5139 return rval;
5140 }
5141
5142 @@ -1163,8 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
5143 ql_log(ql_log_warn, vha, 0x7043,
5144 "Vendor request 84xx mgmt failed.\n");
5145
5146 - rval = 0;
5147 - bsg_job->reply->result = (DID_ERROR << 16);
5148 + rval = (DID_ERROR << 16);
5149
5150 } else {
5151 ql_dbg(ql_dbg_user, vha, 0x7044,
5152 @@ -1184,8 +1175,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
5153 }
5154 }
5155
5156 - bsg_job->job_done(bsg_job);
5157 -
5158 done_unmap_sg:
5159 if (mgmt_b)
5160 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
5161 @@ -1200,6 +1189,8 @@ done_unmap_sg:
5162 exit_mgmt:
5163 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
5164
5165 + if (!rval)
5166 + bsg_job->job_done(bsg_job);
5167 return rval;
5168 }
5169
5170 @@ -1276,9 +1267,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
5171 fcport->port_name[3], fcport->port_name[4],
5172 fcport->port_name[5], fcport->port_name[6],
5173 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
5174 - rval = 0;
5175 - bsg_job->reply->result = (DID_ERROR << 16);
5176 -
5177 + rval = (DID_ERROR << 16);
5178 } else {
5179 if (!port_param->mode) {
5180 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5181 @@ -1292,9 +1281,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
5182 }
5183
5184 bsg_job->reply->result = DID_OK;
5185 + bsg_job->job_done(bsg_job);
5186 }
5187
5188 - bsg_job->job_done(bsg_job);
5189 return rval;
5190 }
5191
5192 @@ -1887,8 +1876,6 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
5193 return qla24xx_process_bidir_cmd(bsg_job);
5194
5195 default:
5196 - bsg_job->reply->result = (DID_ERROR << 16);
5197 - bsg_job->job_done(bsg_job);
5198 return -ENOSYS;
5199 }
5200 }
5201 @@ -1919,8 +1906,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
5202 ql_dbg(ql_dbg_user, vha, 0x709f,
5203 "BSG: ISP abort active/needed -- cmd=%d.\n",
5204 bsg_job->request->msgcode);
5205 - bsg_job->reply->result = (DID_ERROR << 16);
5206 - bsg_job->job_done(bsg_job);
5207 return -EBUSY;
5208 }
5209
5210 @@ -1943,7 +1928,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
5211 case FC_BSG_RPT_CT:
5212 default:
5213 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
5214 - bsg_job->reply->result = ret;
5215 break;
5216 }
5217 return ret;
5218 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
5219 index d501bf5..f4b1fc8 100644
5220 --- a/drivers/scsi/qla2xxx/qla_os.c
5221 +++ b/drivers/scsi/qla2xxx/qla_os.c
5222 @@ -2755,6 +2755,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
5223
5224 ha->flags.host_shutting_down = 1;
5225
5226 + set_bit(UNLOADING, &base_vha->dpc_flags);
5227 mutex_lock(&ha->vport_lock);
5228 while (ha->cur_vport_count) {
5229 struct Scsi_Host *scsi_host;
5230 @@ -2784,8 +2785,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
5231 "Error while clearing DRV-Presence.\n");
5232 }
5233
5234 - set_bit(UNLOADING, &base_vha->dpc_flags);
5235 -
5236 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
5237
5238 qla2x00_dfs_remove(base_vha);
5239 @@ -4505,9 +4504,9 @@ qla2x00_do_dpc(void *data)
5240 "ISP abort end.\n");
5241 }
5242
5243 - if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
5244 + if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
5245 + &base_vha->dpc_flags)) {
5246 qla2x00_update_fcports(base_vha);
5247 - clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
5248 }
5249
5250 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
5251 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
5252 index 62aa558..661d33e 100644
5253 --- a/drivers/scsi/qla2xxx/qla_target.c
5254 +++ b/drivers/scsi/qla2xxx/qla_target.c
5255 @@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
5256 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
5257 {
5258 struct qla_hw_data *ha = vha->hw;
5259 + struct se_session *se_sess = sess->se_sess;
5260 struct qla_tgt_mgmt_cmd *mcmd;
5261 + struct se_cmd *se_cmd;
5262 + u32 lun = 0;
5263 int rc;
5264 + bool found_lun = false;
5265 +
5266 + spin_lock(&se_sess->sess_cmd_lock);
5267 + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
5268 + struct qla_tgt_cmd *cmd =
5269 + container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
5270 + if (cmd->tag == abts->exchange_addr_to_abort) {
5271 + lun = cmd->unpacked_lun;
5272 + found_lun = true;
5273 + break;
5274 + }
5275 + }
5276 + spin_unlock(&se_sess->sess_cmd_lock);
5277 +
5278 + if (!found_lun)
5279 + return -ENOENT;
5280
5281 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
5282 "qla_target(%d): task abort (tag=%d)\n",
5283 @@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
5284 mcmd->sess = sess;
5285 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
5286
5287 - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
5288 + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
5289 abts->exchange_addr_to_abort);
5290 if (rc != 0) {
5291 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
5292 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
5293 index ce5224c..931a7d9 100644
5294 --- a/drivers/scsi/scsi_sysfs.c
5295 +++ b/drivers/scsi/scsi_sysfs.c
5296 @@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
5297
5298 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
5299
5300 -static int check_reset_type(char *str)
5301 +static int check_reset_type(const char *str)
5302 {
5303 - if (strncmp(str, "adapter", 10) == 0)
5304 + if (sysfs_streq(str, "adapter"))
5305 return SCSI_ADAPTER_RESET;
5306 - else if (strncmp(str, "firmware", 10) == 0)
5307 + else if (sysfs_streq(str, "firmware"))
5308 return SCSI_FIRMWARE_RESET;
5309 else
5310 return 0;
5311 @@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
5312 struct Scsi_Host *shost = class_to_shost(dev);
5313 struct scsi_host_template *sht = shost->hostt;
5314 int ret = -EINVAL;
5315 - char str[10];
5316 int type;
5317
5318 - sscanf(buf, "%s", str);
5319 - type = check_reset_type(str);
5320 -
5321 + type = check_reset_type(buf);
5322 if (!type)
5323 goto exit_store_host_reset;
5324
5325 diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
5326 index 2093403..3464d14 100644
5327 --- a/drivers/staging/comedi/Kconfig
5328 +++ b/drivers/staging/comedi/Kconfig
5329 @@ -444,6 +444,7 @@ config COMEDI_ADQ12B
5330
5331 config COMEDI_NI_AT_A2150
5332 tristate "NI AT-A2150 ISA card support"
5333 + select COMEDI_FC
5334 depends on VIRT_TO_BUS
5335 ---help---
5336 Enable support for National Instruments AT-A2150 cards
5337 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
5338 index c2a32cf..1ab8037 100644
5339 --- a/drivers/staging/comedi/comedi_fops.c
5340 +++ b/drivers/staging/comedi/comedi_fops.c
5341 @@ -1546,6 +1546,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
5342 if (cmd == COMEDI_DEVCONFIG) {
5343 rc = do_devconfig_ioctl(dev,
5344 (struct comedi_devconfig __user *)arg);
5345 + if (rc == 0)
5346 + /* Evade comedi_auto_unconfig(). */
5347 + dev_file_info->hardware_device = NULL;
5348 goto done;
5349 }
5350
5351 diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
5352 index 7817def..ec7cf62 100644
5353 --- a/drivers/staging/comedi/drivers/comedi_test.c
5354 +++ b/drivers/staging/comedi/drivers/comedi_test.c
5355 @@ -372,7 +372,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
5356 struct waveform_private *devpriv = dev->private;
5357
5358 devpriv->timer_running = 0;
5359 - del_timer(&devpriv->timer);
5360 + del_timer_sync(&devpriv->timer);
5361 return 0;
5362 }
5363
5364 diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
5365 index f284a90..f3f5478 100644
5366 --- a/drivers/staging/comedi/drivers/ni_pcimio.c
5367 +++ b/drivers/staging/comedi/drivers/ni_pcimio.c
5368 @@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
5369 .ao_range_table = &range_ni_M_625x_ao,
5370 .reg_type = ni_reg_625x,
5371 .ao_unipolar = 0,
5372 - .ao_speed = 357,
5373 + .ao_speed = 350,
5374 .num_p0_dio_channels = 8,
5375 .caldac = {caldac_none},
5376 .has_8255 = 0,
5377 @@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
5378 .ao_range_table = &range_ni_M_625x_ao,
5379 .reg_type = ni_reg_625x,
5380 .ao_unipolar = 0,
5381 - .ao_speed = 357,
5382 + .ao_speed = 350,
5383 .num_p0_dio_channels = 8,
5384 .caldac = {caldac_none},
5385 .has_8255 = 0,
5386 @@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
5387 .ao_range_table = &range_ni_M_625x_ao,
5388 .reg_type = ni_reg_625x,
5389 .ao_unipolar = 0,
5390 - .ao_speed = 357,
5391 + .ao_speed = 350,
5392 .num_p0_dio_channels = 8,
5393 .caldac = {caldac_none},
5394 .has_8255 = 0,
5395 @@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
5396 .ao_range_table = &range_ni_M_625x_ao,
5397 .reg_type = ni_reg_625x,
5398 .ao_unipolar = 0,
5399 - .ao_speed = 357,
5400 + .ao_speed = 350,
5401 .num_p0_dio_channels = 32,
5402 .caldac = {caldac_none},
5403 .has_8255 = 0,
5404 @@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
5405 .ao_range_table = &range_ni_M_625x_ao,
5406 .reg_type = ni_reg_625x,
5407 .ao_unipolar = 0,
5408 - .ao_speed = 357,
5409 + .ao_speed = 350,
5410 .num_p0_dio_channels = 32,
5411 .caldac = {caldac_none},
5412 .has_8255 = 0,
5413 @@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
5414 .ao_range_table = &range_ni_M_628x_ao,
5415 .reg_type = ni_reg_628x,
5416 .ao_unipolar = 1,
5417 - .ao_speed = 357,
5418 + .ao_speed = 350,
5419 .num_p0_dio_channels = 8,
5420 .caldac = {caldac_none},
5421 .has_8255 = 0,
5422 @@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
5423 .ao_range_table = &range_ni_M_628x_ao,
5424 .reg_type = ni_reg_628x,
5425 .ao_unipolar = 1,
5426 - .ao_speed = 357,
5427 + .ao_speed = 350,
5428 .num_p0_dio_channels = 8,
5429 .caldac = {caldac_none},
5430 .has_8255 = 0,
5431 @@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
5432 .ao_range_table = &range_ni_M_628x_ao,
5433 .reg_type = ni_reg_628x,
5434 .ao_unipolar = 1,
5435 - .ao_speed = 357,
5436 + .ao_speed = 350,
5437 .num_p0_dio_channels = 32,
5438 .caldac = {caldac_none},
5439 .has_8255 = 0,
5440 diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
5441 index c6f3ef6..784fa4d 100644
5442 --- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
5443 +++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
5444 @@ -207,7 +207,12 @@ struct drm_gem_object * omap_gem_prime_import(struct drm_device *dev,
5445 obj = buffer->priv;
5446 /* is it from our device? */
5447 if (obj->dev == dev) {
5448 + /*
5449 + * Importing dmabuf exported from out own gem increases
5450 + * refcount on gem itself instead of f_count of dmabuf.
5451 + */
5452 drm_gem_object_reference(obj);
5453 + dma_buf_put(buffer);
5454 return obj;
5455 }
5456 }
5457 diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
5458 index 6b73843..a96cd06 100644
5459 --- a/drivers/staging/rtl8712/usb_intf.c
5460 +++ b/drivers/staging/rtl8712/usb_intf.c
5461 @@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
5462 {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
5463 /* Belkin */
5464 {USB_DEVICE(0x050D, 0x945A)},
5465 + /* ISY IWL - Belkin clone */
5466 + {USB_DEVICE(0x050D, 0x11F1)},
5467 /* Corega */
5468 {USB_DEVICE(0x07AA, 0x0047)},
5469 /* D-Link */
5470 diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
5471 index df95337..7616f05 100644
5472 --- a/drivers/staging/speakup/synth.c
5473 +++ b/drivers/staging/speakup/synth.c
5474 @@ -342,7 +342,7 @@ int synth_init(char *synth_name)
5475
5476 mutex_lock(&spk_mutex);
5477 /* First, check if we already have it loaded. */
5478 - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
5479 + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
5480 if (strcmp(synths[i]->name, synth_name) == 0)
5481 synth = synths[i];
5482
5483 @@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
5484 int i;
5485 int status = 0;
5486 mutex_lock(&spk_mutex);
5487 - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
5488 + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
5489 /* synth_remove() is responsible for rotating the array down */
5490 if (in_synth == synths[i]) {
5491 mutex_unlock(&spk_mutex);
5492 diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
5493 index 28edf9e..16a229d 100644
5494 --- a/drivers/staging/vt6656/dpc.c
5495 +++ b/drivers/staging/vt6656/dpc.c
5496 @@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption (
5497
5498 PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
5499 *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
5500 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
5501 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
5502 if (byDecMode == KEY_CTL_TKIP) {
5503 *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
5504 } else {
5505 @@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption (
5506
5507 PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
5508 *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
5509 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
5510 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
5511
5512 if (byDecMode == KEY_CTL_TKIP) {
5513 *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
5514 diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
5515 index a61fcb9..bf24adb 100644
5516 --- a/drivers/staging/vt6656/key.c
5517 +++ b/drivers/staging/vt6656/key.c
5518 @@ -223,7 +223,7 @@ BOOL KeybSetKey(
5519 PSKeyManagement pTable,
5520 PBYTE pbyBSSID,
5521 DWORD dwKeyIndex,
5522 - unsigned long uKeyLength,
5523 + u32 uKeyLength,
5524 PQWORD pKeyRSC,
5525 PBYTE pbyKey,
5526 BYTE byKeyDecMode
5527 @@ -235,7 +235,8 @@ BOOL KeybSetKey(
5528 PSKeyItem pKey;
5529 unsigned int uKeyIdx;
5530
5531 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
5532 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5533 + "Enter KeybSetKey: %X\n", dwKeyIndex);
5534
5535 j = (MAX_KEY_TABLE-1);
5536 for (i=0;i<(MAX_KEY_TABLE-1);i++) {
5537 @@ -261,7 +262,9 @@ BOOL KeybSetKey(
5538 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5539 // Group transmit key
5540 pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
5541 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
5542 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5543 + "Group transmit key(R)[%X]: %d\n",
5544 + pTable->KeyTable[i].dwGTKeyIndex, i);
5545 }
5546 pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
5547 pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
5548 @@ -302,9 +305,12 @@ BOOL KeybSetKey(
5549 }
5550 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5551
5552 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
5553 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
5554 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
5555 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
5556 + pKey->dwTSC47_16);
5557 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ",
5558 + pKey->wTSC15_0);
5559 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
5560 + pKey->dwKeyIndex);
5561
5562 return (TRUE);
5563 }
5564 @@ -326,7 +332,9 @@ BOOL KeybSetKey(
5565 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5566 // Group transmit key
5567 pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
5568 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j);
5569 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5570 + "Group transmit key(N)[%X]: %d\n",
5571 + pTable->KeyTable[j].dwGTKeyIndex, j);
5572 }
5573 pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
5574 pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
5575 @@ -367,9 +375,11 @@ BOOL KeybSetKey(
5576 }
5577 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5578
5579 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
5580 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
5581 + pKey->dwTSC47_16);
5582 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
5583 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
5584 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
5585 + pKey->dwKeyIndex);
5586
5587 return (TRUE);
5588 }
5589 @@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
5590 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]);
5591 }
5592 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5593 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
5594 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
5595 + pTable->KeyTable[i].dwGTKeyIndex);
5596
5597 return (TRUE);
5598 }
5599 @@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey(
5600 void *pDeviceHandler,
5601 PSKeyManagement pTable,
5602 DWORD dwKeyIndex,
5603 - unsigned long uKeyLength,
5604 + u32 uKeyLength,
5605 PQWORD pKeyRSC,
5606 PBYTE pbyKey,
5607 BYTE byKeyDecMode
5608 @@ -696,7 +707,10 @@ BOOL KeybSetDefaultKey(
5609 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5610 // Group transmit key
5611 pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
5612 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1);
5613 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5614 + "Group transmit key(R)[%X]: %d\n",
5615 + pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
5616 + MAX_KEY_TABLE-1);
5617
5618 }
5619 pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
5620 @@ -747,9 +761,11 @@ BOOL KeybSetDefaultKey(
5621 }
5622 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5623
5624 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
5625 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n",
5626 + pKey->dwTSC47_16);
5627 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
5628 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
5629 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
5630 + pKey->dwKeyIndex);
5631
5632 return (TRUE);
5633 }
5634 @@ -775,7 +791,7 @@ BOOL KeybSetAllGroupKey(
5635 void *pDeviceHandler,
5636 PSKeyManagement pTable,
5637 DWORD dwKeyIndex,
5638 - unsigned long uKeyLength,
5639 + u32 uKeyLength,
5640 PQWORD pKeyRSC,
5641 PBYTE pbyKey,
5642 BYTE byKeyDecMode
5643 @@ -787,7 +803,8 @@ BOOL KeybSetAllGroupKey(
5644 PSKeyItem pKey;
5645 unsigned int uKeyIdx;
5646
5647 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
5648 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
5649 + dwKeyIndex);
5650
5651
5652 if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
5653 @@ -804,7 +821,9 @@ BOOL KeybSetAllGroupKey(
5654 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5655 // Group transmit key
5656 pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
5657 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
5658 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5659 + "Group transmit key(R)[%X]: %d\n",
5660 + pTable->KeyTable[i].dwGTKeyIndex, i);
5661
5662 }
5663 pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
5664 diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
5665 index f749c7a..bd35d39 100644
5666 --- a/drivers/staging/vt6656/key.h
5667 +++ b/drivers/staging/vt6656/key.h
5668 @@ -58,7 +58,7 @@
5669 typedef struct tagSKeyItem
5670 {
5671 BOOL bKeyValid;
5672 - unsigned long uKeyLength;
5673 + u32 uKeyLength;
5674 BYTE abyKey[MAX_KEY_LEN];
5675 QWORD KeyRSC;
5676 DWORD dwTSC47_16;
5677 @@ -107,7 +107,7 @@ BOOL KeybSetKey(
5678 PSKeyManagement pTable,
5679 PBYTE pbyBSSID,
5680 DWORD dwKeyIndex,
5681 - unsigned long uKeyLength,
5682 + u32 uKeyLength,
5683 PQWORD pKeyRSC,
5684 PBYTE pbyKey,
5685 BYTE byKeyDecMode
5686 @@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey(
5687 void *pDeviceHandler,
5688 PSKeyManagement pTable,
5689 DWORD dwKeyIndex,
5690 - unsigned long uKeyLength,
5691 + u32 uKeyLength,
5692 PQWORD pKeyRSC,
5693 PBYTE pbyKey,
5694 BYTE byKeyDecMode
5695 @@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey(
5696 void *pDeviceHandler,
5697 PSKeyManagement pTable,
5698 DWORD dwKeyIndex,
5699 - unsigned long uKeyLength,
5700 + u32 uKeyLength,
5701 PQWORD pKeyRSC,
5702 PBYTE pbyKey,
5703 BYTE byKeyDecMode
5704 diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
5705 index af4a29d..8fddc7b 100644
5706 --- a/drivers/staging/vt6656/mac.c
5707 +++ b/drivers/staging/vt6656/mac.c
5708 @@ -260,7 +260,8 @@ BYTE pbyData[24];
5709 dwData1 <<= 16;
5710 dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
5711
5712 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
5713 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
5714 + " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
5715
5716 //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
5717 //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
5718 @@ -277,7 +278,8 @@ BYTE pbyData[24];
5719 dwData2 <<= 8;
5720 dwData2 |= *(pbyAddr+0);
5721
5722 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2);
5723 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
5724 + wOffset, dwData2);
5725
5726 //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
5727 //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
5728 diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
5729 index 593cdc7..74c0598 100644
5730 --- a/drivers/staging/vt6656/rf.c
5731 +++ b/drivers/staging/vt6656/rf.c
5732 @@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr;
5733 return TRUE;
5734 }
5735
5736 + if (uCH == 0)
5737 + return -EINVAL;
5738 +
5739 switch (uRATE) {
5740 case RATE_1M:
5741 case RATE_2M:
5742 diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
5743 index 3390838..5c154e3 100644
5744 --- a/drivers/staging/vt6656/rxtx.c
5745 +++ b/drivers/staging/vt6656/rxtx.c
5746 @@ -375,7 +375,8 @@ s_vFillTxKey (
5747 *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
5748 // Append IV&ExtIV after Mac Header
5749 *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
5750 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
5751 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
5752 + *pdwExtIV);
5753
5754 } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
5755 pTransmitKey->wTSC15_0++;
5756 @@ -1751,7 +1752,8 @@ s_bPacketToWirelessUsb(
5757 MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
5758 dwMIC_Priority = 0;
5759 MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
5760 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
5761 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n",
5762 + dwMICKey0, dwMICKey1);
5763
5764 ///////////////////////////////////////////////////////////////////
5765
5766 @@ -2633,7 +2635,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
5767 MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
5768 dwMIC_Priority = 0;
5769 MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
5770 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
5771 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\
5772 + " %X, %X\n", dwMICKey0, dwMICKey1);
5773
5774 uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
5775
5776 @@ -2653,7 +2656,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
5777
5778 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
5779 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
5780 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
5781 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n",
5782 + *pdwMIC_L, *pdwMIC_R);
5783
5784 }
5785
5786 @@ -3027,7 +3031,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
5787 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
5788 }
5789 else {
5790 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
5791 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
5792 + pTransmitKey->dwKeyIndex);
5793 bNeedEncryption = TRUE;
5794 }
5795 }
5796 @@ -3041,7 +3046,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
5797 if (pDevice->bEnableHostWEP) {
5798 if ((uNodeIndex != 0) &&
5799 (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
5800 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
5801 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
5802 + pTransmitKey->dwKeyIndex);
5803 bNeedEncryption = TRUE;
5804 }
5805 }
5806 diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
5807 index 8e9450e..dfbf747 100644
5808 --- a/drivers/staging/vt6656/ttype.h
5809 +++ b/drivers/staging/vt6656/ttype.h
5810 @@ -29,6 +29,8 @@
5811 #ifndef __TTYPE_H__
5812 #define __TTYPE_H__
5813
5814 +#include <linux/types.h>
5815 +
5816 /******* Common definitions and typedefs ***********************************/
5817
5818 typedef int BOOL;
5819 @@ -42,17 +44,17 @@ typedef int BOOL;
5820
5821 /****** Simple typedefs ***************************************************/
5822
5823 -typedef unsigned char BYTE; // 8-bit
5824 -typedef unsigned short WORD; // 16-bit
5825 -typedef unsigned long DWORD; // 32-bit
5826 +typedef u8 BYTE;
5827 +typedef u16 WORD;
5828 +typedef u32 DWORD;
5829
5830 // QWORD is for those situation that we want
5831 // an 8-byte-aligned 8 byte long structure
5832 // which is NOT really a floating point number.
5833 typedef union tagUQuadWord {
5834 struct {
5835 - DWORD dwLowDword;
5836 - DWORD dwHighDword;
5837 + u32 dwLowDword;
5838 + u32 dwHighDword;
5839 } u;
5840 double DoNotUseThisField;
5841 } UQuadWord;
5842 @@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit
5843
5844 /****** Common pointer types ***********************************************/
5845
5846 -typedef unsigned long ULONG_PTR; // 32-bit
5847 -typedef unsigned long DWORD_PTR; // 32-bit
5848 +typedef u32 ULONG_PTR;
5849 +typedef u32 DWORD_PTR;
5850
5851 // boolean pointer
5852
5853 diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
5854 index 586fbe1..b854d7e 100644
5855 --- a/drivers/staging/vt6656/wcmd.c
5856 +++ b/drivers/staging/vt6656/wcmd.c
5857 @@ -316,17 +316,19 @@ s_MgrMakeProbeRequest(
5858 return pTxPacket;
5859 }
5860
5861 -void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
5862 +void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
5863 {
5864 - PSDevice pDevice = (PSDevice)hDeviceContext;
5865 + PSDevice pDevice = (PSDevice)hDeviceContext;
5866
5867 - init_timer(&pDevice->sTimerCommand);
5868 - pDevice->sTimerCommand.data = (unsigned long)pDevice;
5869 - pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
5870 - // RUN_AT :1 msec ~= (HZ/1024)
5871 - pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
5872 - add_timer(&pDevice->sTimerCommand);
5873 - return;
5874 + init_timer(&pDevice->sTimerCommand);
5875 +
5876 + pDevice->sTimerCommand.data = (unsigned long)pDevice;
5877 + pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
5878 + pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
5879 +
5880 + add_timer(&pDevice->sTimerCommand);
5881 +
5882 + return;
5883 }
5884
5885 void vRunCommand(void *hDeviceContext)
5886 diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
5887 index 46c2959..c359252 100644
5888 --- a/drivers/staging/vt6656/wpa2.h
5889 +++ b/drivers/staging/vt6656/wpa2.h
5890 @@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo {
5891 } PMKIDInfo, *PPMKIDInfo;
5892
5893 typedef struct tagSPMKIDCache {
5894 - unsigned long BSSIDInfoCount;
5895 - PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
5896 + u32 BSSIDInfoCount;
5897 + PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
5898 } SPMKIDCache, *PSPMKIDCache;
5899
5900
5901 diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
5902 index 6edefde..f2a73bd 100644
5903 --- a/drivers/staging/zram/zram_drv.c
5904 +++ b/drivers/staging/zram/zram_drv.c
5905 @@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
5906 return bvec->bv_len != PAGE_SIZE;
5907 }
5908
5909 -static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5910 - u32 index, int offset, struct bio *bio)
5911 +static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
5912 {
5913 - int ret;
5914 - size_t clen;
5915 - struct page *page;
5916 - unsigned char *user_mem, *cmem, *uncmem = NULL;
5917 -
5918 - page = bvec->bv_page;
5919 -
5920 - if (zram_test_flag(zram, index, ZRAM_ZERO)) {
5921 - handle_zero_page(bvec);
5922 - return 0;
5923 - }
5924 + int ret = LZO_E_OK;
5925 + size_t clen = PAGE_SIZE;
5926 + unsigned char *cmem;
5927 + unsigned long handle = zram->table[index].handle;
5928
5929 - /* Requested page is not present in compressed area */
5930 - if (unlikely(!zram->table[index].handle)) {
5931 - pr_debug("Read before write: sector=%lu, size=%u",
5932 - (ulong)(bio->bi_sector), bio->bi_size);
5933 - handle_zero_page(bvec);
5934 + if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
5935 + memset(mem, 0, PAGE_SIZE);
5936 return 0;
5937 }
5938
5939 - if (is_partial_io(bvec)) {
5940 - /* Use a temporary buffer to decompress the page */
5941 - uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
5942 - if (!uncmem) {
5943 - pr_info("Error allocating temp memory!\n");
5944 - return -ENOMEM;
5945 - }
5946 - }
5947 -
5948 - user_mem = kmap_atomic(page);
5949 - if (!is_partial_io(bvec))
5950 - uncmem = user_mem;
5951 - clen = PAGE_SIZE;
5952 -
5953 - cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
5954 - ZS_MM_RO);
5955 -
5956 - if (zram->table[index].size == PAGE_SIZE) {
5957 - memcpy(uncmem, cmem, PAGE_SIZE);
5958 - ret = LZO_E_OK;
5959 - } else {
5960 + cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
5961 + if (zram->table[index].size == PAGE_SIZE)
5962 + memcpy(mem, cmem, PAGE_SIZE);
5963 + else
5964 ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
5965 - uncmem, &clen);
5966 - }
5967 -
5968 - if (is_partial_io(bvec)) {
5969 - memcpy(user_mem + bvec->bv_offset, uncmem + offset,
5970 - bvec->bv_len);
5971 - kfree(uncmem);
5972 - }
5973 -
5974 - zs_unmap_object(zram->mem_pool, zram->table[index].handle);
5975 - kunmap_atomic(user_mem);
5976 + mem, &clen);
5977 + zs_unmap_object(zram->mem_pool, handle);
5978
5979 /* Should NEVER happen. Return bio error if it does. */
5980 if (unlikely(ret != LZO_E_OK)) {
5981 @@ -247,42 +210,62 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5982 return ret;
5983 }
5984
5985 - flush_dcache_page(page);
5986 -
5987 return 0;
5988 }
5989
5990 -static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
5991 +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5992 + u32 index, int offset, struct bio *bio)
5993 {
5994 int ret;
5995 - size_t clen = PAGE_SIZE;
5996 - unsigned char *cmem;
5997 - unsigned long handle = zram->table[index].handle;
5998 + struct page *page;
5999 + unsigned char *user_mem, *uncmem = NULL;
6000
6001 - if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
6002 - memset(mem, 0, PAGE_SIZE);
6003 + page = bvec->bv_page;
6004 +
6005 + if (unlikely(!zram->table[index].handle) ||
6006 + zram_test_flag(zram, index, ZRAM_ZERO)) {
6007 + handle_zero_page(bvec);
6008 return 0;
6009 }
6010
6011 - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
6012 - ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
6013 - mem, &clen);
6014 - zs_unmap_object(zram->mem_pool, handle);
6015 + user_mem = kmap_atomic(page);
6016 + if (is_partial_io(bvec))
6017 + /* Use a temporary buffer to decompress the page */
6018 + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
6019 + else
6020 + uncmem = user_mem;
6021 +
6022 + if (!uncmem) {
6023 + pr_info("Unable to allocate temp memory\n");
6024 + ret = -ENOMEM;
6025 + goto out_cleanup;
6026 + }
6027
6028 + ret = zram_decompress_page(zram, uncmem, index);
6029 /* Should NEVER happen. Return bio error if it does. */
6030 if (unlikely(ret != LZO_E_OK)) {
6031 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
6032 zram_stat64_inc(zram, &zram->stats.failed_reads);
6033 - return ret;
6034 + goto out_cleanup;
6035 }
6036
6037 - return 0;
6038 + if (is_partial_io(bvec))
6039 + memcpy(user_mem + bvec->bv_offset, uncmem + offset,
6040 + bvec->bv_len);
6041 +
6042 + flush_dcache_page(page);
6043 + ret = 0;
6044 +out_cleanup:
6045 + kunmap_atomic(user_mem);
6046 + if (is_partial_io(bvec))
6047 + kfree(uncmem);
6048 + return ret;
6049 }
6050
6051 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6052 int offset)
6053 {
6054 - int ret;
6055 + int ret = 0;
6056 size_t clen;
6057 unsigned long handle;
6058 struct page *page;
6059 @@ -302,11 +285,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6060 ret = -ENOMEM;
6061 goto out;
6062 }
6063 - ret = zram_read_before_write(zram, uncmem, index);
6064 - if (ret) {
6065 - kfree(uncmem);
6066 + ret = zram_decompress_page(zram, uncmem, index);
6067 + if (ret)
6068 goto out;
6069 - }
6070 }
6071
6072 /*
6073 @@ -319,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6074
6075 user_mem = kmap_atomic(page);
6076
6077 - if (is_partial_io(bvec))
6078 + if (is_partial_io(bvec)) {
6079 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
6080 bvec->bv_len);
6081 - else
6082 + kunmap_atomic(user_mem);
6083 + user_mem = NULL;
6084 + } else {
6085 uncmem = user_mem;
6086 + }
6087
6088 if (page_zero_filled(uncmem)) {
6089 - kunmap_atomic(user_mem);
6090 - if (is_partial_io(bvec))
6091 - kfree(uncmem);
6092 + if (!is_partial_io(bvec))
6093 + kunmap_atomic(user_mem);
6094 zram_stat_inc(&zram->stats.pages_zero);
6095 zram_set_flag(zram, index, ZRAM_ZERO);
6096 ret = 0;
6097 @@ -338,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6098 ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
6099 zram->compress_workmem);
6100
6101 - kunmap_atomic(user_mem);
6102 - if (is_partial_io(bvec))
6103 - kfree(uncmem);
6104 + if (!is_partial_io(bvec)) {
6105 + kunmap_atomic(user_mem);
6106 + user_mem = NULL;
6107 + uncmem = NULL;
6108 + }
6109
6110 if (unlikely(ret != LZO_E_OK)) {
6111 pr_err("Compression failed! err=%d\n", ret);
6112 @@ -349,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6113
6114 if (unlikely(clen > max_zpage_size)) {
6115 zram_stat_inc(&zram->stats.bad_compress);
6116 - src = uncmem;
6117 clen = PAGE_SIZE;
6118 + src = NULL;
6119 + if (is_partial_io(bvec))
6120 + src = uncmem;
6121 }
6122
6123 handle = zs_malloc(zram->mem_pool, clen);
6124 @@ -362,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6125 }
6126 cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
6127
6128 + if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
6129 + src = kmap_atomic(page);
6130 memcpy(cmem, src, clen);
6131 + if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
6132 + kunmap_atomic(src);
6133
6134 zs_unmap_object(zram->mem_pool, handle);
6135
6136 @@ -375,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6137 if (clen <= PAGE_SIZE / 2)
6138 zram_stat_inc(&zram->stats.good_compress);
6139
6140 - return 0;
6141 -
6142 out:
6143 + if (is_partial_io(bvec))
6144 + kfree(uncmem);
6145 +
6146 if (ret)
6147 zram_stat64_inc(zram, &zram->stats.failed_writes);
6148 return ret;
6149 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
6150 index 035c2c7..bb34855 100644
6151 --- a/drivers/target/iscsi/iscsi_target.c
6152 +++ b/drivers/target/iscsi/iscsi_target.c
6153 @@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
6154 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
6155 spin_lock(&cmd->istate_lock);
6156 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
6157 - (cmd->stat_sn < exp_statsn)) {
6158 + iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
6159 cmd->i_state = ISTATE_REMOVE;
6160 spin_unlock(&cmd->istate_lock);
6161 iscsit_add_cmd_to_immediate_queue(cmd, conn,
6162 @@ -2360,7 +2360,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
6163 if (!conn_p)
6164 return;
6165
6166 - cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
6167 + cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
6168 if (!cmd) {
6169 iscsit_dec_conn_usage_count(conn_p);
6170 return;
6171 diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
6172 index 17d8c20..ba6091b 100644
6173 --- a/drivers/target/iscsi/iscsi_target_erl2.c
6174 +++ b/drivers/target/iscsi/iscsi_target_erl2.c
6175 @@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
6176 * made generic here.
6177 */
6178 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
6179 - (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
6180 + iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
6181 list_del(&cmd->i_conn_node);
6182 spin_unlock_bh(&conn->cmd_lock);
6183 iscsit_free_cmd(cmd);
6184 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
6185 index f8dbec0..10b40bb 100644
6186 --- a/drivers/target/iscsi/iscsi_target_login.c
6187 +++ b/drivers/target/iscsi/iscsi_target_login.c
6188 @@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
6189
6190 initiatorname_param = iscsi_find_param_from_key(
6191 INITIATORNAME, conn->param_list);
6192 - if (!initiatorname_param)
6193 - return -1;
6194 -
6195 sessiontype_param = iscsi_find_param_from_key(
6196 SESSIONTYPE, conn->param_list);
6197 - if (!sessiontype_param)
6198 + if (!initiatorname_param || !sessiontype_param) {
6199 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6200 + ISCSI_LOGIN_STATUS_MISSING_FIELDS);
6201 return -1;
6202 + }
6203
6204 sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
6205
6206 diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
6207 index e9053a0..9d902ae 100644
6208 --- a/drivers/target/iscsi/iscsi_target_nego.c
6209 +++ b/drivers/target/iscsi/iscsi_target_nego.c
6210 @@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
6211 login->req_buf,
6212 payload_length,
6213 conn);
6214 - if (ret < 0)
6215 + if (ret < 0) {
6216 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6217 + ISCSI_LOGIN_STATUS_INIT_ERR);
6218 return -1;
6219 + }
6220
6221 if (login->first_request)
6222 if (iscsi_target_check_first_request(conn, login) < 0)
6223 @@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
6224 login->rsp_buf,
6225 &login->rsp_length,
6226 conn->param_list);
6227 - if (ret < 0)
6228 + if (ret < 0) {
6229 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6230 + ISCSI_LOGIN_STATUS_INIT_ERR);
6231 return -1;
6232 + }
6233
6234 if (!login->auth_complete &&
6235 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
6236 diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
6237 index 4a99820..9d4417a 100644
6238 --- a/drivers/target/iscsi/iscsi_target_tmr.c
6239 +++ b/drivers/target/iscsi/iscsi_target_tmr.c
6240 @@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
6241 if (!ref_cmd) {
6242 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
6243 " %hu.\n", hdr->rtt, conn->cid);
6244 - return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
6245 - be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
6246 + return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
6247 + iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
6248 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
6249 }
6250 if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
6251 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
6252 index 0d6d7c1..f9e1e8a 100644
6253 --- a/drivers/target/sbp/sbp_target.c
6254 +++ b/drivers/target/sbp/sbp_target.c
6255 @@ -2207,20 +2207,23 @@ static struct se_portal_group *sbp_make_tpg(
6256 tport->mgt_agt = sbp_management_agent_register(tport);
6257 if (IS_ERR(tport->mgt_agt)) {
6258 ret = PTR_ERR(tport->mgt_agt);
6259 - kfree(tpg);
6260 - return ERR_PTR(ret);
6261 + goto out_free_tpg;
6262 }
6263
6264 ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
6265 &tpg->se_tpg, (void *)tpg,
6266 TRANSPORT_TPG_TYPE_NORMAL);
6267 - if (ret < 0) {
6268 - sbp_management_agent_unregister(tport->mgt_agt);
6269 - kfree(tpg);
6270 - return ERR_PTR(ret);
6271 - }
6272 + if (ret < 0)
6273 + goto out_unreg_mgt_agt;
6274
6275 return &tpg->se_tpg;
6276 +
6277 +out_unreg_mgt_agt:
6278 + sbp_management_agent_unregister(tport->mgt_agt);
6279 +out_free_tpg:
6280 + tport->tpg = NULL;
6281 + kfree(tpg);
6282 + return ERR_PTR(ret);
6283 }
6284
6285 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
6286 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
6287 index 0360383..c639b42 100644
6288 --- a/drivers/target/target_core_file.c
6289 +++ b/drivers/target/target_core_file.c
6290 @@ -260,7 +260,7 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
6291
6292 for_each_sg(sgl, sg, sgl_nents, i) {
6293 iov[i].iov_len = sg->length;
6294 - iov[i].iov_base = sg_virt(sg);
6295 + iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
6296 }
6297
6298 old_fs = get_fs();
6299 @@ -268,6 +268,8 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
6300 ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
6301 set_fs(old_fs);
6302
6303 + for_each_sg(sgl, sg, sgl_nents, i)
6304 + kunmap(sg_page(sg));
6305 kfree(iov);
6306 /*
6307 * Return zeros and GOOD status even if the READ did not return
6308 @@ -313,7 +315,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
6309
6310 for_each_sg(sgl, sg, sgl_nents, i) {
6311 iov[i].iov_len = sg->length;
6312 - iov[i].iov_base = sg_virt(sg);
6313 + iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
6314 }
6315
6316 old_fs = get_fs();
6317 @@ -321,6 +323,9 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
6318 ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
6319 set_fs(old_fs);
6320
6321 + for_each_sg(sgl, sg, sgl_nents, i)
6322 + kunmap(sg_page(sg));
6323 +
6324 kfree(iov);
6325
6326 if (ret < 0 || ret != cmd->data_length) {
6327 diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
6328 index 9585010..12d6fa2 100644
6329 --- a/drivers/target/tcm_fc/tfc_sess.c
6330 +++ b/drivers/target/tcm_fc/tfc_sess.c
6331 @@ -430,7 +430,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
6332 {
6333 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
6334
6335 - transport_deregister_session(sess->se_sess);
6336 kfree(sess);
6337 }
6338
6339 @@ -438,6 +437,7 @@ static void ft_sess_free(struct kref *kref)
6340 {
6341 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
6342
6343 + transport_deregister_session(sess->se_sess);
6344 call_rcu(&sess->rcu, ft_sess_rcu_free);
6345 }
6346
6347 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
6348 index 8d809a8..2d92cce 100644
6349 --- a/drivers/usb/class/cdc-acm.c
6350 +++ b/drivers/usb/class/cdc-acm.c
6351 @@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
6352 { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
6353 .driver_info = NO_UNION_NORMAL,
6354 },
6355 + { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
6356 + .driver_info = NO_UNION_NORMAL,
6357 + },
6358 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
6359 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
6360 },
6361 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
6362 index 1af04bd..e6cc4e6 100644
6363 --- a/drivers/usb/core/hub.c
6364 +++ b/drivers/usb/core/hub.c
6365 @@ -876,6 +876,60 @@ static int hub_hub_status(struct usb_hub *hub,
6366 return ret;
6367 }
6368
6369 +static int hub_set_port_link_state(struct usb_hub *hub, int port1,
6370 + unsigned int link_status)
6371 +{
6372 + return set_port_feature(hub->hdev,
6373 + port1 | (link_status << 3),
6374 + USB_PORT_FEAT_LINK_STATE);
6375 +}
6376 +
6377 +/*
6378 + * If USB 3.0 ports are placed into the Disabled state, they will no longer
6379 + * detect any device connects or disconnects. This is generally not what the
6380 + * USB core wants, since it expects a disabled port to produce a port status
6381 + * change event when a new device connects.
6382 + *
6383 + * Instead, set the link state to Disabled, wait for the link to settle into
6384 + * that state, clear any change bits, and then put the port into the RxDetect
6385 + * state.
6386 + */
6387 +static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
6388 +{
6389 + int ret;
6390 + int total_time;
6391 + u16 portchange, portstatus;
6392 +
6393 + if (!hub_is_superspeed(hub->hdev))
6394 + return -EINVAL;
6395 +
6396 + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
6397 + if (ret) {
6398 + dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
6399 + port1, ret);
6400 + return ret;
6401 + }
6402 +
6403 + /* Wait for the link to enter the disabled state. */
6404 + for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
6405 + ret = hub_port_status(hub, port1, &portstatus, &portchange);
6406 + if (ret < 0)
6407 + return ret;
6408 +
6409 + if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
6410 + USB_SS_PORT_LS_SS_DISABLED)
6411 + break;
6412 + if (total_time >= HUB_DEBOUNCE_TIMEOUT)
6413 + break;
6414 + msleep(HUB_DEBOUNCE_STEP);
6415 + }
6416 + if (total_time >= HUB_DEBOUNCE_TIMEOUT)
6417 + dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
6418 + port1, total_time);
6419 +
6420 + return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
6421 +}
6422 +
6423 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
6424 {
6425 struct usb_device *hdev = hub->hdev;
6426 @@ -884,8 +938,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
6427 if (hub->ports[port1 - 1]->child && set_state)
6428 usb_set_device_state(hub->ports[port1 - 1]->child,
6429 USB_STATE_NOTATTACHED);
6430 - if (!hub->error && !hub_is_superspeed(hub->hdev))
6431 - ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
6432 + if (!hub->error) {
6433 + if (hub_is_superspeed(hub->hdev))
6434 + ret = hub_usb3_port_disable(hub, port1);
6435 + else
6436 + ret = clear_port_feature(hdev, port1,
6437 + USB_PORT_FEAT_ENABLE);
6438 + }
6439 if (ret)
6440 dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
6441 port1, ret);
6442 @@ -2401,7 +2460,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
6443 #define HUB_SHORT_RESET_TIME 10
6444 #define HUB_BH_RESET_TIME 50
6445 #define HUB_LONG_RESET_TIME 200
6446 -#define HUB_RESET_TIMEOUT 500
6447 +#define HUB_RESET_TIMEOUT 800
6448
6449 static int hub_port_reset(struct usb_hub *hub, int port1,
6450 struct usb_device *udev, unsigned int delay, bool warm);
6451 @@ -2436,6 +2495,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6452 if (ret < 0)
6453 return ret;
6454
6455 + /* The port state is unknown until the reset completes. */
6456 + if ((portstatus & USB_PORT_STAT_RESET))
6457 + goto delay;
6458 +
6459 /*
6460 * Some buggy devices require a warm reset to be issued even
6461 * when the port appears not to be connected.
6462 @@ -2481,11 +2544,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6463 if ((portchange & USB_PORT_STAT_C_CONNECTION))
6464 return -ENOTCONN;
6465
6466 - /* if we`ve finished resetting, then break out of
6467 - * the loop
6468 - */
6469 - if (!(portstatus & USB_PORT_STAT_RESET) &&
6470 - (portstatus & USB_PORT_STAT_ENABLE)) {
6471 + if ((portstatus & USB_PORT_STAT_ENABLE)) {
6472 if (hub_is_wusb(hub))
6473 udev->speed = USB_SPEED_WIRELESS;
6474 else if (hub_is_superspeed(hub->hdev))
6475 @@ -2499,10 +2558,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6476 return 0;
6477 }
6478 } else {
6479 - if (portchange & USB_PORT_STAT_C_BH_RESET)
6480 - return 0;
6481 + if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
6482 + hub_port_warm_reset_required(hub,
6483 + portstatus))
6484 + return -ENOTCONN;
6485 +
6486 + return 0;
6487 }
6488
6489 +delay:
6490 /* switch to the long delay after two short delay failures */
6491 if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
6492 delay = HUB_LONG_RESET_TIME;
6493 @@ -2526,14 +2590,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
6494 msleep(10 + 40);
6495 update_devnum(udev, 0);
6496 hcd = bus_to_hcd(udev->bus);
6497 - if (hcd->driver->reset_device) {
6498 - *status = hcd->driver->reset_device(hcd, udev);
6499 - if (*status < 0) {
6500 - dev_err(&udev->dev, "Cannot reset "
6501 - "HCD device state\n");
6502 - break;
6503 - }
6504 - }
6505 + /* The xHC may think the device is already reset,
6506 + * so ignore the status.
6507 + */
6508 + if (hcd->driver->reset_device)
6509 + hcd->driver->reset_device(hcd, udev);
6510 }
6511 /* FALL THROUGH */
6512 case -ENOTCONN:
6513 @@ -2541,16 +2602,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
6514 clear_port_feature(hub->hdev,
6515 port1, USB_PORT_FEAT_C_RESET);
6516 /* FIXME need disconnect() for NOTATTACHED device */
6517 - if (warm) {
6518 + if (hub_is_superspeed(hub->hdev)) {
6519 clear_port_feature(hub->hdev, port1,
6520 USB_PORT_FEAT_C_BH_PORT_RESET);
6521 clear_port_feature(hub->hdev, port1,
6522 USB_PORT_FEAT_C_PORT_LINK_STATE);
6523 - } else {
6524 + }
6525 + if (!warm)
6526 usb_set_device_state(udev, *status
6527 ? USB_STATE_NOTATTACHED
6528 : USB_STATE_DEFAULT);
6529 - }
6530 break;
6531 }
6532 }
6533 @@ -2899,7 +2960,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
6534 static int finish_port_resume(struct usb_device *udev)
6535 {
6536 int status = 0;
6537 - u16 devstatus;
6538 + u16 devstatus = 0;
6539
6540 /* caller owns the udev device lock */
6541 dev_dbg(&udev->dev, "%s\n",
6542 @@ -2944,7 +3005,13 @@ static int finish_port_resume(struct usb_device *udev)
6543 if (status) {
6544 dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
6545 status);
6546 - } else if (udev->actconfig) {
6547 + /*
6548 + * There are a few quirky devices which violate the standard
6549 + * by claiming to have remote wakeup enabled after a reset,
6550 + * which crash if the feature is cleared, hence check for
6551 + * udev->reset_resume
6552 + */
6553 + } else if (udev->actconfig && !udev->reset_resume) {
6554 le16_to_cpus(&devstatus);
6555 if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
6556 status = usb_control_msg(udev,
6557 @@ -4572,9 +4639,14 @@ static void hub_events(void)
6558 * SS.Inactive state.
6559 */
6560 if (hub_port_warm_reset_required(hub, portstatus)) {
6561 + int status;
6562 +
6563 dev_dbg(hub_dev, "warm reset port %d\n", i);
6564 - hub_port_reset(hub, i, NULL,
6565 + status = hub_port_reset(hub, i, NULL,
6566 HUB_BH_RESET_TIME, true);
6567 + if (status < 0)
6568 + hub_port_disable(hub, i, 1);
6569 + connect_change = 0;
6570 }
6571
6572 if (connect_change)
6573 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6574 index fdefd9c..3113c1d 100644
6575 --- a/drivers/usb/core/quirks.c
6576 +++ b/drivers/usb/core/quirks.c
6577 @@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
6578 /* Creative SB Audigy 2 NX */
6579 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
6580
6581 + /* Microsoft LifeCam-VX700 v2.0 */
6582 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
6583 +
6584 /* Logitech Quickcam Fusion */
6585 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
6586
6587 diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
6588 index 0f7541b..559b06c 100644
6589 --- a/drivers/usb/gadget/dummy_hcd.c
6590 +++ b/drivers/usb/gadget/dummy_hcd.c
6591 @@ -126,10 +126,7 @@ static const char ep0name[] = "ep0";
6592 static const char *const ep_name[] = {
6593 ep0name, /* everyone has ep0 */
6594
6595 - /* act like a net2280: high speed, six configurable endpoints */
6596 - "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
6597 -
6598 - /* or like pxa250: fifteen fixed function endpoints */
6599 + /* act like a pxa250: fifteen fixed function endpoints */
6600 "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
6601 "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
6602 "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
6603 @@ -137,6 +134,10 @@ static const char *const ep_name[] = {
6604
6605 /* or like sa1100: two fixed function endpoints */
6606 "ep1out-bulk", "ep2in-bulk",
6607 +
6608 + /* and now some generic EPs so we have enough in multi config */
6609 + "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
6610 + "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
6611 };
6612 #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
6613
6614 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
6615 index f42b68e..d2ea004 100644
6616 --- a/drivers/usb/host/ehci-pci.c
6617 +++ b/drivers/usb/host/ehci-pci.c
6618 @@ -192,6 +192,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
6619 break;
6620 }
6621
6622 + /* optional debug port, normally in the first BAR */
6623 + temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
6624 + if (temp) {
6625 + pci_read_config_dword(pdev, temp, &temp);
6626 + temp >>= 16;
6627 + if (((temp >> 13) & 7) == 1) {
6628 + u32 hcs_params = ehci_readl(ehci,
6629 + &ehci->caps->hcs_params);
6630 +
6631 + temp &= 0x1fff;
6632 + ehci->debug = hcd->regs + temp;
6633 + temp = ehci_readl(ehci, &ehci->debug->control);
6634 + ehci_info(ehci, "debug port %d%s\n",
6635 + HCS_DEBUG_PORT(hcs_params),
6636 + (temp & DBGP_ENABLED) ? " IN USE" : "");
6637 + if (!(temp & DBGP_ENABLED))
6638 + ehci->debug = NULL;
6639 + }
6640 + }
6641 +
6642 retval = ehci_setup(hcd);
6643 if (retval)
6644 return retval;
6645 @@ -226,25 +246,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
6646 break;
6647 }
6648
6649 - /* optional debug port, normally in the first BAR */
6650 - temp = pci_find_capability(pdev, 0x0a);
6651 - if (temp) {
6652 - pci_read_config_dword(pdev, temp, &temp);
6653 - temp >>= 16;
6654 - if ((temp & (3 << 13)) == (1 << 13)) {
6655 - temp &= 0x1fff;
6656 - ehci->debug = hcd->regs + temp;
6657 - temp = ehci_readl(ehci, &ehci->debug->control);
6658 - ehci_info(ehci, "debug port %d%s\n",
6659 - HCS_DEBUG_PORT(ehci->hcs_params),
6660 - (temp & DBGP_ENABLED)
6661 - ? " IN USE"
6662 - : "");
6663 - if (!(temp & DBGP_ENABLED))
6664 - ehci->debug = NULL;
6665 - }
6666 - }
6667 -
6668 /* at least the Genesys GL880S needs fixup here */
6669 temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
6670 temp &= 0x0f;
6671 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
6672 index a686cf4..6891442 100644
6673 --- a/drivers/usb/host/xhci-hub.c
6674 +++ b/drivers/usb/host/xhci-hub.c
6675 @@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
6676 break;
6677 case USB_PORT_FEAT_LINK_STATE:
6678 temp = xhci_readl(xhci, port_array[wIndex]);
6679 +
6680 + /* Disable port */
6681 + if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
6682 + xhci_dbg(xhci, "Disable port %d\n", wIndex);
6683 + temp = xhci_port_state_to_neutral(temp);
6684 + /*
6685 + * Clear all change bits, so that we get a new
6686 + * connection event.
6687 + */
6688 + temp |= PORT_CSC | PORT_PEC | PORT_WRC |
6689 + PORT_OCC | PORT_RC | PORT_PLC |
6690 + PORT_CEC;
6691 + xhci_writel(xhci, temp | PORT_PE,
6692 + port_array[wIndex]);
6693 + temp = xhci_readl(xhci, port_array[wIndex]);
6694 + break;
6695 + }
6696 +
6697 + /* Put link in RxDetect (enable port) */
6698 + if (link_state == USB_SS_PORT_LS_RX_DETECT) {
6699 + xhci_dbg(xhci, "Enable port %d\n", wIndex);
6700 + xhci_set_link_state(xhci, port_array, wIndex,
6701 + link_state);
6702 + temp = xhci_readl(xhci, port_array[wIndex]);
6703 + break;
6704 + }
6705 +
6706 /* Software should not attempt to set
6707 - * port link state above '5' (Rx.Detect) and the port
6708 + * port link state above '3' (U3) and the port
6709 * must be enabled.
6710 */
6711 if ((temp & PORT_PE) == 0 ||
6712 - (link_state > USB_SS_PORT_LS_RX_DETECT)) {
6713 + (link_state > USB_SS_PORT_LS_U3)) {
6714 xhci_warn(xhci, "Cannot set link state.\n");
6715 goto error;
6716 }
6717 @@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
6718 int max_ports;
6719 __le32 __iomem **port_array;
6720 struct xhci_bus_state *bus_state;
6721 + bool reset_change = false;
6722
6723 max_ports = xhci_get_ports(hcd, &port_array);
6724 bus_state = &xhci->bus_state[hcd_index(hcd)];
6725 @@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
6726 buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
6727 status = 1;
6728 }
6729 + if ((temp & PORT_RC))
6730 + reset_change = true;
6731 + }
6732 + if (!status && !reset_change) {
6733 + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
6734 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6735 }
6736 spin_unlock_irqrestore(&xhci->lock, flags);
6737 return status ? retval : 0;
6738 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
6739 index fb51c70..35616ff 100644
6740 --- a/drivers/usb/host/xhci-mem.c
6741 +++ b/drivers/usb/host/xhci-mem.c
6742 @@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
6743 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
6744 struct usb_host_endpoint *ep)
6745 {
6746 + if (ep->desc.bInterval == 0)
6747 + return 0;
6748 return xhci_microframes_to_exponent(udev, ep,
6749 ep->desc.bInterval, 0, 15);
6750 }
6751 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
6752 index 1189cf3..e80c49d 100644
6753 --- a/drivers/usb/host/xhci-ring.c
6754 +++ b/drivers/usb/host/xhci-ring.c
6755 @@ -1725,6 +1725,15 @@ cleanup:
6756 if (bogus_port_status)
6757 return;
6758
6759 + /*
6760 + * xHCI port-status-change events occur when the "or" of all the
6761 + * status-change bits in the portsc register changes from 0 to 1.
6762 + * New status changes won't cause an event if any other change
6763 + * bits are still set. When an event occurs, switch over to
6764 + * polling to avoid losing status changes.
6765 + */
6766 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
6767 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6768 spin_unlock(&xhci->lock);
6769 /* Pass this up to the core */
6770 usb_hcd_poll_rh_status(hcd);
6771 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
6772 index 389829e..c9b886e 100644
6773 --- a/drivers/usb/host/xhci.c
6774 +++ b/drivers/usb/host/xhci.c
6775 @@ -880,6 +880,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
6776 struct usb_hcd *hcd = xhci_to_hcd(xhci);
6777 u32 command;
6778
6779 + /* Don't poll the roothubs on bus suspend. */
6780 + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
6781 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6782 + del_timer_sync(&hcd->rh_timer);
6783 +
6784 spin_lock_irq(&xhci->lock);
6785 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
6786 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
6787 @@ -1064,6 +1069,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
6788 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
6789 compliance_mode_recovery_timer_init(xhci);
6790
6791 + /* Re-enable port polling. */
6792 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
6793 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6794 + usb_hcd_poll_rh_status(hcd);
6795 +
6796 return retval;
6797 }
6798 #endif /* CONFIG_PM */
6799 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
6800 index bb56a0e..30e8551 100644
6801 --- a/drivers/usb/musb/musb_core.c
6802 +++ b/drivers/usb/musb/musb_core.c
6803 @@ -2351,10 +2351,7 @@ static int __init musb_init(void)
6804 if (usb_disabled())
6805 return 0;
6806
6807 - pr_info("%s: version " MUSB_VERSION ", "
6808 - "?dma?"
6809 - ", "
6810 - "otg (peripheral+host)",
6811 + pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
6812 musb_driver_name);
6813 return platform_driver_register(&musb_driver);
6814 }
6815 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
6816 index 2641d36..71e80ab 100644
6817 --- a/drivers/usb/serial/ftdi_sio.c
6818 +++ b/drivers/usb/serial/ftdi_sio.c
6819 @@ -876,6 +876,8 @@ static struct usb_device_id id_table_combined [] = {
6820 { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
6821 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6822 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
6823 + /* Crucible Devices */
6824 + { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
6825 { }, /* Optional parameter entry */
6826 { } /* Terminating entry */
6827 };
6828 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
6829 index 049b6e7..fa5d560 100644
6830 --- a/drivers/usb/serial/ftdi_sio_ids.h
6831 +++ b/drivers/usb/serial/ftdi_sio_ids.h
6832 @@ -1259,3 +1259,9 @@
6833 * ATI command output: Cinterion MC55i
6834 */
6835 #define FTDI_CINTERION_MC55I_PID 0xA951
6836 +
6837 +/*
6838 + * Product: Comet Caller ID decoder
6839 + * Manufacturer: Crucible Technologies
6840 + */
6841 +#define FTDI_CT_COMET_PID 0x8e08
6842 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
6843 index da36dc7..fd47369 100644
6844 --- a/drivers/usb/serial/option.c
6845 +++ b/drivers/usb/serial/option.c
6846 @@ -289,6 +289,7 @@ static void option_instat_callback(struct urb *urb);
6847 #define ALCATEL_VENDOR_ID 0x1bbb
6848 #define ALCATEL_PRODUCT_X060S_X200 0x0000
6849 #define ALCATEL_PRODUCT_X220_X500D 0x0017
6850 +#define ALCATEL_PRODUCT_L100V 0x011e
6851
6852 #define PIRELLI_VENDOR_ID 0x1266
6853 #define PIRELLI_PRODUCT_C100_1 0x1002
6854 @@ -430,9 +431,12 @@ static void option_instat_callback(struct urb *urb);
6855 #define MEDIATEK_VENDOR_ID 0x0e8d
6856 #define MEDIATEK_PRODUCT_DC_1COM 0x00a0
6857 #define MEDIATEK_PRODUCT_DC_4COM 0x00a5
6858 +#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
6859 #define MEDIATEK_PRODUCT_DC_5COM 0x00a4
6860 #define MEDIATEK_PRODUCT_7208_1COM 0x7101
6861 #define MEDIATEK_PRODUCT_7208_2COM 0x7102
6862 +#define MEDIATEK_PRODUCT_7103_2COM 0x7103
6863 +#define MEDIATEK_PRODUCT_7106_2COM 0x7106
6864 #define MEDIATEK_PRODUCT_FP_1COM 0x0003
6865 #define MEDIATEK_PRODUCT_FP_2COM 0x0023
6866 #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
6867 @@ -442,6 +446,10 @@ static void option_instat_callback(struct urb *urb);
6868 #define CELLIENT_VENDOR_ID 0x2692
6869 #define CELLIENT_PRODUCT_MEN200 0x9005
6870
6871 +/* Hyundai Petatel Inc. products */
6872 +#define PETATEL_VENDOR_ID 0x1ff4
6873 +#define PETATEL_PRODUCT_NP10T 0x600e
6874 +
6875 /* some devices interfaces need special handling due to a number of reasons */
6876 enum option_blacklist_reason {
6877 OPTION_BLACKLIST_NONE = 0,
6878 @@ -924,7 +932,8 @@ static const struct usb_device_id option_ids[] = {
6879 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
6880 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
6881 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
6882 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
6883 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
6884 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6885 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
6886 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
6887 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6888 @@ -1191,6 +1200,8 @@ static const struct usb_device_id option_ids[] = {
6889 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
6890 },
6891 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
6892 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
6893 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6894 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
6895 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
6896 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
6897 @@ -1295,7 +1306,12 @@ static const struct usb_device_id option_ids[] = {
6898 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
6899 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
6900 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
6901 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
6902 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
6903 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
6904 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
6905 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
6906 + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
6907 { } /* Terminating entry */
6908 };
6909 MODULE_DEVICE_TABLE(usb, option_ids);
6910 diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
6911 index 49619b4..f2a49ef 100644
6912 --- a/drivers/video/mxsfb.c
6913 +++ b/drivers/video/mxsfb.c
6914 @@ -369,7 +369,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
6915 loop--;
6916 }
6917
6918 - writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
6919 + reg = readl(host->base + LCDC_VDCTRL4);
6920 + writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
6921
6922 clk_disable_unprepare(host->clk);
6923
6924 diff --git a/fs/buffer.c b/fs/buffer.c
6925 index ec0aca8..20c0aae 100644
6926 --- a/fs/buffer.c
6927 +++ b/fs/buffer.c
6928 @@ -2939,6 +2939,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
6929 void *kaddr = kmap_atomic(bh->b_page);
6930 memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
6931 kunmap_atomic(kaddr);
6932 + flush_dcache_page(bh->b_page);
6933 }
6934 }
6935
6936 diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
6937 index 6690269..d7293d6 100644
6938 --- a/fs/ceph/addr.c
6939 +++ b/fs/ceph/addr.c
6940 @@ -267,6 +267,14 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
6941 kfree(req->r_pages);
6942 }
6943
6944 +static void ceph_unlock_page_vector(struct page **pages, int num_pages)
6945 +{
6946 + int i;
6947 +
6948 + for (i = 0; i < num_pages; i++)
6949 + unlock_page(pages[i]);
6950 +}
6951 +
6952 /*
6953 * start an async read(ahead) operation. return nr_pages we submitted
6954 * a read for on success, or negative error code.
6955 @@ -347,6 +355,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
6956 return nr_pages;
6957
6958 out_pages:
6959 + ceph_unlock_page_vector(pages, nr_pages);
6960 ceph_release_page_vector(pages, nr_pages);
6961 out:
6962 ceph_osdc_put_request(req);
6963 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
6964 index 3251e9c..6be9bf7 100644
6965 --- a/fs/ceph/caps.c
6966 +++ b/fs/ceph/caps.c
6967 @@ -1349,11 +1349,15 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
6968 if (!ci->i_head_snapc)
6969 ci->i_head_snapc = ceph_get_snap_context(
6970 ci->i_snap_realm->cached_context);
6971 - dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
6972 - ci->i_head_snapc);
6973 + dout(" inode %p now dirty snapc %p auth cap %p\n",
6974 + &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
6975 BUG_ON(!list_empty(&ci->i_dirty_item));
6976 spin_lock(&mdsc->cap_dirty_lock);
6977 - list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
6978 + if (ci->i_auth_cap)
6979 + list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
6980 + else
6981 + list_add(&ci->i_dirty_item,
6982 + &mdsc->cap_dirty_migrating);
6983 spin_unlock(&mdsc->cap_dirty_lock);
6984 if (ci->i_flushing_caps == 0) {
6985 ihold(inode);
6986 @@ -2388,7 +2392,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
6987 &atime);
6988
6989 /* max size increase? */
6990 - if (max_size != ci->i_max_size) {
6991 + if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
6992 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
6993 ci->i_max_size = max_size;
6994 if (max_size >= ci->i_wanted_max_size) {
6995 @@ -2745,6 +2749,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
6996
6997 /* make sure we re-request max_size, if necessary */
6998 spin_lock(&ci->i_ceph_lock);
6999 + ci->i_wanted_max_size = 0; /* reset */
7000 ci->i_requested_max_size = 0;
7001 spin_unlock(&ci->i_ceph_lock);
7002 }
7003 @@ -2840,8 +2845,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
7004 case CEPH_CAP_OP_IMPORT:
7005 handle_cap_import(mdsc, inode, h, session,
7006 snaptrace, snaptrace_len);
7007 - ceph_check_caps(ceph_inode(inode), 0, session);
7008 - goto done_unlocked;
7009 }
7010
7011 /* the rest require a cap */
7012 @@ -2858,6 +2861,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
7013 switch (op) {
7014 case CEPH_CAP_OP_REVOKE:
7015 case CEPH_CAP_OP_GRANT:
7016 + case CEPH_CAP_OP_IMPORT:
7017 handle_cap_grant(inode, h, session, cap, msg->middle);
7018 goto done_unlocked;
7019
7020 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
7021 index ba95eea..2971eaa 100644
7022 --- a/fs/ceph/inode.c
7023 +++ b/fs/ceph/inode.c
7024 @@ -1466,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
7025 {
7026 struct ceph_inode_info *ci = ceph_inode(inode);
7027 u64 to;
7028 - int wrbuffer_refs, wake = 0;
7029 + int wrbuffer_refs, finish = 0;
7030
7031 retry:
7032 spin_lock(&ci->i_ceph_lock);
7033 @@ -1498,15 +1498,18 @@ retry:
7034 truncate_inode_pages(inode->i_mapping, to);
7035
7036 spin_lock(&ci->i_ceph_lock);
7037 - ci->i_truncate_pending--;
7038 - if (ci->i_truncate_pending == 0)
7039 - wake = 1;
7040 + if (to == ci->i_truncate_size) {
7041 + ci->i_truncate_pending = 0;
7042 + finish = 1;
7043 + }
7044 spin_unlock(&ci->i_ceph_lock);
7045 + if (!finish)
7046 + goto retry;
7047
7048 if (wrbuffer_refs == 0)
7049 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
7050 - if (wake)
7051 - wake_up_all(&ci->i_cap_wq);
7052 +
7053 + wake_up_all(&ci->i_cap_wq);
7054 }
7055
7056
7057 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
7058 index 1bcf712..0d9864f 100644
7059 --- a/fs/ceph/mds_client.c
7060 +++ b/fs/ceph/mds_client.c
7061 @@ -1876,9 +1876,14 @@ finish:
7062 static void __wake_requests(struct ceph_mds_client *mdsc,
7063 struct list_head *head)
7064 {
7065 - struct ceph_mds_request *req, *nreq;
7066 + struct ceph_mds_request *req;
7067 + LIST_HEAD(tmp_list);
7068 +
7069 + list_splice_init(head, &tmp_list);
7070
7071 - list_for_each_entry_safe(req, nreq, head, r_wait) {
7072 + while (!list_empty(&tmp_list)) {
7073 + req = list_entry(tmp_list.next,
7074 + struct ceph_mds_request, r_wait);
7075 list_del_init(&req->r_wait);
7076 __do_request(mdsc, req);
7077 }
7078 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
7079 index 2eb43f2..e079899 100644
7080 --- a/fs/ceph/super.c
7081 +++ b/fs/ceph/super.c
7082 @@ -403,8 +403,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
7083 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
7084 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
7085 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
7086 - if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
7087 - seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
7088 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
7089 seq_printf(m, ",osdkeepalivetimeout=%d",
7090 opt->osd_keepalive_timeout);
7091 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
7092 index cd96649..39573ee 100644
7093 --- a/fs/eventpoll.c
7094 +++ b/fs/eventpoll.c
7095 @@ -1285,7 +1285,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
7096 * otherwise we might miss an event that happens between the
7097 * f_op->poll() call and the new event set registering.
7098 */
7099 - epi->event.events = event->events;
7100 + epi->event.events = event->events; /* need barrier below */
7101 pt._key = event->events;
7102 epi->event.data = event->data; /* protected by mtx */
7103 if (epi->event.events & EPOLLWAKEUP) {
7104 @@ -1296,6 +1296,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
7105 }
7106
7107 /*
7108 + * The following barrier has two effects:
7109 + *
7110 + * 1) Flush epi changes above to other CPUs. This ensures
7111 + * we do not miss events from ep_poll_callback if an
7112 + * event occurs immediately after we call f_op->poll().
7113 + * We need this because we did not take ep->lock while
7114 + * changing epi above (but ep_poll_callback does take
7115 + * ep->lock).
7116 + *
7117 + * 2) We also need to ensure we do not miss _past_ events
7118 + * when calling f_op->poll(). This barrier also
7119 + * pairs with the barrier in wq_has_sleeper (see
7120 + * comments for wq_has_sleeper).
7121 + *
7122 + * This barrier will now guarantee ep_poll_callback or f_op->poll
7123 + * (or both) will notice the readiness of an item.
7124 + */
7125 + smp_mb();
7126 +
7127 + /*
7128 * Get current event bits. We can safely use the file* here because
7129 * its usage count has been increased by the caller of this function.
7130 */
7131 diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
7132 index d3c5b88..e6e0d98 100644
7133 --- a/fs/ext4/acl.c
7134 +++ b/fs/ext4/acl.c
7135 @@ -423,8 +423,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
7136
7137 retry:
7138 handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
7139 - if (IS_ERR(handle))
7140 - return PTR_ERR(handle);
7141 + if (IS_ERR(handle)) {
7142 + error = PTR_ERR(handle);
7143 + goto release_and_out;
7144 + }
7145 error = ext4_set_acl(handle, inode, type, acl);
7146 ext4_journal_stop(handle);
7147 if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
7148 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7149 index 7011ac9..19bb769 100644
7150 --- a/fs/ext4/extents.c
7151 +++ b/fs/ext4/extents.c
7152 @@ -2190,13 +2190,14 @@ errout:
7153 * removes index from the index block.
7154 */
7155 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
7156 - struct ext4_ext_path *path)
7157 + struct ext4_ext_path *path, int depth)
7158 {
7159 int err;
7160 ext4_fsblk_t leaf;
7161
7162 /* free index block */
7163 - path--;
7164 + depth--;
7165 + path = path + depth;
7166 leaf = ext4_idx_pblock(path->p_idx);
7167 if (unlikely(path->p_hdr->eh_entries == 0)) {
7168 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
7169 @@ -2221,6 +2222,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
7170
7171 ext4_free_blocks(handle, inode, NULL, leaf, 1,
7172 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
7173 +
7174 + while (--depth >= 0) {
7175 + if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
7176 + break;
7177 + path--;
7178 + err = ext4_ext_get_access(handle, inode, path);
7179 + if (err)
7180 + break;
7181 + path->p_idx->ei_block = (path+1)->p_idx->ei_block;
7182 + err = ext4_ext_dirty(handle, inode, path);
7183 + if (err)
7184 + break;
7185 + }
7186 return err;
7187 }
7188
7189 @@ -2557,7 +2571,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
7190 /* if this leaf is free, then we should
7191 * remove it from index block above */
7192 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
7193 - err = ext4_ext_rm_idx(handle, inode, path + depth);
7194 + err = ext4_ext_rm_idx(handle, inode, path, depth);
7195
7196 out:
7197 return err;
7198 @@ -2760,7 +2774,7 @@ again:
7199 /* index is empty, remove it;
7200 * handle must be already prepared by the
7201 * truncatei_leaf() */
7202 - err = ext4_ext_rm_idx(handle, inode, path + i);
7203 + err = ext4_ext_rm_idx(handle, inode, path, i);
7204 }
7205 /* root level has p_bh == NULL, brelse() eats this */
7206 brelse(path[i].p_bh);
7207 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
7208 index 3a100e7..c7efa88 100644
7209 --- a/fs/ext4/ialloc.c
7210 +++ b/fs/ext4/ialloc.c
7211 @@ -762,7 +762,6 @@ got:
7212
7213 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
7214 err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
7215 - brelse(block_bitmap_bh);
7216
7217 /* recheck and clear flag under lock if we still need to */
7218 ext4_lock_group(sb, group);
7219 @@ -775,6 +774,7 @@ got:
7220 ext4_group_desc_csum_set(sb, group, gdp);
7221 }
7222 ext4_unlock_group(sb, group);
7223 + brelse(block_bitmap_bh);
7224
7225 if (err)
7226 goto fail;
7227 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
7228 index 6d600a6..8fa23b4 100644
7229 --- a/fs/ext4/namei.c
7230 +++ b/fs/ext4/namei.c
7231 @@ -725,7 +725,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
7232 ext4_warning(dir->i_sb, "Node failed checksum");
7233 brelse(bh);
7234 *err = ERR_BAD_DX_DIR;
7235 - goto fail;
7236 + goto fail2;
7237 }
7238 set_buffer_verified(bh);
7239
7240 @@ -2498,7 +2498,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
7241 struct ext4_iloc iloc;
7242 int err = 0;
7243
7244 - if (!EXT4_SB(inode->i_sb)->s_journal)
7245 + if ((!EXT4_SB(inode->i_sb)->s_journal) &&
7246 + !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
7247 return 0;
7248
7249 mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
7250 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
7251 index 80928f7..d59b351 100644
7252 --- a/fs/ext4/super.c
7253 +++ b/fs/ext4/super.c
7254 @@ -1650,9 +1650,7 @@ static int parse_options(char *options, struct super_block *sb,
7255 unsigned int *journal_ioprio,
7256 int is_remount)
7257 {
7258 -#ifdef CONFIG_QUOTA
7259 struct ext4_sb_info *sbi = EXT4_SB(sb);
7260 -#endif
7261 char *p;
7262 substring_t args[MAX_OPT_ARGS];
7263 int token;
7264 @@ -1701,6 +1699,16 @@ static int parse_options(char *options, struct super_block *sb,
7265 }
7266 }
7267 #endif
7268 + if (test_opt(sb, DIOREAD_NOLOCK)) {
7269 + int blocksize =
7270 + BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
7271 +
7272 + if (blocksize < PAGE_CACHE_SIZE) {
7273 + ext4_msg(sb, KERN_ERR, "can't mount with "
7274 + "dioread_nolock if block size != PAGE_SIZE");
7275 + return 0;
7276 + }
7277 + }
7278 return 1;
7279 }
7280
7281 @@ -2217,7 +2225,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
7282 __func__, inode->i_ino, inode->i_size);
7283 jbd_debug(2, "truncating inode %lu to %lld bytes\n",
7284 inode->i_ino, inode->i_size);
7285 + mutex_lock(&inode->i_mutex);
7286 ext4_truncate(inode);
7287 + mutex_unlock(&inode->i_mutex);
7288 nr_truncates++;
7289 } else {
7290 ext4_msg(sb, KERN_DEBUG,
7291 @@ -3446,15 +3456,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7292 clear_opt(sb, DELALLOC);
7293 }
7294
7295 - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
7296 - if (test_opt(sb, DIOREAD_NOLOCK)) {
7297 - if (blocksize < PAGE_SIZE) {
7298 - ext4_msg(sb, KERN_ERR, "can't mount with "
7299 - "dioread_nolock if block size != PAGE_SIZE");
7300 - goto failed_mount;
7301 - }
7302 - }
7303 -
7304 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
7305 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
7306
7307 @@ -3496,6 +3497,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7308 if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
7309 goto failed_mount;
7310
7311 + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
7312 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
7313 blocksize > EXT4_MAX_BLOCK_SIZE) {
7314 ext4_msg(sb, KERN_ERR,
7315 @@ -4729,7 +4731,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
7316 }
7317
7318 ext4_setup_system_zone(sb);
7319 - if (sbi->s_journal == NULL)
7320 + if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
7321 ext4_commit_super(sb, 1);
7322
7323 #ifdef CONFIG_QUOTA
7324 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
7325 index a74ba46..6873d24 100644
7326 --- a/fs/jbd2/transaction.c
7327 +++ b/fs/jbd2/transaction.c
7328 @@ -209,7 +209,8 @@ repeat:
7329 if (!new_transaction)
7330 goto alloc_transaction;
7331 write_lock(&journal->j_state_lock);
7332 - if (!journal->j_running_transaction) {
7333 + if (!journal->j_running_transaction &&
7334 + !journal->j_barrier_count) {
7335 jbd2_get_transaction(journal, new_transaction);
7336 new_transaction = NULL;
7337 }
7338 diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
7339 index 0c96eb5..0331072 100644
7340 --- a/fs/jffs2/nodemgmt.c
7341 +++ b/fs/jffs2/nodemgmt.c
7342 @@ -417,14 +417,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
7343 spin_unlock(&c->erase_completion_lock);
7344
7345 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
7346 - if (ret)
7347 - return ret;
7348 +
7349 /* Just lock it again and continue. Nothing much can change because
7350 we hold c->alloc_sem anyway. In fact, it's not entirely clear why
7351 we hold c->erase_completion_lock in the majority of this function...
7352 but that's a question for another (more caffeine-rich) day. */
7353 spin_lock(&c->erase_completion_lock);
7354
7355 + if (ret)
7356 + return ret;
7357 +
7358 waste = jeb->free_size;
7359 jffs2_link_node_ref(c, jeb,
7360 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
7361 diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
7362 index 1a4f6da..bdd840d 100644
7363 --- a/fs/pstore/ram.c
7364 +++ b/fs/pstore/ram.c
7365 @@ -374,10 +374,14 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
7366 goto fail_out;
7367 }
7368
7369 - pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
7370 - pdata->record_size = rounddown_pow_of_two(pdata->record_size);
7371 - pdata->console_size = rounddown_pow_of_two(pdata->console_size);
7372 - pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
7373 + if (!is_power_of_2(pdata->mem_size))
7374 + pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
7375 + if (!is_power_of_2(pdata->record_size))
7376 + pdata->record_size = rounddown_pow_of_two(pdata->record_size);
7377 + if (!is_power_of_2(pdata->console_size))
7378 + pdata->console_size = rounddown_pow_of_two(pdata->console_size);
7379 + if (!is_power_of_2(pdata->ftrace_size))
7380 + pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
7381
7382 cxt->dump_read_cnt = 0;
7383 cxt->size = pdata->mem_size;
7384 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
7385 index df88b95..8266f2e 100644
7386 --- a/fs/udf/inode.c
7387 +++ b/fs/udf/inode.c
7388 @@ -601,6 +601,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7389 struct udf_inode_info *iinfo = UDF_I(inode);
7390 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
7391 int lastblock = 0;
7392 + bool isBeyondEOF;
7393
7394 *err = 0;
7395 *new = 0;
7396 @@ -680,7 +681,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7397 /* Are we beyond EOF? */
7398 if (etype == -1) {
7399 int ret;
7400 -
7401 + isBeyondEOF = 1;
7402 if (count) {
7403 if (c)
7404 laarr[0] = laarr[1];
7405 @@ -723,6 +724,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7406 endnum = c + 1;
7407 lastblock = 1;
7408 } else {
7409 + isBeyondEOF = 0;
7410 endnum = startnum = ((count > 2) ? 2 : count);
7411
7412 /* if the current extent is in position 0,
7413 @@ -765,10 +767,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7414 goal, err);
7415 if (!newblocknum) {
7416 brelse(prev_epos.bh);
7417 + brelse(cur_epos.bh);
7418 + brelse(next_epos.bh);
7419 *err = -ENOSPC;
7420 return 0;
7421 }
7422 - iinfo->i_lenExtents += inode->i_sb->s_blocksize;
7423 + if (isBeyondEOF)
7424 + iinfo->i_lenExtents += inode->i_sb->s_blocksize;
7425 }
7426
7427 /* if the extent the requsted block is located in contains multiple
7428 @@ -795,6 +800,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7429 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
7430
7431 brelse(prev_epos.bh);
7432 + brelse(cur_epos.bh);
7433 + brelse(next_epos.bh);
7434
7435 newblock = udf_get_pblock(inode->i_sb, newblocknum,
7436 iinfo->i_location.partitionReferenceNum, 0);
7437 diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
7438 index 06d7f79..a1b66b7 100644
7439 --- a/include/drm/drm_mm.h
7440 +++ b/include/drm/drm_mm.h
7441 @@ -70,7 +70,7 @@ struct drm_mm {
7442 unsigned long scan_color;
7443 unsigned long scan_size;
7444 unsigned long scan_hit_start;
7445 - unsigned scan_hit_size;
7446 + unsigned long scan_hit_end;
7447 unsigned scanned_blocks;
7448 unsigned long scan_start;
7449 unsigned long scan_end;
7450 diff --git a/include/linux/audit.h b/include/linux/audit.h
7451 index bce729a..9d5104d 100644
7452 --- a/include/linux/audit.h
7453 +++ b/include/linux/audit.h
7454 @@ -157,7 +157,8 @@ void audit_core_dumps(long signr);
7455
7456 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
7457 {
7458 - if (unlikely(!audit_dummy_context()))
7459 + /* Force a record to be reported if a signal was delivered. */
7460 + if (signr || unlikely(!audit_dummy_context()))
7461 __audit_seccomp(syscall, signr, code);
7462 }
7463
7464 diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
7465 index 6470792..084d3c6 100644
7466 --- a/include/linux/ceph/libceph.h
7467 +++ b/include/linux/ceph/libceph.h
7468 @@ -43,7 +43,6 @@ struct ceph_options {
7469 struct ceph_entity_addr my_addr;
7470 int mount_timeout;
7471 int osd_idle_ttl;
7472 - int osd_timeout;
7473 int osd_keepalive_timeout;
7474
7475 /*
7476 @@ -63,7 +62,6 @@ struct ceph_options {
7477 * defaults
7478 */
7479 #define CEPH_MOUNT_TIMEOUT_DEFAULT 60
7480 -#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
7481 #define CEPH_OSD_KEEPALIVE_DEFAULT 5
7482 #define CEPH_OSD_IDLE_TTL_DEFAULT 60
7483
7484 diff --git a/include/linux/compaction.h b/include/linux/compaction.h
7485 index 6ecb6dc..cc7bdde 100644
7486 --- a/include/linux/compaction.h
7487 +++ b/include/linux/compaction.h
7488 @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
7489 extern int fragmentation_index(struct zone *zone, unsigned int order);
7490 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
7491 int order, gfp_t gfp_mask, nodemask_t *mask,
7492 - bool sync, bool *contended, struct page **page);
7493 + bool sync, bool *contended);
7494 extern int compact_pgdat(pg_data_t *pgdat, int order);
7495 extern void reset_isolation_suitable(pg_data_t *pgdat);
7496 extern unsigned long compaction_suitable(struct zone *zone, int order);
7497 @@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
7498 #else
7499 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
7500 int order, gfp_t gfp_mask, nodemask_t *nodemask,
7501 - bool sync, bool *contended, struct page **page)
7502 + bool sync, bool *contended)
7503 {
7504 return COMPACT_CONTINUE;
7505 }
7506 diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
7507 index c96ad68..956afa4 100644
7508 --- a/include/linux/mfd/da9055/core.h
7509 +++ b/include/linux/mfd/da9055/core.h
7510 @@ -1,4 +1,4 @@
7511 -/*
7512 +/*
7513 * da9055 declarations for DA9055 PMICs.
7514 *
7515 * Copyright(c) 2012 Dialog Semiconductor Ltd.
7516 diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
7517 index 147293b..b9b204e 100644
7518 --- a/include/linux/mfd/da9055/pdata.h
7519 +++ b/include/linux/mfd/da9055/pdata.h
7520 @@ -1,4 +1,4 @@
7521 -/* Copyright (C) 2012 Dialog Semiconductor Ltd.
7522 +/* Copyright (C) 2012 Dialog Semiconductor Ltd.
7523 *
7524 * This program is free software; you can redistribute it and/or modify
7525 * it under the terms of the GNU General Public License as published by
7526 diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
7527 index df237ee..2b592e0 100644
7528 --- a/include/linux/mfd/da9055/reg.h
7529 +++ b/include/linux/mfd/da9055/reg.h
7530 @@ -1,4 +1,4 @@
7531 -/*
7532 +/*
7533 * DA9055 declarations for DA9055 PMICs.
7534 *
7535 * Copyright(c) 2012 Dialog Semiconductor Ltd.
7536 diff --git a/include/linux/mm.h b/include/linux/mm.h
7537 index bcaab4e..280dae5 100644
7538 --- a/include/linux/mm.h
7539 +++ b/include/linux/mm.h
7540 @@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
7541
7542 void split_page(struct page *page, unsigned int order);
7543 int split_free_page(struct page *page);
7544 -int capture_free_page(struct page *page, int alloc_order, int migratetype);
7545
7546 /*
7547 * Compound pages have a destructor function. Provide a
7548 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
7549 index f792794..5dc9ee4 100644
7550 --- a/include/linux/sunrpc/cache.h
7551 +++ b/include/linux/sunrpc/cache.h
7552 @@ -217,6 +217,8 @@ extern int qword_get(char **bpp, char *dest, int bufsize);
7553 static inline int get_int(char **bpp, int *anint)
7554 {
7555 char buf[50];
7556 + char *ep;
7557 + int rv;
7558 int len = qword_get(bpp, buf, sizeof(buf));
7559
7560 if (len < 0)
7561 @@ -224,9 +226,11 @@ static inline int get_int(char **bpp, int *anint)
7562 if (len == 0)
7563 return -ENOENT;
7564
7565 - if (kstrtoint(buf, 0, anint))
7566 + rv = simple_strtol(buf, &ep, 0);
7567 + if (*ep)
7568 return -EINVAL;
7569
7570 + *anint = rv;
7571 return 0;
7572 }
7573
7574 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
7575 index 82558c8..d481cc6 100644
7576 --- a/include/net/mac80211.h
7577 +++ b/include/net/mac80211.h
7578 @@ -1253,6 +1253,10 @@ struct ieee80211_tx_control {
7579 * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
7580 * P2P Interface. This will be honoured even if more than one interface
7581 * is supported.
7582 + *
7583 + * @IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL: On this hardware TX BA session
7584 + * should be tear down once BAR frame will not be acked.
7585 + *
7586 */
7587 enum ieee80211_hw_flags {
7588 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
7589 @@ -1281,6 +1285,7 @@ enum ieee80211_hw_flags {
7590 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
7591 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24,
7592 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
7593 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL = 1<<26,
7594 };
7595
7596 /**
7597 diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
7598 index 76352ac..09a2d94 100644
7599 --- a/include/uapi/linux/audit.h
7600 +++ b/include/uapi/linux/audit.h
7601 @@ -106,6 +106,7 @@
7602 #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
7603 #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
7604 #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
7605 +#define AUDIT_SECCOMP 1326 /* Secure Computing event */
7606
7607 #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
7608 #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
7609 diff --git a/include/video/omap-panel-tfp410.h b/include/video/omap-panel-tfp410.h
7610 index 68c31d7..aef35e4 100644
7611 --- a/include/video/omap-panel-tfp410.h
7612 +++ b/include/video/omap-panel-tfp410.h
7613 @@ -28,7 +28,7 @@ struct omap_dss_device;
7614 * @power_down_gpio: gpio number for PD pin (or -1 if not available)
7615 */
7616 struct tfp410_platform_data {
7617 - u16 i2c_bus_num;
7618 + int i2c_bus_num;
7619 int power_down_gpio;
7620 };
7621
7622 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
7623 index 2f186ed..157e989 100644
7624 --- a/kernel/auditsc.c
7625 +++ b/kernel/auditsc.c
7626 @@ -2735,7 +2735,7 @@ void __audit_mmap_fd(int fd, int flags)
7627 context->type = AUDIT_MMAP;
7628 }
7629
7630 -static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7631 +static void audit_log_task(struct audit_buffer *ab)
7632 {
7633 kuid_t auid, uid;
7634 kgid_t gid;
7635 @@ -2753,6 +2753,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7636 audit_log_task_context(ab);
7637 audit_log_format(ab, " pid=%d comm=", current->pid);
7638 audit_log_untrustedstring(ab, current->comm);
7639 +}
7640 +
7641 +static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7642 +{
7643 + audit_log_task(ab);
7644 audit_log_format(ab, " reason=");
7645 audit_log_string(ab, reason);
7646 audit_log_format(ab, " sig=%ld", signr);
7647 @@ -2783,8 +2788,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
7648 {
7649 struct audit_buffer *ab;
7650
7651 - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
7652 - audit_log_abend(ab, "seccomp", signr);
7653 + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
7654 + if (unlikely(!ab))
7655 + return;
7656 + audit_log_task(ab);
7657 + audit_log_format(ab, " sig=%ld", signr);
7658 audit_log_format(ab, " syscall=%ld", syscall);
7659 audit_log_format(ab, " compat=%d", is_compat_task());
7660 audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
7661 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
7662 index c8c21be..762081c 100644
7663 --- a/kernel/watchdog.c
7664 +++ b/kernel/watchdog.c
7665 @@ -343,6 +343,10 @@ static void watchdog_enable(unsigned int cpu)
7666 {
7667 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
7668
7669 + /* kick off the timer for the hardlockup detector */
7670 + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7671 + hrtimer->function = watchdog_timer_fn;
7672 +
7673 if (!watchdog_enabled) {
7674 kthread_park(current);
7675 return;
7676 @@ -351,10 +355,6 @@ static void watchdog_enable(unsigned int cpu)
7677 /* Enable the perf event */
7678 watchdog_nmi_enable(cpu);
7679
7680 - /* kick off the timer for the hardlockup detector */
7681 - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7682 - hrtimer->function = watchdog_timer_fn;
7683 -
7684 /* done here because hrtimer_start can only pin to smp_processor_id() */
7685 hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
7686 HRTIMER_MODE_REL_PINNED);
7687 @@ -368,9 +368,6 @@ static void watchdog_disable(unsigned int cpu)
7688 {
7689 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
7690
7691 - if (!watchdog_enabled)
7692 - return;
7693 -
7694 watchdog_set_prio(SCHED_NORMAL, 0);
7695 hrtimer_cancel(hrtimer);
7696 /* disable the perf event */
7697 diff --git a/mm/bootmem.c b/mm/bootmem.c
7698 index f468185..af3d5af 100644
7699 --- a/mm/bootmem.c
7700 +++ b/mm/bootmem.c
7701 @@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
7702
7703 while (start < end) {
7704 unsigned long *map, idx, vec;
7705 + unsigned shift;
7706
7707 map = bdata->node_bootmem_map;
7708 idx = start - bdata->node_min_pfn;
7709 + shift = idx & (BITS_PER_LONG - 1);
7710 + /*
7711 + * vec holds at most BITS_PER_LONG map bits,
7712 + * bit 0 corresponds to start.
7713 + */
7714 vec = ~map[idx / BITS_PER_LONG];
7715 +
7716 + if (shift) {
7717 + vec >>= shift;
7718 + if (end - start >= BITS_PER_LONG)
7719 + vec |= ~map[idx / BITS_PER_LONG + 1] <<
7720 + (BITS_PER_LONG - shift);
7721 + }
7722 /*
7723 * If we have a properly aligned and fully unreserved
7724 * BITS_PER_LONG block of pages in front of us, free
7725 @@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
7726 count += BITS_PER_LONG;
7727 start += BITS_PER_LONG;
7728 } else {
7729 - unsigned long off = 0;
7730 + unsigned long cur = start;
7731
7732 - vec >>= start & (BITS_PER_LONG - 1);
7733 - while (vec) {
7734 + start = ALIGN(start + 1, BITS_PER_LONG);
7735 + while (vec && cur != start) {
7736 if (vec & 1) {
7737 - page = pfn_to_page(start + off);
7738 + page = pfn_to_page(cur);
7739 __free_pages_bootmem(page, 0);
7740 count++;
7741 }
7742 vec >>= 1;
7743 - off++;
7744 + ++cur;
7745 }
7746 - start = ALIGN(start + 1, BITS_PER_LONG);
7747 }
7748 }
7749
7750 diff --git a/mm/compaction.c b/mm/compaction.c
7751 index 694eaab..027ebb9 100644
7752 --- a/mm/compaction.c
7753 +++ b/mm/compaction.c
7754 @@ -214,60 +214,6 @@ static bool suitable_migration_target(struct page *page)
7755 return false;
7756 }
7757
7758 -static void compact_capture_page(struct compact_control *cc)
7759 -{
7760 - unsigned long flags;
7761 - int mtype, mtype_low, mtype_high;
7762 -
7763 - if (!cc->page || *cc->page)
7764 - return;
7765 -
7766 - /*
7767 - * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
7768 - * regardless of the migratetype of the freelist is is captured from.
7769 - * This is fine because the order for a high-order MIGRATE_MOVABLE
7770 - * allocation is typically at least a pageblock size and overall
7771 - * fragmentation is not impaired. Other allocation types must
7772 - * capture pages from their own migratelist because otherwise they
7773 - * could pollute other pageblocks like MIGRATE_MOVABLE with
7774 - * difficult to move pages and making fragmentation worse overall.
7775 - */
7776 - if (cc->migratetype == MIGRATE_MOVABLE) {
7777 - mtype_low = 0;
7778 - mtype_high = MIGRATE_PCPTYPES;
7779 - } else {
7780 - mtype_low = cc->migratetype;
7781 - mtype_high = cc->migratetype + 1;
7782 - }
7783 -
7784 - /* Speculatively examine the free lists without zone lock */
7785 - for (mtype = mtype_low; mtype < mtype_high; mtype++) {
7786 - int order;
7787 - for (order = cc->order; order < MAX_ORDER; order++) {
7788 - struct page *page;
7789 - struct free_area *area;
7790 - area = &(cc->zone->free_area[order]);
7791 - if (list_empty(&area->free_list[mtype]))
7792 - continue;
7793 -
7794 - /* Take the lock and attempt capture of the page */
7795 - if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
7796 - return;
7797 - if (!list_empty(&area->free_list[mtype])) {
7798 - page = list_entry(area->free_list[mtype].next,
7799 - struct page, lru);
7800 - if (capture_free_page(page, cc->order, mtype)) {
7801 - spin_unlock_irqrestore(&cc->zone->lock,
7802 - flags);
7803 - *cc->page = page;
7804 - return;
7805 - }
7806 - }
7807 - spin_unlock_irqrestore(&cc->zone->lock, flags);
7808 - }
7809 - }
7810 -}
7811 -
7812 /*
7813 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
7814 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
7815 @@ -831,6 +777,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
7816 static int compact_finished(struct zone *zone,
7817 struct compact_control *cc)
7818 {
7819 + unsigned int order;
7820 unsigned long watermark;
7821
7822 if (fatal_signal_pending(current))
7823 @@ -865,22 +812,16 @@ static int compact_finished(struct zone *zone,
7824 return COMPACT_CONTINUE;
7825
7826 /* Direct compactor: Is a suitable page free? */
7827 - if (cc->page) {
7828 - /* Was a suitable page captured? */
7829 - if (*cc->page)
7830 + for (order = cc->order; order < MAX_ORDER; order++) {
7831 + struct free_area *area = &zone->free_area[order];
7832 +
7833 + /* Job done if page is free of the right migratetype */
7834 + if (!list_empty(&area->free_list[cc->migratetype]))
7835 + return COMPACT_PARTIAL;
7836 +
7837 + /* Job done if allocation would set block type */
7838 + if (cc->order >= pageblock_order && area->nr_free)
7839 return COMPACT_PARTIAL;
7840 - } else {
7841 - unsigned int order;
7842 - for (order = cc->order; order < MAX_ORDER; order++) {
7843 - struct free_area *area = &zone->free_area[cc->order];
7844 - /* Job done if page is free of the right migratetype */
7845 - if (!list_empty(&area->free_list[cc->migratetype]))
7846 - return COMPACT_PARTIAL;
7847 -
7848 - /* Job done if allocation would set block type */
7849 - if (cc->order >= pageblock_order && area->nr_free)
7850 - return COMPACT_PARTIAL;
7851 - }
7852 }
7853
7854 return COMPACT_CONTINUE;
7855 @@ -1018,9 +959,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
7856 goto out;
7857 }
7858 }
7859 -
7860 - /* Capture a page now if it is a suitable size */
7861 - compact_capture_page(cc);
7862 }
7863
7864 out:
7865 @@ -1033,8 +971,7 @@ out:
7866
7867 static unsigned long compact_zone_order(struct zone *zone,
7868 int order, gfp_t gfp_mask,
7869 - bool sync, bool *contended,
7870 - struct page **page)
7871 + bool sync, bool *contended)
7872 {
7873 unsigned long ret;
7874 struct compact_control cc = {
7875 @@ -1044,7 +981,6 @@ static unsigned long compact_zone_order(struct zone *zone,
7876 .migratetype = allocflags_to_migratetype(gfp_mask),
7877 .zone = zone,
7878 .sync = sync,
7879 - .page = page,
7880 };
7881 INIT_LIST_HEAD(&cc.freepages);
7882 INIT_LIST_HEAD(&cc.migratepages);
7883 @@ -1074,7 +1010,7 @@ int sysctl_extfrag_threshold = 500;
7884 */
7885 unsigned long try_to_compact_pages(struct zonelist *zonelist,
7886 int order, gfp_t gfp_mask, nodemask_t *nodemask,
7887 - bool sync, bool *contended, struct page **page)
7888 + bool sync, bool *contended)
7889 {
7890 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
7891 int may_enter_fs = gfp_mask & __GFP_FS;
7892 @@ -1100,7 +1036,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
7893 int status;
7894
7895 status = compact_zone_order(zone, order, gfp_mask, sync,
7896 - contended, page);
7897 + contended);
7898 rc = max(status, rc);
7899
7900 /* If a normal allocation would succeed, stop compacting */
7901 @@ -1156,7 +1092,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
7902 struct compact_control cc = {
7903 .order = order,
7904 .sync = false,
7905 - .page = NULL,
7906 };
7907
7908 return __compact_pgdat(pgdat, &cc);
7909 @@ -1167,14 +1102,13 @@ static int compact_node(int nid)
7910 struct compact_control cc = {
7911 .order = -1,
7912 .sync = true,
7913 - .page = NULL,
7914 };
7915
7916 return __compact_pgdat(NODE_DATA(nid), &cc);
7917 }
7918
7919 /* Compact all nodes in the system */
7920 -static int compact_nodes(void)
7921 +static void compact_nodes(void)
7922 {
7923 int nid;
7924
7925 @@ -1183,8 +1117,6 @@ static int compact_nodes(void)
7926
7927 for_each_online_node(nid)
7928 compact_node(nid);
7929 -
7930 - return COMPACT_COMPLETE;
7931 }
7932
7933 /* The written value is actually unused, all memory is compacted */
7934 @@ -1195,7 +1127,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
7935 void __user *buffer, size_t *length, loff_t *ppos)
7936 {
7937 if (write)
7938 - return compact_nodes();
7939 + compact_nodes();
7940
7941 return 0;
7942 }
7943 diff --git a/mm/internal.h b/mm/internal.h
7944 index a4fa284..3c5197d 100644
7945 --- a/mm/internal.h
7946 +++ b/mm/internal.h
7947 @@ -130,7 +130,6 @@ struct compact_control {
7948 int migratetype; /* MOVABLE, RECLAIMABLE etc */
7949 struct zone *zone;
7950 bool contended; /* True if a lock was contended */
7951 - struct page **page; /* Page captured of requested size */
7952 };
7953
7954 unsigned long
7955 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7956 index 7e208f0..ceb4168 100644
7957 --- a/mm/page_alloc.c
7958 +++ b/mm/page_alloc.c
7959 @@ -1376,14 +1376,8 @@ void split_page(struct page *page, unsigned int order)
7960 set_page_refcounted(page + i);
7961 }
7962
7963 -/*
7964 - * Similar to the split_page family of functions except that the page
7965 - * required at the given order and being isolated now to prevent races
7966 - * with parallel allocators
7967 - */
7968 -int capture_free_page(struct page *page, int alloc_order, int migratetype)
7969 +static int __isolate_free_page(struct page *page, unsigned int order)
7970 {
7971 - unsigned int order;
7972 unsigned long watermark;
7973 struct zone *zone;
7974 int mt;
7975 @@ -1391,7 +1385,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
7976 BUG_ON(!PageBuddy(page));
7977
7978 zone = page_zone(page);
7979 - order = page_order(page);
7980
7981 /* Obey watermarks as if the page was being allocated */
7982 watermark = low_wmark_pages(zone) + (1 << order);
7983 @@ -1405,13 +1398,9 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
7984
7985 mt = get_pageblock_migratetype(page);
7986 if (unlikely(mt != MIGRATE_ISOLATE))
7987 - __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
7988 -
7989 - if (alloc_order != order)
7990 - expand(zone, page, alloc_order, order,
7991 - &zone->free_area[order], migratetype);
7992 + __mod_zone_freepage_state(zone, -(1UL << order), mt);
7993
7994 - /* Set the pageblock if the captured page is at least a pageblock */
7995 + /* Set the pageblock if the isolated page is at least a pageblock */
7996 if (order >= pageblock_order - 1) {
7997 struct page *endpage = page + (1 << order) - 1;
7998 for (; page < endpage; page += pageblock_nr_pages) {
7999 @@ -1422,7 +1411,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
8000 }
8001 }
8002
8003 - return 1UL << alloc_order;
8004 + return 1UL << order;
8005 }
8006
8007 /*
8008 @@ -1440,10 +1429,9 @@ int split_free_page(struct page *page)
8009 unsigned int order;
8010 int nr_pages;
8011
8012 - BUG_ON(!PageBuddy(page));
8013 order = page_order(page);
8014
8015 - nr_pages = capture_free_page(page, order, 0);
8016 + nr_pages = __isolate_free_page(page, order);
8017 if (!nr_pages)
8018 return 0;
8019
8020 @@ -2148,8 +2136,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8021 bool *contended_compaction, bool *deferred_compaction,
8022 unsigned long *did_some_progress)
8023 {
8024 - struct page *page = NULL;
8025 -
8026 if (!order)
8027 return NULL;
8028
8029 @@ -2161,16 +2147,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8030 current->flags |= PF_MEMALLOC;
8031 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
8032 nodemask, sync_migration,
8033 - contended_compaction, &page);
8034 + contended_compaction);
8035 current->flags &= ~PF_MEMALLOC;
8036
8037 - /* If compaction captured a page, prep and use it */
8038 - if (page) {
8039 - prep_new_page(page, order, gfp_mask);
8040 - goto got_page;
8041 - }
8042 -
8043 if (*did_some_progress != COMPACT_SKIPPED) {
8044 + struct page *page;
8045 +
8046 /* Page migration frees to the PCP lists but we want merging */
8047 drain_pages(get_cpu());
8048 put_cpu();
8049 @@ -2180,7 +2162,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8050 alloc_flags & ~ALLOC_NO_WATERMARKS,
8051 preferred_zone, migratetype);
8052 if (page) {
8053 -got_page:
8054 preferred_zone->compact_blockskip_flush = false;
8055 preferred_zone->compact_considered = 0;
8056 preferred_zone->compact_defer_shift = 0;
8057 @@ -5506,7 +5487,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
8058 pfn &= (PAGES_PER_SECTION-1);
8059 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
8060 #else
8061 - pfn = pfn - zone->zone_start_pfn;
8062 + pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
8063 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
8064 #endif /* CONFIG_SPARSEMEM */
8065 }
8066 diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
8067 index a802029..ee71ea2 100644
8068 --- a/net/ceph/ceph_common.c
8069 +++ b/net/ceph/ceph_common.c
8070 @@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
8071
8072 /* start with defaults */
8073 opt->flags = CEPH_OPT_DEFAULT;
8074 - opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
8075 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
8076 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
8077 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
8078 @@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
8079
8080 /* misc */
8081 case Opt_osdtimeout:
8082 - opt->osd_timeout = intval;
8083 + pr_warning("ignoring deprecated osdtimeout option\n");
8084 break;
8085 case Opt_osdkeepalivetimeout:
8086 opt->osd_keepalive_timeout = intval;
8087 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
8088 index 3ef1759..e9f2159 100644
8089 --- a/net/ceph/messenger.c
8090 +++ b/net/ceph/messenger.c
8091 @@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
8092 {
8093 /* reset connection, out_queue, msg_ and connect_seq */
8094 /* discard existing out_queue and msg_seq */
8095 + dout("reset_connection %p\n", con);
8096 ceph_msg_remove_list(&con->out_queue);
8097 ceph_msg_remove_list(&con->out_sent);
8098
8099 @@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
8100 mutex_lock(&con->mutex);
8101 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
8102
8103 - BUG_ON(con->state != CON_STATE_CLOSED);
8104 + WARN_ON(con->state != CON_STATE_CLOSED);
8105 con->state = CON_STATE_PREOPEN;
8106
8107 con->peer_name.type = (__u8) entity_type;
8108 @@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
8109 return 0;
8110 }
8111
8112 -static void fail_protocol(struct ceph_connection *con)
8113 -{
8114 - reset_connection(con);
8115 - BUG_ON(con->state != CON_STATE_NEGOTIATING);
8116 - con->state = CON_STATE_CLOSED;
8117 -}
8118 -
8119 static int process_connect(struct ceph_connection *con)
8120 {
8121 u64 sup_feat = con->msgr->supported_features;
8122 @@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
8123 ceph_pr_addr(&con->peer_addr.in_addr),
8124 sup_feat, server_feat, server_feat & ~sup_feat);
8125 con->error_msg = "missing required protocol features";
8126 - fail_protocol(con);
8127 + reset_connection(con);
8128 return -1;
8129
8130 case CEPH_MSGR_TAG_BADPROTOVER:
8131 @@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
8132 le32_to_cpu(con->out_connect.protocol_version),
8133 le32_to_cpu(con->in_reply.protocol_version));
8134 con->error_msg = "protocol version mismatch";
8135 - fail_protocol(con);
8136 + reset_connection(con);
8137 return -1;
8138
8139 case CEPH_MSGR_TAG_BADAUTHORIZER:
8140 @@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
8141 ceph_pr_addr(&con->peer_addr.in_addr),
8142 req_feat, server_feat, req_feat & ~server_feat);
8143 con->error_msg = "missing required protocol features";
8144 - fail_protocol(con);
8145 + reset_connection(con);
8146 return -1;
8147 }
8148
8149 - BUG_ON(con->state != CON_STATE_NEGOTIATING);
8150 + WARN_ON(con->state != CON_STATE_NEGOTIATING);
8151 con->state = CON_STATE_OPEN;
8152
8153 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
8154 @@ -2132,7 +2126,6 @@ more:
8155 if (ret < 0)
8156 goto out;
8157
8158 - BUG_ON(con->state != CON_STATE_CONNECTING);
8159 con->state = CON_STATE_NEGOTIATING;
8160
8161 /*
8162 @@ -2160,7 +2153,7 @@ more:
8163 goto more;
8164 }
8165
8166 - BUG_ON(con->state != CON_STATE_OPEN);
8167 + WARN_ON(con->state != CON_STATE_OPEN);
8168
8169 if (con->in_base_pos < 0) {
8170 /*
8171 @@ -2262,6 +2255,35 @@ static void queue_con(struct ceph_connection *con)
8172 }
8173 }
8174
8175 +static bool con_sock_closed(struct ceph_connection *con)
8176 +{
8177 + if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
8178 + return false;
8179 +
8180 +#define CASE(x) \
8181 + case CON_STATE_ ## x: \
8182 + con->error_msg = "socket closed (con state " #x ")"; \
8183 + break;
8184 +
8185 + switch (con->state) {
8186 + CASE(CLOSED);
8187 + CASE(PREOPEN);
8188 + CASE(CONNECTING);
8189 + CASE(NEGOTIATING);
8190 + CASE(OPEN);
8191 + CASE(STANDBY);
8192 + default:
8193 + pr_warning("%s con %p unrecognized state %lu\n",
8194 + __func__, con, con->state);
8195 + con->error_msg = "unrecognized con state";
8196 + BUG();
8197 + break;
8198 + }
8199 +#undef CASE
8200 +
8201 + return true;
8202 +}
8203 +
8204 /*
8205 * Do some work on a connection. Drop a connection ref when we're done.
8206 */
8207 @@ -2273,24 +2295,8 @@ static void con_work(struct work_struct *work)
8208
8209 mutex_lock(&con->mutex);
8210 restart:
8211 - if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
8212 - switch (con->state) {
8213 - case CON_STATE_CONNECTING:
8214 - con->error_msg = "connection failed";
8215 - break;
8216 - case CON_STATE_NEGOTIATING:
8217 - con->error_msg = "negotiation failed";
8218 - break;
8219 - case CON_STATE_OPEN:
8220 - con->error_msg = "socket closed";
8221 - break;
8222 - default:
8223 - dout("unrecognized con state %d\n", (int)con->state);
8224 - con->error_msg = "unrecognized con state";
8225 - BUG();
8226 - }
8227 + if (con_sock_closed(con))
8228 goto fault;
8229 - }
8230
8231 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
8232 dout("con_work %p backing off\n", con);
8233 @@ -2356,12 +2362,12 @@ fault:
8234 static void ceph_fault(struct ceph_connection *con)
8235 __releases(con->mutex)
8236 {
8237 - pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
8238 + pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
8239 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
8240 dout("fault %p state %lu to peer %s\n",
8241 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
8242
8243 - BUG_ON(con->state != CON_STATE_CONNECTING &&
8244 + WARN_ON(con->state != CON_STATE_CONNECTING &&
8245 con->state != CON_STATE_NEGOTIATING &&
8246 con->state != CON_STATE_OPEN);
8247
8248 diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
8249 index c1d756c..eb9a444 100644
8250 --- a/net/ceph/osd_client.c
8251 +++ b/net/ceph/osd_client.c
8252 @@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
8253 kref_init(&req->r_kref);
8254 init_completion(&req->r_completion);
8255 init_completion(&req->r_safe_completion);
8256 + RB_CLEAR_NODE(&req->r_node);
8257 INIT_LIST_HEAD(&req->r_unsafe_item);
8258 INIT_LIST_HEAD(&req->r_linger_item);
8259 INIT_LIST_HEAD(&req->r_linger_osd);
8260 @@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
8261
8262 dout("__kick_osd_requests osd%d\n", osd->o_osd);
8263 err = __reset_osd(osdc, osd);
8264 - if (err == -EAGAIN)
8265 + if (err)
8266 return;
8267
8268 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
8269 @@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
8270 }
8271 }
8272
8273 -static void kick_osd_requests(struct ceph_osd_client *osdc,
8274 - struct ceph_osd *kickosd)
8275 -{
8276 - mutex_lock(&osdc->request_mutex);
8277 - __kick_osd_requests(osdc, kickosd);
8278 - mutex_unlock(&osdc->request_mutex);
8279 -}
8280 -
8281 /*
8282 * If the osd connection drops, we need to resubmit all requests.
8283 */
8284 @@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
8285 dout("osd_reset osd%d\n", osd->o_osd);
8286 osdc = osd->o_osdc;
8287 down_read(&osdc->map_sem);
8288 - kick_osd_requests(osdc, osd);
8289 + mutex_lock(&osdc->request_mutex);
8290 + __kick_osd_requests(osdc, osd);
8291 + mutex_unlock(&osdc->request_mutex);
8292 send_queued(osdc);
8293 up_read(&osdc->map_sem);
8294 }
8295 @@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
8296 atomic_set(&osd->o_ref, 1);
8297 osd->o_osdc = osdc;
8298 osd->o_osd = onum;
8299 + RB_CLEAR_NODE(&osd->o_node);
8300 INIT_LIST_HEAD(&osd->o_requests);
8301 INIT_LIST_HEAD(&osd->o_linger_requests);
8302 INIT_LIST_HEAD(&osd->o_osd_lru);
8303 @@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
8304 if (list_empty(&osd->o_requests) &&
8305 list_empty(&osd->o_linger_requests)) {
8306 __remove_osd(osdc, osd);
8307 + ret = -ENODEV;
8308 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
8309 &osd->o_con.peer_addr,
8310 sizeof(osd->o_con.peer_addr)) == 0 &&
8311 @@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
8312 req->r_osd = NULL;
8313 }
8314
8315 + list_del_init(&req->r_req_lru_item);
8316 ceph_osdc_put_request(req);
8317
8318 - list_del_init(&req->r_req_lru_item);
8319 if (osdc->num_requests == 0) {
8320 dout(" no requests, canceling timeout\n");
8321 __cancel_osd_timeout(osdc);
8322 @@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
8323 struct ceph_osd_request *req)
8324 {
8325 dout("__unregister_linger_request %p\n", req);
8326 + list_del_init(&req->r_linger_item);
8327 if (req->r_osd) {
8328 - list_del_init(&req->r_linger_item);
8329 list_del_init(&req->r_linger_osd);
8330
8331 if (list_empty(&req->r_osd->o_requests) &&
8332 @@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
8333 {
8334 struct ceph_osd_client *osdc =
8335 container_of(work, struct ceph_osd_client, timeout_work.work);
8336 - struct ceph_osd_request *req, *last_req = NULL;
8337 + struct ceph_osd_request *req;
8338 struct ceph_osd *osd;
8339 - unsigned long timeout = osdc->client->options->osd_timeout * HZ;
8340 unsigned long keepalive =
8341 osdc->client->options->osd_keepalive_timeout * HZ;
8342 - unsigned long last_stamp = 0;
8343 struct list_head slow_osds;
8344 dout("timeout\n");
8345 down_read(&osdc->map_sem);
8346 @@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
8347 mutex_lock(&osdc->request_mutex);
8348
8349 /*
8350 - * reset osds that appear to be _really_ unresponsive. this
8351 - * is a failsafe measure.. we really shouldn't be getting to
8352 - * this point if the system is working properly. the monitors
8353 - * should mark the osd as failed and we should find out about
8354 - * it from an updated osd map.
8355 - */
8356 - while (timeout && !list_empty(&osdc->req_lru)) {
8357 - req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
8358 - r_req_lru_item);
8359 -
8360 - /* hasn't been long enough since we sent it? */
8361 - if (time_before(jiffies, req->r_stamp + timeout))
8362 - break;
8363 -
8364 - /* hasn't been long enough since it was acked? */
8365 - if (req->r_request->ack_stamp == 0 ||
8366 - time_before(jiffies, req->r_request->ack_stamp + timeout))
8367 - break;
8368 -
8369 - BUG_ON(req == last_req && req->r_stamp == last_stamp);
8370 - last_req = req;
8371 - last_stamp = req->r_stamp;
8372 -
8373 - osd = req->r_osd;
8374 - BUG_ON(!osd);
8375 - pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
8376 - req->r_tid, osd->o_osd);
8377 - __kick_osd_requests(osdc, osd);
8378 - }
8379 -
8380 - /*
8381 * ping osds that are a bit slow. this ensures that if there
8382 * is a break in the TCP connection we will notice, and reopen
8383 * a connection with that osd (from the fault callback).
8384 @@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
8385 * Requeue requests whose mapping to an OSD has changed. If requests map to
8386 * no osd, request a new map.
8387 *
8388 - * Caller should hold map_sem for read and request_mutex.
8389 + * Caller should hold map_sem for read.
8390 */
8391 static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8392 {
8393 @@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8394 for (p = rb_first(&osdc->requests); p; ) {
8395 req = rb_entry(p, struct ceph_osd_request, r_node);
8396 p = rb_next(p);
8397 +
8398 + /*
8399 + * For linger requests that have not yet been
8400 + * registered, move them to the linger list; they'll
8401 + * be sent to the osd in the loop below. Unregister
8402 + * the request before re-registering it as a linger
8403 + * request to ensure the __map_request() below
8404 + * will decide it needs to be sent.
8405 + */
8406 + if (req->r_linger && list_empty(&req->r_linger_item)) {
8407 + dout("%p tid %llu restart on osd%d\n",
8408 + req, req->r_tid,
8409 + req->r_osd ? req->r_osd->o_osd : -1);
8410 + __unregister_request(osdc, req);
8411 + __register_linger_request(osdc, req);
8412 + continue;
8413 + }
8414 +
8415 err = __map_request(osdc, req, force_resend);
8416 if (err < 0)
8417 continue; /* error */
8418 @@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8419 req->r_flags |= CEPH_OSD_FLAG_RETRY;
8420 }
8421 }
8422 - if (req->r_linger && list_empty(&req->r_linger_item)) {
8423 - /*
8424 - * register as a linger so that we will
8425 - * re-submit below and get a new tid
8426 - */
8427 - dout("%p tid %llu restart on osd%d\n",
8428 - req, req->r_tid,
8429 - req->r_osd ? req->r_osd->o_osd : -1);
8430 - __register_linger_request(osdc, req);
8431 - __unregister_request(osdc, req);
8432 - }
8433 }
8434
8435 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
8436 @@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8437 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
8438
8439 err = __map_request(osdc, req, force_resend);
8440 + dout("__map_request returned %d\n", err);
8441 if (err == 0)
8442 continue; /* no change and no osd was specified */
8443 if (err < 0)
8444 @@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8445
8446 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
8447 req->r_osd ? req->r_osd->o_osd : -1);
8448 - __unregister_linger_request(osdc, req);
8449 __register_request(osdc, req);
8450 + __unregister_linger_request(osdc, req);
8451 }
8452 mutex_unlock(&osdc->request_mutex);
8453
8454 @@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8455 dout("%d requests for down osds, need new map\n", needmap);
8456 ceph_monc_request_next_osdmap(&osdc->client->monc);
8457 }
8458 + reset_changed_osds(osdc);
8459 }
8460
8461
8462 @@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
8463 osdc->osdmap = newmap;
8464 }
8465 kick_requests(osdc, 0);
8466 - reset_changed_osds(osdc);
8467 } else {
8468 dout("ignoring incremental map %u len %d\n",
8469 epoch, maplen);
8470 @@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
8471 event->data = data;
8472 event->osdc = osdc;
8473 INIT_LIST_HEAD(&event->osd_node);
8474 + RB_CLEAR_NODE(&event->node);
8475 kref_init(&event->kref); /* one ref for us */
8476 kref_get(&event->kref); /* one ref for the caller */
8477 init_completion(&event->completion);
8478 diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
8479 index 5433fb0..f552aa4 100644
8480 --- a/net/ceph/osdmap.c
8481 +++ b/net/ceph/osdmap.c
8482 @@ -645,10 +645,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8483 ceph_decode_32_safe(p, end, max, bad);
8484 while (max--) {
8485 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
8486 + err = -ENOMEM;
8487 pi = kzalloc(sizeof(*pi), GFP_NOFS);
8488 if (!pi)
8489 goto bad;
8490 pi->id = ceph_decode_32(p);
8491 + err = -EINVAL;
8492 ev = ceph_decode_8(p); /* encoding version */
8493 if (ev > CEPH_PG_POOL_VERSION) {
8494 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
8495 @@ -664,8 +666,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8496 __insert_pg_pool(&map->pg_pools, pi);
8497 }
8498
8499 - if (version >= 5 && __decode_pool_names(p, end, map) < 0)
8500 - goto bad;
8501 + if (version >= 5) {
8502 + err = __decode_pool_names(p, end, map);
8503 + if (err < 0) {
8504 + dout("fail to decode pool names");
8505 + goto bad;
8506 + }
8507 + }
8508
8509 ceph_decode_32_safe(p, end, map->pool_max, bad);
8510
8511 @@ -745,7 +752,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8512 return map;
8513
8514 bad:
8515 - dout("osdmap_decode fail\n");
8516 + dout("osdmap_decode fail err %d\n", err);
8517 ceph_osdmap_destroy(map);
8518 return ERR_PTR(err);
8519 }
8520 @@ -839,6 +846,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8521 if (ev > CEPH_PG_POOL_VERSION) {
8522 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
8523 ev, CEPH_PG_POOL_VERSION);
8524 + err = -EINVAL;
8525 goto bad;
8526 }
8527 pi = __lookup_pg_pool(&map->pg_pools, pool);
8528 @@ -855,8 +863,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8529 if (err < 0)
8530 goto bad;
8531 }
8532 - if (version >= 5 && __decode_pool_names(p, end, map) < 0)
8533 - goto bad;
8534 + if (version >= 5) {
8535 + err = __decode_pool_names(p, end, map);
8536 + if (err < 0)
8537 + goto bad;
8538 + }
8539
8540 /* old_pool */
8541 ceph_decode_32_safe(p, end, len, bad);
8542 @@ -932,15 +943,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8543 (void) __remove_pg_mapping(&map->pg_temp, pgid);
8544
8545 /* insert */
8546 - if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
8547 - err = -EINVAL;
8548 + err = -EINVAL;
8549 + if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
8550 goto bad;
8551 - }
8552 + err = -ENOMEM;
8553 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
8554 - if (!pg) {
8555 - err = -ENOMEM;
8556 + if (!pg)
8557 goto bad;
8558 - }
8559 pg->pgid = pgid;
8560 pg->len = pglen;
8561 for (j = 0; j < pglen; j++)
8562 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
8563 index c21e33d..d9df6b8 100644
8564 --- a/net/mac80211/ibss.c
8565 +++ b/net/mac80211/ibss.c
8566 @@ -678,8 +678,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
8567 sdata_info(sdata,
8568 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
8569
8570 - ieee80211_request_internal_scan(sdata,
8571 - ifibss->ssid, ifibss->ssid_len, NULL);
8572 + ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
8573 + NULL);
8574 }
8575
8576 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
8577 @@ -777,9 +777,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
8578 IEEE80211_SCAN_INTERVAL)) {
8579 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
8580
8581 - ieee80211_request_internal_scan(sdata,
8582 - ifibss->ssid, ifibss->ssid_len,
8583 - ifibss->fixed_channel ? ifibss->channel : NULL);
8584 + ieee80211_request_ibss_scan(sdata, ifibss->ssid,
8585 + ifibss->ssid_len, chan);
8586 } else {
8587 int interval = IEEE80211_SCAN_INTERVAL;
8588
8589 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
8590 index 156e583..3da215c 100644
8591 --- a/net/mac80211/ieee80211_i.h
8592 +++ b/net/mac80211/ieee80211_i.h
8593 @@ -730,6 +730,10 @@ struct ieee80211_sub_if_data {
8594 u32 mntr_flags;
8595 } u;
8596
8597 + spinlock_t cleanup_stations_lock;
8598 + struct list_head cleanup_stations;
8599 + struct work_struct cleanup_stations_wk;
8600 +
8601 #ifdef CONFIG_MAC80211_DEBUGFS
8602 struct {
8603 struct dentry *dir;
8604 @@ -1247,9 +1251,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
8605
8606 /* scan/BSS handling */
8607 void ieee80211_scan_work(struct work_struct *work);
8608 -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8609 - const u8 *ssid, u8 ssid_len,
8610 - struct ieee80211_channel *chan);
8611 +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
8612 + const u8 *ssid, u8 ssid_len,
8613 + struct ieee80211_channel *chan);
8614 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
8615 struct cfg80211_scan_request *req);
8616 void ieee80211_scan_cancel(struct ieee80211_local *local);
8617 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
8618 index 7de7717..0f5af91 100644
8619 --- a/net/mac80211/iface.c
8620 +++ b/net/mac80211/iface.c
8621 @@ -793,20 +793,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
8622 flush_work(&sdata->work);
8623 /*
8624 * When we get here, the interface is marked down.
8625 - * Call rcu_barrier() to wait both for the RX path
8626 + * Call synchronize_rcu() to wait for the RX path
8627 * should it be using the interface and enqueuing
8628 - * frames at this very time on another CPU, and
8629 - * for the sta free call_rcu callbacks.
8630 + * frames at this very time on another CPU.
8631 */
8632 - rcu_barrier();
8633 -
8634 - /*
8635 - * free_sta_rcu() enqueues a work for the actual
8636 - * sta cleanup, so we need to flush it while
8637 - * sdata is still valid.
8638 - */
8639 - flush_workqueue(local->workqueue);
8640 -
8641 + synchronize_rcu();
8642 skb_queue_purge(&sdata->skb_queue);
8643
8644 /*
8645 @@ -1432,6 +1423,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
8646 mutex_unlock(&local->iflist_mtx);
8647 }
8648
8649 +static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
8650 +{
8651 + struct ieee80211_sub_if_data *sdata;
8652 +
8653 + sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
8654 +
8655 + ieee80211_cleanup_sdata_stas(sdata);
8656 +}
8657 +
8658 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
8659 struct wireless_dev **new_wdev, enum nl80211_iftype type,
8660 struct vif_params *params)
8661 @@ -1507,6 +1507,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
8662
8663 INIT_LIST_HEAD(&sdata->key_list);
8664
8665 + spin_lock_init(&sdata->cleanup_stations_lock);
8666 + INIT_LIST_HEAD(&sdata->cleanup_stations);
8667 + INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
8668 +
8669 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
8670 struct ieee80211_supported_band *sband;
8671 sband = local->hw.wiphy->bands[i];
8672 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
8673 index 43e60b5..fab706f 100644
8674 --- a/net/mac80211/scan.c
8675 +++ b/net/mac80211/scan.c
8676 @@ -819,9 +819,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
8677 return res;
8678 }
8679
8680 -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8681 - const u8 *ssid, u8 ssid_len,
8682 - struct ieee80211_channel *chan)
8683 +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
8684 + const u8 *ssid, u8 ssid_len,
8685 + struct ieee80211_channel *chan)
8686 {
8687 struct ieee80211_local *local = sdata->local;
8688 int ret = -EBUSY;
8689 @@ -835,22 +835,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8690
8691 /* fill internal scan request */
8692 if (!chan) {
8693 - int i, nchan = 0;
8694 + int i, max_n;
8695 + int n_ch = 0;
8696
8697 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
8698 if (!local->hw.wiphy->bands[band])
8699 continue;
8700 - for (i = 0;
8701 - i < local->hw.wiphy->bands[band]->n_channels;
8702 - i++) {
8703 - local->int_scan_req->channels[nchan] =
8704 +
8705 + max_n = local->hw.wiphy->bands[band]->n_channels;
8706 + for (i = 0; i < max_n; i++) {
8707 + struct ieee80211_channel *tmp_ch =
8708 &local->hw.wiphy->bands[band]->channels[i];
8709 - nchan++;
8710 +
8711 + if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
8712 + IEEE80211_CHAN_DISABLED))
8713 + continue;
8714 +
8715 + local->int_scan_req->channels[n_ch] = tmp_ch;
8716 + n_ch++;
8717 }
8718 }
8719
8720 - local->int_scan_req->n_channels = nchan;
8721 + if (WARN_ON_ONCE(n_ch == 0))
8722 + goto unlock;
8723 +
8724 + local->int_scan_req->n_channels = n_ch;
8725 } else {
8726 + if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
8727 + IEEE80211_CHAN_DISABLED)))
8728 + goto unlock;
8729 +
8730 local->int_scan_req->channels[0] = chan;
8731 local->int_scan_req->n_channels = 1;
8732 }
8733 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
8734 index d2eb64e..8a9931b 100644
8735 --- a/net/mac80211/sta_info.c
8736 +++ b/net/mac80211/sta_info.c
8737 @@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
8738 return -ENOENT;
8739 }
8740
8741 -static void free_sta_work(struct work_struct *wk)
8742 +static void cleanup_single_sta(struct sta_info *sta)
8743 {
8744 - struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
8745 int ac, i;
8746 struct tid_ampdu_tx *tid_tx;
8747 struct ieee80211_sub_if_data *sdata = sta->sdata;
8748 @@ -148,11 +147,35 @@ static void free_sta_work(struct work_struct *wk)
8749 sta_info_free(local, sta);
8750 }
8751
8752 +void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
8753 +{
8754 + struct sta_info *sta;
8755 +
8756 + spin_lock_bh(&sdata->cleanup_stations_lock);
8757 + while (!list_empty(&sdata->cleanup_stations)) {
8758 + sta = list_first_entry(&sdata->cleanup_stations,
8759 + struct sta_info, list);
8760 + list_del(&sta->list);
8761 + spin_unlock_bh(&sdata->cleanup_stations_lock);
8762 +
8763 + cleanup_single_sta(sta);
8764 +
8765 + spin_lock_bh(&sdata->cleanup_stations_lock);
8766 + }
8767 +
8768 + spin_unlock_bh(&sdata->cleanup_stations_lock);
8769 +}
8770 +
8771 static void free_sta_rcu(struct rcu_head *h)
8772 {
8773 struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
8774 + struct ieee80211_sub_if_data *sdata = sta->sdata;
8775
8776 - ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
8777 + spin_lock(&sdata->cleanup_stations_lock);
8778 + list_add_tail(&sta->list, &sdata->cleanup_stations);
8779 + spin_unlock(&sdata->cleanup_stations_lock);
8780 +
8781 + ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
8782 }
8783
8784 /* protected by RCU */
8785 @@ -305,7 +328,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
8786
8787 spin_lock_init(&sta->lock);
8788 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
8789 - INIT_WORK(&sta->free_sta_wk, free_sta_work);
8790 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
8791 mutex_init(&sta->ampdu_mlme.mtx);
8792
8793 @@ -848,7 +870,7 @@ void sta_info_init(struct ieee80211_local *local)
8794
8795 void sta_info_stop(struct ieee80211_local *local)
8796 {
8797 - del_timer(&local->sta_cleanup);
8798 + del_timer_sync(&local->sta_cleanup);
8799 sta_info_flush(local, NULL);
8800 }
8801
8802 @@ -877,6 +899,20 @@ int sta_info_flush(struct ieee80211_local *local,
8803 }
8804 mutex_unlock(&local->sta_mtx);
8805
8806 + rcu_barrier();
8807 +
8808 + if (sdata) {
8809 + ieee80211_cleanup_sdata_stas(sdata);
8810 + cancel_work_sync(&sdata->cleanup_stations_wk);
8811 + } else {
8812 + mutex_lock(&local->iflist_mtx);
8813 + list_for_each_entry(sdata, &local->interfaces, list) {
8814 + ieee80211_cleanup_sdata_stas(sdata);
8815 + cancel_work_sync(&sdata->cleanup_stations_wk);
8816 + }
8817 + mutex_unlock(&local->iflist_mtx);
8818 + }
8819 +
8820 return ret;
8821 }
8822
8823 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
8824 index c88f161f..3c4c0f6 100644
8825 --- a/net/mac80211/sta_info.h
8826 +++ b/net/mac80211/sta_info.h
8827 @@ -298,7 +298,6 @@ struct sta_info {
8828 spinlock_t lock;
8829
8830 struct work_struct drv_unblock_wk;
8831 - struct work_struct free_sta_wk;
8832
8833 u16 listen_interval;
8834
8835 @@ -558,4 +557,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
8836 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
8837 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
8838
8839 +void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
8840 +
8841 #endif /* STA_INFO_H */
8842 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
8843 index 101eb88..c511e9c 100644
8844 --- a/net/mac80211/status.c
8845 +++ b/net/mac80211/status.c
8846 @@ -432,7 +432,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
8847 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
8848 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
8849
8850 - ieee80211_set_bar_pending(sta, tid, ssn);
8851 + if (local->hw.flags &
8852 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
8853 + ieee80211_stop_tx_ba_session(&sta->sta, tid);
8854 + else
8855 + ieee80211_set_bar_pending(sta, tid, ssn);
8856 }
8857 }
8858
8859 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
8860 index cdc7564..10b18b2 100644
8861 --- a/net/sunrpc/clnt.c
8862 +++ b/net/sunrpc/clnt.c
8863 @@ -234,7 +234,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
8864 spin_lock(&sn->rpc_client_lock);
8865 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
8866 if (clnt->cl_program->pipe_dir_name == NULL)
8867 - break;
8868 + continue;
8869 if (rpc_clnt_skip_event(clnt, event))
8870 continue;
8871 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
8872 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
8873 index 80f5dd2..e659def 100644
8874 --- a/net/sunrpc/rpc_pipe.c
8875 +++ b/net/sunrpc/rpc_pipe.c
8876 @@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
8877 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
8878
8879 mutex_lock(&sn->pipefs_sb_lock);
8880 + if (sn->pipefs_sb != sb) {
8881 + mutex_unlock(&sn->pipefs_sb_lock);
8882 + goto out;
8883 + }
8884 sn->pipefs_sb = NULL;
8885 mutex_unlock(&sn->pipefs_sb_lock);
8886 - put_net(net);
8887 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
8888 net, NET_NAME(net));
8889 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
8890 RPC_PIPEFS_UMOUNT,
8891 sb);
8892 + put_net(net);
8893 +out:
8894 kill_litter_super(sb);
8895 }
8896
8897 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
8898 index 6357fcb..7865b44 100644
8899 --- a/net/sunrpc/sched.c
8900 +++ b/net/sunrpc/sched.c
8901 @@ -919,16 +919,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
8902 return task;
8903 }
8904
8905 +/*
8906 + * rpc_free_task - release rpc task and perform cleanups
8907 + *
8908 + * Note that we free up the rpc_task _after_ rpc_release_calldata()
8909 + * in order to work around a workqueue dependency issue.
8910 + *
8911 + * Tejun Heo states:
8912 + * "Workqueue currently considers two work items to be the same if they're
8913 + * on the same address and won't execute them concurrently - ie. it
8914 + * makes a work item which is queued again while being executed wait
8915 + * for the previous execution to complete.
8916 + *
8917 + * If a work function frees the work item, and then waits for an event
8918 + * which should be performed by another work item and *that* work item
8919 + * recycles the freed work item, it can create a false dependency loop.
8920 + * There really is no reliable way to detect this short of verifying
8921 + * every memory free."
8922 + *
8923 + */
8924 static void rpc_free_task(struct rpc_task *task)
8925 {
8926 - const struct rpc_call_ops *tk_ops = task->tk_ops;
8927 - void *calldata = task->tk_calldata;
8928 + unsigned short tk_flags = task->tk_flags;
8929 +
8930 + rpc_release_calldata(task->tk_ops, task->tk_calldata);
8931
8932 - if (task->tk_flags & RPC_TASK_DYNAMIC) {
8933 + if (tk_flags & RPC_TASK_DYNAMIC) {
8934 dprintk("RPC: %5u freeing task\n", task->tk_pid);
8935 mempool_free(task, rpc_task_mempool);
8936 }
8937 - rpc_release_calldata(tk_ops, calldata);
8938 }
8939
8940 static void rpc_async_release(struct work_struct *work)
8941 @@ -938,8 +957,7 @@ static void rpc_async_release(struct work_struct *work)
8942
8943 static void rpc_release_resources_task(struct rpc_task *task)
8944 {
8945 - if (task->tk_rqstp)
8946 - xprt_release(task);
8947 + xprt_release(task);
8948 if (task->tk_msg.rpc_cred) {
8949 put_rpccred(task->tk_msg.rpc_cred);
8950 task->tk_msg.rpc_cred = NULL;
8951 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
8952 index bd462a5..33811db 100644
8953 --- a/net/sunrpc/xprt.c
8954 +++ b/net/sunrpc/xprt.c
8955 @@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
8956 void xprt_release(struct rpc_task *task)
8957 {
8958 struct rpc_xprt *xprt;
8959 - struct rpc_rqst *req;
8960 + struct rpc_rqst *req = task->tk_rqstp;
8961
8962 - if (!(req = task->tk_rqstp))
8963 + if (req == NULL) {
8964 + if (task->tk_client) {
8965 + rcu_read_lock();
8966 + xprt = rcu_dereference(task->tk_client->cl_xprt);
8967 + if (xprt->snd_task == task)
8968 + xprt_release_write(xprt, task);
8969 + rcu_read_unlock();
8970 + }
8971 return;
8972 + }
8973
8974 xprt = req->rq_xprt;
8975 if (task->tk_ops->rpc_count_stats != NULL)
8976 diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
8977 index 48d7c0a..bd3ba88 100644
8978 --- a/sound/arm/pxa2xx-ac97-lib.c
8979 +++ b/sound/arm/pxa2xx-ac97-lib.c
8980 @@ -18,6 +18,7 @@
8981 #include <linux/delay.h>
8982 #include <linux/module.h>
8983 #include <linux/io.h>
8984 +#include <linux/gpio.h>
8985
8986 #include <sound/ac97_codec.h>
8987 #include <sound/pxa2xx-lib.h>
8988 @@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
8989
8990 static inline void pxa_ac97_cold_pxa27x(void)
8991 {
8992 + unsigned int timeout;
8993 +
8994 GCR &= GCR_COLD_RST; /* clear everything but nCRST */
8995 GCR &= ~GCR_COLD_RST; /* then assert nCRST */
8996
8997 @@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
8998 clk_enable(ac97conf_clk);
8999 udelay(5);
9000 clk_disable(ac97conf_clk);
9001 - GCR = GCR_COLD_RST;
9002 - udelay(50);
9003 + GCR = GCR_COLD_RST | GCR_WARM_RST;
9004 + timeout = 100; /* wait for the codec-ready bit to be set */
9005 + while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
9006 + mdelay(1);
9007 }
9008 #endif
9009
9010 @@ -340,8 +345,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
9011 }
9012
9013 if (cpu_is_pxa27x()) {
9014 - /* Use GPIO 113 as AC97 Reset on Bulverde */
9015 + /*
9016 + * This gpio is needed for a work-around to a bug in the ac97
9017 + * controller during warm reset. The direction and level is set
9018 + * here so that it is an output driven high when switching from
9019 + * AC97_nRESET alt function to generic gpio.
9020 + */
9021 + ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
9022 + "pxa27x ac97 reset");
9023 + if (ret < 0) {
9024 + pr_err("%s: gpio_request_one() failed: %d\n",
9025 + __func__, ret);
9026 + goto err_conf;
9027 + }
9028 pxa27x_assert_ac97reset(reset_gpio, 0);
9029 +
9030 ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
9031 if (IS_ERR(ac97conf_clk)) {
9032 ret = PTR_ERR(ac97conf_clk);
9033 @@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
9034
9035 void pxa2xx_ac97_hw_remove(struct platform_device *dev)
9036 {
9037 + if (cpu_is_pxa27x())
9038 + gpio_free(reset_gpio);
9039 GCR |= GCR_ACLINK_OFF;
9040 free_irq(IRQ_AC97, NULL);
9041 if (ac97conf_clk) {
9042 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
9043 index a9652d6..f419f0a9 100644
9044 --- a/sound/pci/hda/hda_intel.c
9045 +++ b/sound/pci/hda/hda_intel.c
9046 @@ -559,9 +559,12 @@ enum {
9047 #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
9048
9049 /* quirks for Intel PCH */
9050 -#define AZX_DCAPS_INTEL_PCH \
9051 +#define AZX_DCAPS_INTEL_PCH_NOPM \
9052 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
9053 - AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
9054 + AZX_DCAPS_COUNT_LPIB_DELAY)
9055 +
9056 +#define AZX_DCAPS_INTEL_PCH \
9057 + (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
9058
9059 /* quirks for ATI SB / AMD Hudson */
9060 #define AZX_DCAPS_PRESET_ATI_SB \
9061 @@ -3448,13 +3451,13 @@ static void __devexit azx_remove(struct pci_dev *pci)
9062 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
9063 /* CPT */
9064 { PCI_DEVICE(0x8086, 0x1c20),
9065 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9066 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9067 /* PBG */
9068 { PCI_DEVICE(0x8086, 0x1d20),
9069 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9070 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9071 /* Panther Point */
9072 { PCI_DEVICE(0x8086, 0x1e20),
9073 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9074 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9075 /* Lynx Point */
9076 { PCI_DEVICE(0x8086, 0x8c20),
9077 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9078 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
9079 index a7b522a..8799cf1 100644
9080 --- a/sound/pci/hda/patch_conexant.c
9081 +++ b/sound/pci/hda/patch_conexant.c
9082 @@ -553,24 +553,12 @@ static int conexant_build_controls(struct hda_codec *codec)
9083 return 0;
9084 }
9085
9086 -#ifdef CONFIG_PM
9087 -static int conexant_suspend(struct hda_codec *codec)
9088 -{
9089 - snd_hda_shutup_pins(codec);
9090 - return 0;
9091 -}
9092 -#endif
9093 -
9094 static const struct hda_codec_ops conexant_patch_ops = {
9095 .build_controls = conexant_build_controls,
9096 .build_pcms = conexant_build_pcms,
9097 .init = conexant_init,
9098 .free = conexant_free,
9099 .set_power_state = conexant_set_power,
9100 -#ifdef CONFIG_PM
9101 - .suspend = conexant_suspend,
9102 -#endif
9103 - .reboot_notify = snd_hda_shutup_pins,
9104 };
9105
9106 #ifdef CONFIG_SND_HDA_INPUT_BEEP
9107 @@ -4393,10 +4381,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
9108 .init = cx_auto_init,
9109 .free = conexant_free,
9110 .unsol_event = snd_hda_jack_unsol_event,
9111 -#ifdef CONFIG_PM
9112 - .suspend = conexant_suspend,
9113 -#endif
9114 - .reboot_notify = snd_hda_shutup_pins,
9115 };
9116
9117 /*
9118 diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
9119 index 054967d..08ae3cb 100644
9120 --- a/sound/soc/codecs/arizona.c
9121 +++ b/sound/soc/codecs/arizona.c
9122 @@ -409,15 +409,9 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9123 case SND_SOC_DAIFMT_DSP_A:
9124 mode = 0;
9125 break;
9126 - case SND_SOC_DAIFMT_DSP_B:
9127 - mode = 1;
9128 - break;
9129 case SND_SOC_DAIFMT_I2S:
9130 mode = 2;
9131 break;
9132 - case SND_SOC_DAIFMT_LEFT_J:
9133 - mode = 3;
9134 - break;
9135 default:
9136 arizona_aif_err(dai, "Unsupported DAI format %d\n",
9137 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9138 @@ -677,7 +671,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
9139 snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
9140 ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
9141 snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
9142 - ARIZONA_AIF1_RATE_MASK, 8);
9143 + ARIZONA_AIF1_RATE_MASK,
9144 + 8 << ARIZONA_AIF1_RATE_SHIFT);
9145 break;
9146 default:
9147 arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
9148 diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
9149 index 36ec649..cff8dfb 100644
9150 --- a/sound/soc/codecs/arizona.h
9151 +++ b/sound/soc/codecs/arizona.h
9152 @@ -32,15 +32,15 @@
9153
9154 #define ARIZONA_FLL_SRC_MCLK1 0
9155 #define ARIZONA_FLL_SRC_MCLK2 1
9156 -#define ARIZONA_FLL_SRC_SLIMCLK 2
9157 -#define ARIZONA_FLL_SRC_FLL1 3
9158 -#define ARIZONA_FLL_SRC_FLL2 4
9159 -#define ARIZONA_FLL_SRC_AIF1BCLK 5
9160 -#define ARIZONA_FLL_SRC_AIF2BCLK 6
9161 -#define ARIZONA_FLL_SRC_AIF3BCLK 7
9162 -#define ARIZONA_FLL_SRC_AIF1LRCLK 8
9163 -#define ARIZONA_FLL_SRC_AIF2LRCLK 9
9164 -#define ARIZONA_FLL_SRC_AIF3LRCLK 10
9165 +#define ARIZONA_FLL_SRC_SLIMCLK 3
9166 +#define ARIZONA_FLL_SRC_FLL1 4
9167 +#define ARIZONA_FLL_SRC_FLL2 5
9168 +#define ARIZONA_FLL_SRC_AIF1BCLK 8
9169 +#define ARIZONA_FLL_SRC_AIF2BCLK 9
9170 +#define ARIZONA_FLL_SRC_AIF3BCLK 10
9171 +#define ARIZONA_FLL_SRC_AIF1LRCLK 12
9172 +#define ARIZONA_FLL_SRC_AIF2LRCLK 13
9173 +#define ARIZONA_FLL_SRC_AIF3LRCLK 14
9174
9175 #define ARIZONA_MIXER_VOL_MASK 0x00FE
9176 #define ARIZONA_MIXER_VOL_SHIFT 1
9177 diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
9178 index 5be42bf..4068f24 100644
9179 --- a/sound/soc/codecs/sigmadsp.c
9180 +++ b/sound/soc/codecs/sigmadsp.c
9181 @@ -225,7 +225,7 @@ EXPORT_SYMBOL(process_sigma_firmware);
9182 static int sigma_action_write_regmap(void *control_data,
9183 const struct sigma_action *sa, size_t len)
9184 {
9185 - return regmap_raw_write(control_data, le16_to_cpu(sa->addr),
9186 + return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
9187 sa->payload, len - 2);
9188 }
9189
9190 diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
9191 index 9e31448..18171ad 100644
9192 --- a/sound/soc/codecs/sta529.c
9193 +++ b/sound/soc/codecs/sta529.c
9194 @@ -74,9 +74,10 @@
9195 SNDRV_PCM_FMTBIT_S32_LE)
9196 #define S2PC_VALUE 0x98
9197 #define CLOCK_OUT 0x60
9198 -#define LEFT_J_DATA_FORMAT 0x10
9199 -#define I2S_DATA_FORMAT 0x12
9200 -#define RIGHT_J_DATA_FORMAT 0x14
9201 +#define DATA_FORMAT_MSK 0x0E
9202 +#define LEFT_J_DATA_FORMAT 0x00
9203 +#define I2S_DATA_FORMAT 0x02
9204 +#define RIGHT_J_DATA_FORMAT 0x04
9205 #define CODEC_MUTE_VAL 0x80
9206
9207 #define POWER_CNTLMSAK 0x40
9208 @@ -289,7 +290,7 @@ static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
9209 return -EINVAL;
9210 }
9211
9212 - snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
9213 + snd_soc_update_bits(codec, STA529_S2PCFG0, DATA_FORMAT_MSK, mode);
9214
9215 return 0;
9216 }
9217 diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
9218 index 683dc43..cdab549 100644
9219 --- a/sound/soc/codecs/wm2000.c
9220 +++ b/sound/soc/codecs/wm2000.c
9221 @@ -209,9 +209,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
9222
9223 ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
9224 if (wm2000->speech_clarity)
9225 - ret &= ~WM2000_SPEECH_CLARITY;
9226 - else
9227 ret |= WM2000_SPEECH_CLARITY;
9228 + else
9229 + ret &= ~WM2000_SPEECH_CLARITY;
9230 wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
9231
9232 wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
9233 diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
9234 index eab64a1..7ef4e96 100644
9235 --- a/sound/soc/codecs/wm2200.c
9236 +++ b/sound/soc/codecs/wm2200.c
9237 @@ -1380,15 +1380,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9238 case SND_SOC_DAIFMT_DSP_A:
9239 fmt_val = 0;
9240 break;
9241 - case SND_SOC_DAIFMT_DSP_B:
9242 - fmt_val = 1;
9243 - break;
9244 case SND_SOC_DAIFMT_I2S:
9245 fmt_val = 2;
9246 break;
9247 - case SND_SOC_DAIFMT_LEFT_J:
9248 - fmt_val = 3;
9249 - break;
9250 default:
9251 dev_err(codec->dev, "Unsupported DAI format %d\n",
9252 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9253 @@ -1440,7 +1434,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9254 WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
9255 lrclk);
9256 snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
9257 - WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
9258 + WM2200_AIF1_FMT_MASK, fmt_val);
9259
9260 return 0;
9261 }
9262 diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
9263 index 7f56758..a351ca0 100644
9264 --- a/sound/soc/codecs/wm5100.c
9265 +++ b/sound/soc/codecs/wm5100.c
9266 @@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9267 case SND_SOC_DAIFMT_DSP_A:
9268 mask = 0;
9269 break;
9270 - case SND_SOC_DAIFMT_DSP_B:
9271 - mask = 1;
9272 - break;
9273 case SND_SOC_DAIFMT_I2S:
9274 mask = 2;
9275 break;
9276 - case SND_SOC_DAIFMT_LEFT_J:
9277 - mask = 3;
9278 - break;
9279 default:
9280 dev_err(codec->dev, "Unsupported DAI format %d\n",
9281 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9282 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
9283 index b2b2b37..1c2b337 100644
9284 --- a/sound/soc/codecs/wm8994.c
9285 +++ b/sound/soc/codecs/wm8994.c
9286 @@ -3839,20 +3839,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
9287 wm8994->hubs.no_cache_dac_hp_direct = true;
9288 wm8994->fll_byp = true;
9289
9290 - switch (control->cust_id) {
9291 - case 0:
9292 - case 2:
9293 - wm8994->hubs.dcs_codes_l = -9;
9294 - wm8994->hubs.dcs_codes_r = -7;
9295 - break;
9296 - case 1:
9297 - case 3:
9298 - wm8994->hubs.dcs_codes_l = -8;
9299 - wm8994->hubs.dcs_codes_r = -7;
9300 - break;
9301 - default:
9302 - break;
9303 - }
9304 + wm8994->hubs.dcs_codes_l = -9;
9305 + wm8994->hubs.dcs_codes_r = -7;
9306
9307 snd_soc_update_bits(codec, WM8994_ANALOGUE_HP_1,
9308 WM1811_HPOUT1_ATTN, WM1811_HPOUT1_ATTN);
9309 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
9310 index ef22d0b..d340644 100644
9311 --- a/sound/soc/soc-pcm.c
9312 +++ b/sound/soc/soc-pcm.c
9313 @@ -1240,6 +1240,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
9314 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
9315 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
9316 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
9317 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
9318 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
9319 continue;
9320
9321 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9322 index be70035..6e8fa7e 100644
9323 --- a/virt/kvm/kvm_main.c
9324 +++ b/virt/kvm/kvm_main.c
9325 @@ -709,8 +709,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
9326 int r;
9327 gfn_t base_gfn;
9328 unsigned long npages;
9329 - unsigned long i;
9330 - struct kvm_memory_slot *memslot;
9331 + struct kvm_memory_slot *memslot, *slot;
9332 struct kvm_memory_slot old, new;
9333 struct kvm_memslots *slots, *old_memslots;
9334
9335 @@ -761,13 +760,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
9336
9337 /* Check for overlaps */
9338 r = -EEXIST;
9339 - for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
9340 - struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
9341 -
9342 - if (s == memslot || !s->npages)
9343 + kvm_for_each_memslot(slot, kvm->memslots) {
9344 + if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
9345 continue;
9346 - if (!((base_gfn + npages <= s->base_gfn) ||
9347 - (base_gfn >= s->base_gfn + s->npages)))
9348 + if (!((base_gfn + npages <= slot->base_gfn) ||
9349 + (base_gfn >= slot->base_gfn + slot->npages)))
9350 goto out_free;
9351 }
9352