Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.7/0102-3.7.3-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 2039 - (show annotations) (download)
Mon Jan 28 08:11:20 2013 UTC (11 years, 3 months ago) by niro
File size: 329089 byte(s)
linux-3.7.3
1 diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
2 index 4abe83e..03591a7 100644
3 --- a/Documentation/power/runtime_pm.txt
4 +++ b/Documentation/power/runtime_pm.txt
5 @@ -642,12 +642,13 @@ out the following operations:
6 * During system suspend it calls pm_runtime_get_noresume() and
7 pm_runtime_barrier() for every device right before executing the
8 subsystem-level .suspend() callback for it. In addition to that it calls
9 - pm_runtime_disable() for every device right after executing the
10 - subsystem-level .suspend() callback for it.
11 + __pm_runtime_disable() with 'false' as the second argument for every device
12 + right before executing the subsystem-level .suspend_late() callback for it.
13
14 * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
15 - for every device right before and right after executing the subsystem-level
16 - .resume() callback for it, respectively.
17 + for every device right after executing the subsystem-level .resume_early()
18 + callback and right after executing the subsystem-level .resume() callback
19 + for it, respectively.
20
21 7. Generic subsystem callbacks
22
23 @@ -1021,11 +1021,14 @@ clean: rm-dirs := $(CLEAN_DIRS)
24 clean: rm-files := $(CLEAN_FILES)
25 clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
26
27 -PHONY += $(clean-dirs) clean archclean
28 +PHONY += $(clean-dirs) clean archclean vmlinuxclean
29 $(clean-dirs):
30 $(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
31
32 -clean: archclean
33 +vmlinuxclean:
34 + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
35 +
36 +clean: archclean vmlinuxclean
37
38 # mrproper - Delete all generated files, including .config
39 #
40 @@ -1252,7 +1255,6 @@ scripts: ;
41 endif # KBUILD_EXTMOD
42
43 clean: $(clean-dirs)
44 - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
45 $(call cmd,rmdirs)
46 $(call cmd,rmfiles)
47 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
48 diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
49 index 96cd369..09e1790 100644
50 --- a/arch/arm/mach-omap2/board-3430sdp.c
51 +++ b/arch/arm/mach-omap2/board-3430sdp.c
52 @@ -157,6 +157,7 @@ static struct omap_dss_device sdp3430_lcd_device = {
53
54 static struct tfp410_platform_data dvi_panel = {
55 .power_down_gpio = -1,
56 + .i2c_bus_num = -1,
57 };
58
59 static struct omap_dss_device sdp3430_dvi_device = {
60 diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
61 index e162897..f2a920a 100644
62 --- a/arch/arm/mach-omap2/board-am3517evm.c
63 +++ b/arch/arm/mach-omap2/board-am3517evm.c
64 @@ -208,6 +208,7 @@ static struct omap_dss_device am3517_evm_tv_device = {
65
66 static struct tfp410_platform_data dvi_panel = {
67 .power_down_gpio = -1,
68 + .i2c_bus_num = -1,
69 };
70
71 static struct omap_dss_device am3517_evm_dvi_device = {
72 diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
73 index 376d26e..7ed0270 100644
74 --- a/arch/arm/mach-omap2/board-cm-t35.c
75 +++ b/arch/arm/mach-omap2/board-cm-t35.c
76 @@ -243,6 +243,7 @@ static struct omap_dss_device cm_t35_lcd_device = {
77
78 static struct tfp410_platform_data dvi_panel = {
79 .power_down_gpio = CM_T35_DVI_EN_GPIO,
80 + .i2c_bus_num = -1,
81 };
82
83 static struct omap_dss_device cm_t35_dvi_device = {
84 diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
85 index 1fd161e..6f04f0f 100644
86 --- a/arch/arm/mach-omap2/board-devkit8000.c
87 +++ b/arch/arm/mach-omap2/board-devkit8000.c
88 @@ -139,6 +139,7 @@ static struct omap_dss_device devkit8000_lcd_device = {
89
90 static struct tfp410_platform_data dvi_panel = {
91 .power_down_gpio = -1,
92 + .i2c_bus_num = 1,
93 };
94
95 static struct omap_dss_device devkit8000_dvi_device = {
96 diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
97 index b9b776b..5631eb9 100644
98 --- a/arch/arm/mach-omap2/board-omap3evm.c
99 +++ b/arch/arm/mach-omap2/board-omap3evm.c
100 @@ -236,6 +236,7 @@ static struct omap_dss_device omap3_evm_tv_device = {
101
102 static struct tfp410_platform_data dvi_panel = {
103 .power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
104 + .i2c_bus_num = -1,
105 };
106
107 static struct omap_dss_device omap3_evm_dvi_device = {
108 diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
109 index 731235e..797be22 100644
110 --- a/arch/arm/mach-omap2/board-omap3stalker.c
111 +++ b/arch/arm/mach-omap2/board-omap3stalker.c
112 @@ -119,6 +119,7 @@ static struct omap_dss_device omap3_stalker_tv_device = {
113
114 static struct tfp410_platform_data dvi_panel = {
115 .power_down_gpio = DSS_ENABLE_GPIO,
116 + .i2c_bus_num = -1,
117 };
118
119 static struct omap_dss_device omap3_stalker_dvi_device = {
120 diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
121 index a611ad3..b6132aa 100644
122 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
123 +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
124 @@ -463,6 +463,9 @@
125 GPIO76_LCD_PCLK, \
126 GPIO77_LCD_BIAS
127
128 +/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
129 +#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
130 +#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
131
132 extern int keypad_set_wake(unsigned int on);
133 #endif /* __ASM_ARCH_MFP_PXA27X_H */
134 diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
135 index 8047ee0..616cb87 100644
136 --- a/arch/arm/mach-pxa/pxa27x.c
137 +++ b/arch/arm/mach-pxa/pxa27x.c
138 @@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
139 EXPORT_SYMBOL(pxa27x_clear_otgph);
140
141 static unsigned long ac97_reset_config[] = {
142 - GPIO113_GPIO,
143 + GPIO113_AC97_nRESET_GPIO_HIGH,
144 GPIO113_AC97_nRESET,
145 - GPIO95_GPIO,
146 + GPIO95_AC97_nRESET_GPIO_HIGH,
147 GPIO95_AC97_nRESET,
148 };
149
150 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
151 index e9a5fd7..69b17a9 100644
152 --- a/arch/mips/kernel/process.c
153 +++ b/arch/mips/kernel/process.c
154 @@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
155 }
156 }
157 #ifdef CONFIG_HOTPLUG_CPU
158 - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
159 - (system_state == SYSTEM_RUNNING ||
160 - system_state == SYSTEM_BOOTING))
161 + if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
162 play_dead();
163 #endif
164 rcu_idle_exit();
165 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
166 index 2833dcb..e7c383b 100644
167 --- a/arch/mips/mm/tlbex.c
168 +++ b/arch/mips/mm/tlbex.c
169 @@ -952,13 +952,6 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
170 #endif
171 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
172 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
173 -
174 - if (cpu_has_mips_r2) {
175 - uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
176 - uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
177 - return;
178 - }
179 -
180 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
181 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
182 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
183 @@ -994,15 +987,6 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
184
185 static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
186 {
187 - if (cpu_has_mips_r2) {
188 - /* PTE ptr offset is obtained from BadVAddr */
189 - UASM_i_MFC0(p, tmp, C0_BADVADDR);
190 - UASM_i_LW(p, ptr, 0, ptr);
191 - uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
192 - uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
193 - return;
194 - }
195 -
196 /*
197 * Bug workaround for the Nevada. It seems as if under certain
198 * circumstances the move from cp0_context might produce a
199 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
200 index 58bddee..9e07bd0 100644
201 --- a/arch/powerpc/kernel/head_64.S
202 +++ b/arch/powerpc/kernel/head_64.S
203 @@ -422,7 +422,7 @@ _STATIC(__after_prom_start)
204 tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
205 #endif
206
207 -#ifdef CONFIG_CRASH_DUMP
208 +#ifdef CONFIG_RELOCATABLE
209 /*
210 * Check if the kernel has to be running as relocatable kernel based on the
211 * variable __run_at_load, if it is set the kernel is treated as relocatable
212 diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
213 index ce4cb77..ba48a88 100644
214 --- a/arch/powerpc/kernel/time.c
215 +++ b/arch/powerpc/kernel/time.c
216 @@ -774,13 +774,8 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
217
218 void update_vsyscall_tz(void)
219 {
220 - /* Make userspace gettimeofday spin until we're done. */
221 - ++vdso_data->tb_update_count;
222 - smp_mb();
223 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
224 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
225 - smp_mb();
226 - ++vdso_data->tb_update_count;
227 }
228
229 static void __init clocksource_init(void)
230 diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
231 index c8c6157..c39cd0b 100644
232 --- a/arch/powerpc/kvm/44x_emulate.c
233 +++ b/arch/powerpc/kvm/44x_emulate.c
234 @@ -76,6 +76,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
235 run->dcr.dcrn = dcrn;
236 run->dcr.data = 0;
237 run->dcr.is_write = 0;
238 + vcpu->arch.dcr_is_write = 0;
239 vcpu->arch.io_gpr = rt;
240 vcpu->arch.dcr_needed = 1;
241 kvmppc_account_exit(vcpu, DCR_EXITS);
242 @@ -94,6 +95,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
243 run->dcr.dcrn = dcrn;
244 run->dcr.data = kvmppc_get_gpr(vcpu, rs);
245 run->dcr.is_write = 1;
246 + vcpu->arch.dcr_is_write = 1;
247 vcpu->arch.dcr_needed = 1;
248 kvmppc_account_exit(vcpu, DCR_EXITS);
249 emulated = EMULATE_DO_DCR;
250 diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
251 index ff38b66..ea30a90 100644
252 --- a/arch/powerpc/kvm/e500_tlb.c
253 +++ b/arch/powerpc/kvm/e500_tlb.c
254 @@ -1332,7 +1332,7 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
255 if (!vcpu_e500->gtlb_priv[1])
256 goto err;
257
258 - vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
259 + vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
260 vcpu_e500->gtlb_params[1].entries,
261 GFP_KERNEL);
262 if (!vcpu_e500->g2h_tlb1_map)
263 diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
264 index 969dddc..8f3920e 100644
265 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c
266 +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
267 @@ -57,7 +57,8 @@ static const char * const board[] __initconst = {
268 "amcc,makalu",
269 "apm,klondike",
270 "est,hotfoot",
271 - "plathome,obs600"
272 + "plathome,obs600",
273 + NULL
274 };
275
276 static int __init ppc40x_probe(void)
277 diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
278 index 07d8de3..19b6080 100644
279 --- a/arch/s390/kernel/entry64.S
280 +++ b/arch/s390/kernel/entry64.S
281 @@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
282 #endif
283 .endm
284
285 - .macro HANDLE_SIE_INTERCEPT scratch
286 + .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
287 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
288 tmhh %r8,0x0001 # interrupting from user ?
289 jnz .+42
290 lgr \scratch,%r9
291 slg \scratch,BASED(.Lsie_loop)
292 clg \scratch,BASED(.Lsie_length)
293 + .if \pgmcheck
294 + # Some program interrupts are suppressing (e.g. protection).
295 + # We must also check the instruction after SIE in that case.
296 + # do_protection_exception will rewind to rewind_pad
297 + jh .+22
298 + .else
299 jhe .+22
300 + .endif
301 lg %r9,BASED(.Lsie_loop)
302 SPP BASED(.Lhost_id) # set host id
303 #endif
304 @@ -391,7 +398,7 @@ ENTRY(pgm_check_handler)
305 lg %r12,__LC_THREAD_INFO
306 larl %r13,system_call
307 lmg %r8,%r9,__LC_PGM_OLD_PSW
308 - HANDLE_SIE_INTERCEPT %r14
309 + HANDLE_SIE_INTERCEPT %r14,1
310 tmhh %r8,0x0001 # test problem state bit
311 jnz 1f # -> fault in user space
312 tmhh %r8,0x4000 # PER bit set in old PSW ?
313 @@ -467,7 +474,7 @@ ENTRY(io_int_handler)
314 lg %r12,__LC_THREAD_INFO
315 larl %r13,system_call
316 lmg %r8,%r9,__LC_IO_OLD_PSW
317 - HANDLE_SIE_INTERCEPT %r14
318 + HANDLE_SIE_INTERCEPT %r14,0
319 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
320 tmhh %r8,0x0001 # interrupting from user?
321 jz io_skip
322 @@ -613,7 +620,7 @@ ENTRY(ext_int_handler)
323 lg %r12,__LC_THREAD_INFO
324 larl %r13,system_call
325 lmg %r8,%r9,__LC_EXT_OLD_PSW
326 - HANDLE_SIE_INTERCEPT %r14
327 + HANDLE_SIE_INTERCEPT %r14,0
328 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
329 tmhh %r8,0x0001 # interrupting from user ?
330 jz ext_skip
331 @@ -661,7 +668,7 @@ ENTRY(mcck_int_handler)
332 lg %r12,__LC_THREAD_INFO
333 larl %r13,system_call
334 lmg %r8,%r9,__LC_MCK_OLD_PSW
335 - HANDLE_SIE_INTERCEPT %r14
336 + HANDLE_SIE_INTERCEPT %r14,0
337 tm __LC_MCCK_CODE,0x80 # system damage?
338 jo mcck_panic # yes -> rest of mcck code invalid
339 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
340 @@ -960,6 +967,13 @@ ENTRY(sie64a)
341 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
342 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
343 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
344 +# some program checks are suppressing. C code (e.g. do_protection_exception)
345 +# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
346 +# instructions in the sie_loop should not cause program interrupts. So
347 +# lets use a nop (47 00 00 00) as a landing pad.
348 +# See also HANDLE_SIE_INTERCEPT
349 +rewind_pad:
350 + nop 0
351 sie_loop:
352 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
353 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
354 @@ -999,6 +1013,7 @@ sie_fault:
355 .Lhost_id:
356 .quad 0
357
358 + EX_TABLE(rewind_pad,sie_fault)
359 EX_TABLE(sie_loop,sie_fault)
360 #endif
361
362 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
363 index ecced9d..38883f0 100644
364 --- a/arch/s390/kvm/kvm-s390.c
365 +++ b/arch/s390/kvm/kvm-s390.c
366 @@ -997,7 +997,7 @@ static int __init kvm_s390_init(void)
367 }
368 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
369 facilities[0] &= 0xff00fff3f47c0000ULL;
370 - facilities[1] &= 0x201c000000000000ULL;
371 + facilities[1] &= 0x001c000000000000ULL;
372 return 0;
373 }
374
375 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
376 index c441834..1b888e8 100644
377 --- a/drivers/acpi/scan.c
378 +++ b/drivers/acpi/scan.c
379 @@ -859,8 +859,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
380 static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
381 {
382 struct acpi_device_id button_device_ids[] = {
383 - {"PNP0C0D", 0},
384 {"PNP0C0C", 0},
385 + {"PNP0C0D", 0},
386 {"PNP0C0E", 0},
387 {"", 0},
388 };
389 @@ -872,6 +872,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
390 /* Power button, Lid switch always enable wakeup */
391 if (!acpi_match_device_ids(device, button_device_ids)) {
392 device->wakeup.flags.run_wake = 1;
393 + if (!acpi_match_device_ids(device, &button_device_ids[1])) {
394 + /* Do not use Lid/sleep button for S5 wakeup */
395 + if (device->wakeup.sleep_state == ACPI_STATE_S5)
396 + device->wakeup.sleep_state = ACPI_STATE_S4;
397 + }
398 device_set_wakeup_capable(&device->dev, true);
399 return;
400 }
401 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
402 index f46fbd3..586362e 100644
403 --- a/drivers/ata/libata-core.c
404 +++ b/drivers/ata/libata-core.c
405 @@ -2560,6 +2560,7 @@ int ata_bus_probe(struct ata_port *ap)
406 * bus as we may be talking too fast.
407 */
408 dev->pio_mode = XFER_PIO_0;
409 + dev->dma_mode = 0xff;
410
411 /* If the controller has a pio mode setup function
412 * then use it to set the chipset to rights. Don't
413 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
414 index e60437c..bf039b0 100644
415 --- a/drivers/ata/libata-eh.c
416 +++ b/drivers/ata/libata-eh.c
417 @@ -2657,6 +2657,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
418 * bus as we may be talking too fast.
419 */
420 dev->pio_mode = XFER_PIO_0;
421 + dev->dma_mode = 0xff;
422
423 /* If the controller has a pio mode setup function
424 * then use it to set the chipset to rights. Don't
425 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
426 index a6df6a3..7c337e7 100644
427 --- a/drivers/ata/libata-scsi.c
428 +++ b/drivers/ata/libata-scsi.c
429 @@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
430 struct ata_port *ap = ata_shost_to_port(sdev->host);
431 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
432
433 - if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
434 + if (atadev && ap->ops->sw_activity_show &&
435 + (ap->flags & ATA_FLAG_SW_ACTIVITY))
436 return ap->ops->sw_activity_show(atadev, buf);
437 return -EINVAL;
438 }
439 @@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
440 enum sw_activity val;
441 int rc;
442
443 - if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
444 + if (atadev && ap->ops->sw_activity_store &&
445 + (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
446 val = simple_strtoul(buf, NULL, 0);
447 switch (val) {
448 case OFF: case BLINK_ON: case BLINK_OFF:
449 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
450 index 489c817..fb0dd87 100644
451 --- a/drivers/ata/sata_promise.c
452 +++ b/drivers/ata/sata_promise.c
453 @@ -147,6 +147,10 @@ struct pdc_port_priv {
454 dma_addr_t pkt_dma;
455 };
456
457 +struct pdc_host_priv {
458 + spinlock_t hard_reset_lock;
459 +};
460 +
461 static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
462 static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
463 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
464 @@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
465 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
466 void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
467 unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
468 + struct pdc_host_priv *hpriv = ap->host->private_data;
469 u8 tmp;
470
471 - spin_lock(&ap->host->lock);
472 + spin_lock(&hpriv->hard_reset_lock);
473
474 tmp = readb(pcictl_b1_mmio);
475 tmp &= ~(0x10 << ata_no);
476 @@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
477 writeb(tmp, pcictl_b1_mmio);
478 readb(pcictl_b1_mmio); /* flush */
479
480 - spin_unlock(&ap->host->lock);
481 + spin_unlock(&hpriv->hard_reset_lock);
482 }
483
484 static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
485 @@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
486 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
487 const struct ata_port_info *ppi[PDC_MAX_PORTS];
488 struct ata_host *host;
489 + struct pdc_host_priv *hpriv;
490 void __iomem *host_mmio;
491 int n_ports, i, rc;
492 int is_sataii_tx4;
493 @@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
494 dev_err(&pdev->dev, "failed to allocate host\n");
495 return -ENOMEM;
496 }
497 + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
498 + if (!hpriv)
499 + return -ENOMEM;
500 + spin_lock_init(&hpriv->hard_reset_lock);
501 + host->private_data = hpriv;
502 host->iomap = pcim_iomap_table(pdev);
503
504 is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
505 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
506 index a3c1404..2b7f77d 100644
507 --- a/drivers/base/power/main.c
508 +++ b/drivers/base/power/main.c
509 @@ -513,6 +513,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
510
511 Out:
512 TRACE_RESUME(error);
513 +
514 + pm_runtime_enable(dev);
515 return error;
516 }
517
518 @@ -589,8 +591,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
519 if (!dev->power.is_suspended)
520 goto Unlock;
521
522 - pm_runtime_enable(dev);
523 -
524 if (dev->pm_domain) {
525 info = "power domain ";
526 callback = pm_op(&dev->pm_domain->ops, state);
527 @@ -930,6 +930,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
528 pm_callback_t callback = NULL;
529 char *info = NULL;
530
531 + __pm_runtime_disable(dev, false);
532 +
533 if (dev->power.syscore)
534 return 0;
535
536 @@ -1133,11 +1135,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
537
538 Complete:
539 complete_all(&dev->power.completion);
540 -
541 if (error)
542 async_error = error;
543 - else if (dev->power.is_suspended)
544 - __pm_runtime_disable(dev, false);
545
546 return error;
547 }
548 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
549 index bb1ff17..c394041 100644
550 --- a/drivers/base/regmap/regmap-debugfs.c
551 +++ b/drivers/base/regmap/regmap-debugfs.c
552 @@ -90,7 +90,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
553 /* If we're in the region the user is trying to read */
554 if (p >= *ppos) {
555 /* ...but not beyond it */
556 - if (buf_pos >= count - 1 - tot_len)
557 + if (buf_pos + 1 + tot_len >= count)
558 break;
559
560 /* Format the register */
561 diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
562 index cc65b45..b4e83b8 100644
563 --- a/drivers/bcma/driver_mips.c
564 +++ b/drivers/bcma/driver_mips.c
565 @@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
566 bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
567 ~(1 << irqflag));
568 else
569 - bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
570 + bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
571
572 /* assign the new one */
573 if (irq == 0) {
574 diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
575 index 00dfc50..047baf0 100644
576 --- a/drivers/block/aoe/aoeblk.c
577 +++ b/drivers/block/aoe/aoeblk.c
578 @@ -231,18 +231,12 @@ aoeblk_gdalloc(void *vp)
579 if (q == NULL) {
580 pr_err("aoe: cannot allocate block queue for %ld.%d\n",
581 d->aoemajor, d->aoeminor);
582 - mempool_destroy(mp);
583 - goto err_disk;
584 + goto err_mempool;
585 }
586
587 - d->blkq = blk_alloc_queue(GFP_KERNEL);
588 - if (!d->blkq)
589 - goto err_mempool;
590 - d->blkq->backing_dev_info.name = "aoe";
591 - if (bdi_init(&d->blkq->backing_dev_info))
592 - goto err_blkq;
593 spin_lock_irqsave(&d->lock, flags);
594 - blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS);
595 + blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
596 + q->backing_dev_info.name = "aoe";
597 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
598 d->bufpool = mp;
599 d->blkq = gd->queue = q;
600 @@ -265,11 +259,8 @@ aoeblk_gdalloc(void *vp)
601 aoedisk_add_sysfs(d);
602 return;
603
604 -err_blkq:
605 - blk_cleanup_queue(d->blkq);
606 - d->blkq = NULL;
607 err_mempool:
608 - mempool_destroy(d->bufpool);
609 + mempool_destroy(mp);
610 err_disk:
611 put_disk(gd);
612 err:
613 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
614 index bb3d9be..67de124 100644
615 --- a/drivers/block/rbd.c
616 +++ b/drivers/block/rbd.c
617 @@ -61,7 +61,10 @@
618
619 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
620
621 -#define RBD_MAX_SNAP_NAME_LEN 32
622 +#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
623 +#define RBD_MAX_SNAP_NAME_LEN \
624 + (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
625 +
626 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
627 #define RBD_MAX_OPT_LEN 1024
628
629 @@ -204,6 +207,7 @@ struct rbd_device {
630
631 /* sysfs related */
632 struct device dev;
633 + unsigned long open_count;
634 };
635
636 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
637 @@ -218,7 +222,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
638 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
639
640 static void rbd_dev_release(struct device *dev);
641 -static void __rbd_remove_snap_dev(struct rbd_snap *snap);
642 +static void rbd_remove_snap_dev(struct rbd_snap *snap);
643
644 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
645 size_t count);
646 @@ -277,8 +281,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
647 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
648 return -EROFS;
649
650 + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
651 rbd_get_dev(rbd_dev);
652 set_device_ro(bdev, rbd_dev->mapping.read_only);
653 + rbd_dev->open_count++;
654 + mutex_unlock(&ctl_mutex);
655
656 return 0;
657 }
658 @@ -287,7 +294,11 @@ static int rbd_release(struct gendisk *disk, fmode_t mode)
659 {
660 struct rbd_device *rbd_dev = disk->private_data;
661
662 + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
663 + rbd_assert(rbd_dev->open_count > 0);
664 + rbd_dev->open_count--;
665 rbd_put_dev(rbd_dev);
666 + mutex_unlock(&ctl_mutex);
667
668 return 0;
669 }
670 @@ -388,7 +399,7 @@ enum {
671 static match_table_t rbd_opts_tokens = {
672 /* int args above */
673 /* string args above */
674 - {Opt_read_only, "mapping.read_only"},
675 + {Opt_read_only, "read_only"},
676 {Opt_read_only, "ro"}, /* Alternate spelling */
677 {Opt_read_write, "read_write"},
678 {Opt_read_write, "rw"}, /* Alternate spelling */
679 @@ -695,13 +706,13 @@ static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
680 u64 segment;
681 int ret;
682
683 - name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
684 + name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
685 if (!name)
686 return NULL;
687 segment = offset >> rbd_dev->header.obj_order;
688 - ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
689 + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
690 rbd_dev->header.object_prefix, segment);
691 - if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
692 + if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
693 pr_err("error formatting segment name for #%llu (%d)\n",
694 segment, ret);
695 kfree(name);
696 @@ -1707,13 +1718,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
697 return ret;
698 }
699
700 -static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
701 +static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
702 {
703 struct rbd_snap *snap;
704 struct rbd_snap *next;
705
706 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
707 - __rbd_remove_snap_dev(snap);
708 + rbd_remove_snap_dev(snap);
709 }
710
711 /*
712 @@ -2057,7 +2068,7 @@ static bool rbd_snap_registered(struct rbd_snap *snap)
713 return ret;
714 }
715
716 -static void __rbd_remove_snap_dev(struct rbd_snap *snap)
717 +static void rbd_remove_snap_dev(struct rbd_snap *snap)
718 {
719 list_del(&snap->node);
720 if (device_is_registered(&snap->dev))
721 @@ -2073,7 +2084,7 @@ static int rbd_register_snap_dev(struct rbd_snap *snap,
722 dev->type = &rbd_snap_device_type;
723 dev->parent = parent;
724 dev->release = rbd_snap_dev_release;
725 - dev_set_name(dev, "snap_%s", snap->name);
726 + dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
727 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
728
729 ret = device_register(dev);
730 @@ -2189,6 +2200,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
731 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
732 if (ret < 0)
733 goto out;
734 + ret = 0; /* rbd_req_sync_exec() can return positive */
735
736 p = reply_buf;
737 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
738 @@ -2438,7 +2450,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
739
740 if (rbd_dev->mapping.snap_id == snap->id)
741 rbd_dev->mapping.snap_exists = false;
742 - __rbd_remove_snap_dev(snap);
743 + rbd_remove_snap_dev(snap);
744 dout("%ssnap id %llu has been removed\n",
745 rbd_dev->mapping.snap_id == snap->id ?
746 "mapped " : "",
747 @@ -2621,8 +2633,8 @@ static void rbd_dev_id_put(struct rbd_device *rbd_dev)
748 struct rbd_device *rbd_dev;
749
750 rbd_dev = list_entry(tmp, struct rbd_device, node);
751 - if (rbd_id > max_id)
752 - max_id = rbd_id;
753 + if (rbd_dev->dev_id > max_id)
754 + max_id = rbd_dev->dev_id;
755 }
756 spin_unlock(&rbd_dev_list_lock);
757
758 @@ -2765,8 +2777,13 @@ static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
759 if (!rbd_dev->image_name)
760 goto out_err;
761
762 - /* Snapshot name is optional */
763 + /* Snapshot name is optional; default is to use "head" */
764 +
765 len = next_token(&buf);
766 + if (len > RBD_MAX_SNAP_NAME_LEN) {
767 + err_ptr = ERR_PTR(-ENAMETOOLONG);
768 + goto out_err;
769 + }
770 if (!len) {
771 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
772 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
773 @@ -2777,8 +2794,6 @@ static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
774 memcpy(snap_name, buf, len);
775 *(snap_name + len) = '\0';
776
777 -dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
778 -
779 return snap_name;
780
781 out_err:
782 @@ -2841,6 +2856,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev)
783 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
784 if (ret < 0)
785 goto out;
786 + ret = 0; /* rbd_req_sync_exec() can return positive */
787
788 p = response;
789 rbd_dev->image_id = ceph_extract_encoded_string(&p,
790 @@ -3045,11 +3061,11 @@ static ssize_t rbd_add(struct bus_type *bus,
791 /* no need to lock here, as rbd_dev is not registered yet */
792 rc = rbd_dev_snaps_update(rbd_dev);
793 if (rc)
794 - goto err_out_header;
795 + goto err_out_probe;
796
797 rc = rbd_dev_set_mapping(rbd_dev, snap_name);
798 if (rc)
799 - goto err_out_header;
800 + goto err_out_snaps;
801
802 /* generate unique id: find highest unique id, add one */
803 rbd_dev_id_get(rbd_dev);
804 @@ -3113,7 +3129,9 @@ err_out_blkdev:
805 unregister_blkdev(rbd_dev->major, rbd_dev->name);
806 err_out_id:
807 rbd_dev_id_put(rbd_dev);
808 -err_out_header:
809 +err_out_snaps:
810 + rbd_remove_all_snaps(rbd_dev);
811 +err_out_probe:
812 rbd_header_free(&rbd_dev->header);
813 err_out_client:
814 kfree(rbd_dev->header_name);
815 @@ -3211,7 +3229,12 @@ static ssize_t rbd_remove(struct bus_type *bus,
816 goto done;
817 }
818
819 - __rbd_remove_all_snaps(rbd_dev);
820 + if (rbd_dev->open_count) {
821 + ret = -EBUSY;
822 + goto done;
823 + }
824 +
825 + rbd_remove_all_snaps(rbd_dev);
826 rbd_bus_del_dev(rbd_dev);
827
828 done:
829 diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
830 index cbe77fa..49d77cb 100644
831 --- a/drivers/block/rbd_types.h
832 +++ b/drivers/block/rbd_types.h
833 @@ -46,8 +46,6 @@
834 #define RBD_MIN_OBJ_ORDER 16
835 #define RBD_MAX_OBJ_ORDER 30
836
837 -#define RBD_MAX_SEG_NAME_LEN 128
838 -
839 #define RBD_COMP_NONE 0
840 #define RBD_CRYPT_NONE 0
841
842 diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
843 index 3265844..2a297f8 100644
844 --- a/drivers/cpuidle/coupled.c
845 +++ b/drivers/cpuidle/coupled.c
846 @@ -209,7 +209,7 @@ inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
847 int all;
848 int ret;
849
850 - all = coupled->online_count || (coupled->online_count << WAITING_BITS);
851 + all = coupled->online_count | (coupled->online_count << WAITING_BITS);
852 ret = atomic_add_unless(&coupled->ready_waiting_counts,
853 -MAX_WAITING_CPUS, all);
854
855 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
856 index ed0bc07..fe4fa1c 100644
857 --- a/drivers/edac/edac_mc_sysfs.c
858 +++ b/drivers/edac/edac_mc_sysfs.c
859 @@ -1145,7 +1145,7 @@ int __init edac_mc_sysfs_init(void)
860
861 void __exit edac_mc_sysfs_exit(void)
862 {
863 - put_device(mci_pdev);
864 device_del(mci_pdev);
865 + put_device(mci_pdev);
866 edac_put_sysfs_subsys();
867 }
868 diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
869 index 08c6749..638e1f7 100644
870 --- a/drivers/firewire/net.c
871 +++ b/drivers/firewire/net.c
872 @@ -861,8 +861,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
873 if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
874 buf_ptr += 2;
875 length -= IEEE1394_GASP_HDR_SIZE;
876 - fwnet_incoming_packet(dev, buf_ptr, length,
877 - source_node_id, -1, true);
878 + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
879 + context->card->generation, true);
880 }
881
882 packet.payload_length = dev->rcv_buffer_size;
883 @@ -958,7 +958,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
884 break;
885 }
886
887 - skb_pull(skb, ptask->max_payload);
888 + if (ptask->dest_node == IEEE1394_ALL_NODES) {
889 + skb_pull(skb,
890 + ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
891 + } else {
892 + skb_pull(skb, ptask->max_payload);
893 + }
894 if (ptask->outstanding_pkts > 1) {
895 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
896 dg_size, fg_off, datagram_label);
897 @@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
898 smp_rmb();
899 node_id = dev->card->node_id;
900
901 - p = skb_push(ptask->skb, 8);
902 + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
903 put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
904 put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
905 | RFC2734_SW_VERSION, &p[4]);
906 diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
907 index 0761a03..665553c 100644
908 --- a/drivers/gpu/drm/drm_mm.c
909 +++ b/drivers/gpu/drm/drm_mm.c
910 @@ -213,11 +213,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
911
912 BUG_ON(!hole_node->hole_follows || node->allocated);
913
914 - if (mm->color_adjust)
915 - mm->color_adjust(hole_node, color, &adj_start, &adj_end);
916 -
917 if (adj_start < start)
918 adj_start = start;
919 + if (adj_end > end)
920 + adj_end = end;
921 +
922 + if (mm->color_adjust)
923 + mm->color_adjust(hole_node, color, &adj_start, &adj_end);
924
925 if (alignment) {
926 unsigned tmp = adj_start % alignment;
927 @@ -489,7 +491,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
928 mm->scan_size = size;
929 mm->scanned_blocks = 0;
930 mm->scan_hit_start = 0;
931 - mm->scan_hit_size = 0;
932 + mm->scan_hit_end = 0;
933 mm->scan_check_range = 0;
934 mm->prev_scanned_node = NULL;
935 }
936 @@ -516,7 +518,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
937 mm->scan_size = size;
938 mm->scanned_blocks = 0;
939 mm->scan_hit_start = 0;
940 - mm->scan_hit_size = 0;
941 + mm->scan_hit_end = 0;
942 mm->scan_start = start;
943 mm->scan_end = end;
944 mm->scan_check_range = 1;
945 @@ -535,8 +537,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
946 struct drm_mm *mm = node->mm;
947 struct drm_mm_node *prev_node;
948 unsigned long hole_start, hole_end;
949 - unsigned long adj_start;
950 - unsigned long adj_end;
951 + unsigned long adj_start, adj_end;
952
953 mm->scanned_blocks++;
954
955 @@ -553,14 +554,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
956 node->node_list.next = &mm->prev_scanned_node->node_list;
957 mm->prev_scanned_node = node;
958
959 - hole_start = drm_mm_hole_node_start(prev_node);
960 - hole_end = drm_mm_hole_node_end(prev_node);
961 -
962 - adj_start = hole_start;
963 - adj_end = hole_end;
964 -
965 - if (mm->color_adjust)
966 - mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
967 + adj_start = hole_start = drm_mm_hole_node_start(prev_node);
968 + adj_end = hole_end = drm_mm_hole_node_end(prev_node);
969
970 if (mm->scan_check_range) {
971 if (adj_start < mm->scan_start)
972 @@ -569,11 +564,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
973 adj_end = mm->scan_end;
974 }
975
976 + if (mm->color_adjust)
977 + mm->color_adjust(prev_node, mm->scan_color,
978 + &adj_start, &adj_end);
979 +
980 if (check_free_hole(adj_start, adj_end,
981 mm->scan_size, mm->scan_alignment)) {
982 mm->scan_hit_start = hole_start;
983 - mm->scan_hit_size = hole_end;
984 -
985 + mm->scan_hit_end = hole_end;
986 return 1;
987 }
988
989 @@ -609,19 +607,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
990 node_list);
991
992 prev_node->hole_follows = node->scanned_preceeds_hole;
993 - INIT_LIST_HEAD(&node->node_list);
994 list_add(&node->node_list, &prev_node->node_list);
995
996 - /* Only need to check for containement because start&size for the
997 - * complete resulting free block (not just the desired part) is
998 - * stored. */
999 - if (node->start >= mm->scan_hit_start &&
1000 - node->start + node->size
1001 - <= mm->scan_hit_start + mm->scan_hit_size) {
1002 - return 1;
1003 - }
1004 -
1005 - return 0;
1006 + return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
1007 + node->start < mm->scan_hit_end);
1008 }
1009 EXPORT_SYMBOL(drm_mm_scan_remove_block);
1010
1011 diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1012 index fae1f2e..f2b2f01 100644
1013 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1014 +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
1015 @@ -210,7 +210,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
1016
1017 /* is it from our device? */
1018 if (obj->dev == drm_dev) {
1019 + /*
1020 + * Importing dmabuf exported from out own gem increases
1021 + * refcount on gem itself instead of f_count of dmabuf.
1022 + */
1023 drm_gem_object_reference(obj);
1024 + dma_buf_put(dma_buf);
1025 return obj;
1026 }
1027 }
1028 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
1029 index dde8b50..da21b11 100644
1030 --- a/drivers/gpu/drm/i915/i915_debugfs.c
1031 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
1032 @@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
1033 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
1034 pipe, plane);
1035 } else {
1036 - if (!work->pending) {
1037 + if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
1038 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
1039 pipe, plane);
1040 } else {
1041 @@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
1042 seq_printf(m, "Stall check enabled, ");
1043 else
1044 seq_printf(m, "Stall check waiting for page flip ioctl, ");
1045 - seq_printf(m, "%d prepares\n", work->pending);
1046 + seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
1047
1048 if (work->old_fb_obj) {
1049 struct drm_i915_gem_object *obj = work->old_fb_obj;
1050 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
1051 index 6770ee6..1f20ead 100644
1052 --- a/drivers/gpu/drm/i915/i915_drv.c
1053 +++ b/drivers/gpu/drm/i915/i915_drv.c
1054 @@ -552,7 +552,7 @@ static int i915_drm_thaw(struct drm_device *dev)
1055 mutex_unlock(&dev->struct_mutex);
1056
1057 intel_modeset_init_hw(dev);
1058 - intel_modeset_setup_hw_state(dev);
1059 + intel_modeset_setup_hw_state(dev, false);
1060 drm_mode_config_reset(dev);
1061 drm_irq_install(dev);
1062 }
1063 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1064 index f511fa2..92f1750 100644
1065 --- a/drivers/gpu/drm/i915/i915_drv.h
1066 +++ b/drivers/gpu/drm/i915/i915_drv.h
1067 @@ -1595,7 +1595,8 @@ extern void intel_modeset_init(struct drm_device *dev);
1068 extern void intel_modeset_gem_init(struct drm_device *dev);
1069 extern void intel_modeset_cleanup(struct drm_device *dev);
1070 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1071 -extern void intel_modeset_setup_hw_state(struct drm_device *dev);
1072 +extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1073 + bool force_restore);
1074 extern bool intel_fbc_enabled(struct drm_device *dev);
1075 extern void intel_disable_fbc(struct drm_device *dev);
1076 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1077 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1078 index 9b285da..fe3a778 100644
1079 --- a/drivers/gpu/drm/i915/i915_gem.c
1080 +++ b/drivers/gpu/drm/i915/i915_gem.c
1081 @@ -1718,7 +1718,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1082 }
1083
1084 static long
1085 -i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1086 +__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1087 + bool purgeable_only)
1088 {
1089 struct drm_i915_gem_object *obj, *next;
1090 long count = 0;
1091 @@ -1726,7 +1727,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1092 list_for_each_entry_safe(obj, next,
1093 &dev_priv->mm.unbound_list,
1094 gtt_list) {
1095 - if (i915_gem_object_is_purgeable(obj) &&
1096 + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1097 i915_gem_object_put_pages(obj) == 0) {
1098 count += obj->base.size >> PAGE_SHIFT;
1099 if (count >= target)
1100 @@ -1737,7 +1738,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1101 list_for_each_entry_safe(obj, next,
1102 &dev_priv->mm.inactive_list,
1103 mm_list) {
1104 - if (i915_gem_object_is_purgeable(obj) &&
1105 + if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1106 i915_gem_object_unbind(obj) == 0 &&
1107 i915_gem_object_put_pages(obj) == 0) {
1108 count += obj->base.size >> PAGE_SHIFT;
1109 @@ -1749,6 +1750,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1110 return count;
1111 }
1112
1113 +static long
1114 +i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1115 +{
1116 + return __i915_gem_shrink(dev_priv, target, true);
1117 +}
1118 +
1119 static void
1120 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1121 {
1122 @@ -3511,14 +3518,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1123 goto out;
1124 }
1125
1126 - obj->user_pin_count++;
1127 - obj->pin_filp = file;
1128 - if (obj->user_pin_count == 1) {
1129 + if (obj->user_pin_count == 0) {
1130 ret = i915_gem_object_pin(obj, args->alignment, true, false);
1131 if (ret)
1132 goto out;
1133 }
1134
1135 + obj->user_pin_count++;
1136 + obj->pin_filp = file;
1137 +
1138 /* XXX - flush the CPU caches for pinned objects
1139 * as the X server doesn't manage domains yet
1140 */
1141 @@ -4425,6 +4433,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
1142 if (nr_to_scan) {
1143 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
1144 if (nr_to_scan > 0)
1145 + nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
1146 + false);
1147 + if (nr_to_scan > 0)
1148 i915_gem_shrink_all(dev_priv);
1149 }
1150
1151 @@ -4432,7 +4443,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
1152 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
1153 if (obj->pages_pin_count == 0)
1154 cnt += obj->base.size >> PAGE_SHIFT;
1155 - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1156 + list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
1157 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
1158 cnt += obj->base.size >> PAGE_SHIFT;
1159
1160 diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1161 index 773ef77..abeaafe 100644
1162 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1163 +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
1164 @@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1165 {
1166 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
1167
1168 - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
1169 + return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
1170 }
1171
1172 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
1173 @@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1174 obj = dma_buf->priv;
1175 /* is it from our device? */
1176 if (obj->base.dev == dev) {
1177 + /*
1178 + * Importing dmabuf exported from out own gem increases
1179 + * refcount on gem itself instead of f_count of dmabuf.
1180 + */
1181 drm_gem_object_reference(&obj->base);
1182 + dma_buf_put(dma_buf);
1183 return &obj->base;
1184 }
1185 }
1186 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
1187 index 32e1bda..dc29ace 100644
1188 --- a/drivers/gpu/drm/i915/i915_irq.c
1189 +++ b/drivers/gpu/drm/i915/i915_irq.c
1190 @@ -1464,7 +1464,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1191 spin_lock_irqsave(&dev->event_lock, flags);
1192 work = intel_crtc->unpin_work;
1193
1194 - if (work == NULL || work->pending || !work->enable_stall_check) {
1195 + if (work == NULL ||
1196 + atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1197 + !work->enable_stall_check) {
1198 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1199 spin_unlock_irqrestore(&dev->event_lock, flags);
1200 return;
1201 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1202 index a4162dd..09ae4b0 100644
1203 --- a/drivers/gpu/drm/i915/i915_reg.h
1204 +++ b/drivers/gpu/drm/i915/i915_reg.h
1205 @@ -3315,6 +3315,8 @@
1206 #define _PFA_CTL_1 0x68080
1207 #define _PFB_CTL_1 0x68880
1208 #define PF_ENABLE (1<<31)
1209 +#define PF_PIPE_SEL_MASK_IVB (3<<29)
1210 +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
1211 #define PF_FILTER_MASK (3<<23)
1212 #define PF_FILTER_PROGRAMMED (0<<23)
1213 #define PF_FILTER_MED_3x3 (1<<23)
1214 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1215 index b426d44..4d3c7c6 100644
1216 --- a/drivers/gpu/drm/i915/intel_display.c
1217 +++ b/drivers/gpu/drm/i915/intel_display.c
1218 @@ -2302,18 +2302,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
1219 FDI_FE_ERRC_ENABLE);
1220 }
1221
1222 -static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
1223 -{
1224 - struct drm_i915_private *dev_priv = dev->dev_private;
1225 - u32 flags = I915_READ(SOUTH_CHICKEN1);
1226 -
1227 - flags |= FDI_PHASE_SYNC_OVR(pipe);
1228 - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
1229 - flags |= FDI_PHASE_SYNC_EN(pipe);
1230 - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
1231 - POSTING_READ(SOUTH_CHICKEN1);
1232 -}
1233 -
1234 /* The FDI link training functions for ILK/Ibexpeak. */
1235 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1236 {
1237 @@ -2464,9 +2452,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1238 POSTING_READ(reg);
1239 udelay(150);
1240
1241 - if (HAS_PCH_CPT(dev))
1242 - cpt_phase_pointer_enable(dev, pipe);
1243 -
1244 for (i = 0; i < 4; i++) {
1245 reg = FDI_TX_CTL(pipe);
1246 temp = I915_READ(reg);
1247 @@ -2593,9 +2578,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
1248 POSTING_READ(reg);
1249 udelay(150);
1250
1251 - if (HAS_PCH_CPT(dev))
1252 - cpt_phase_pointer_enable(dev, pipe);
1253 -
1254 for (i = 0; i < 4; i++) {
1255 reg = FDI_TX_CTL(pipe);
1256 temp = I915_READ(reg);
1257 @@ -2737,17 +2719,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
1258 udelay(100);
1259 }
1260
1261 -static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
1262 -{
1263 - struct drm_i915_private *dev_priv = dev->dev_private;
1264 - u32 flags = I915_READ(SOUTH_CHICKEN1);
1265 -
1266 - flags &= ~(FDI_PHASE_SYNC_EN(pipe));
1267 - I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
1268 - flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
1269 - I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
1270 - POSTING_READ(SOUTH_CHICKEN1);
1271 -}
1272 static void ironlake_fdi_disable(struct drm_crtc *crtc)
1273 {
1274 struct drm_device *dev = crtc->dev;
1275 @@ -2777,8 +2748,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
1276 I915_WRITE(FDI_RX_CHICKEN(pipe),
1277 I915_READ(FDI_RX_CHICKEN(pipe) &
1278 ~FDI_RX_PHASE_SYNC_POINTER_EN));
1279 - } else if (HAS_PCH_CPT(dev)) {
1280 - cpt_phase_pointer_disable(dev, pipe);
1281 }
1282
1283 /* still set train pattern 1 */
1284 @@ -3225,7 +3194,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
1285 * as some pre-programmed values are broken,
1286 * e.g. x201.
1287 */
1288 - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
1289 + if (IS_IVYBRIDGE(dev))
1290 + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
1291 + PF_PIPE_SEL_IVB(pipe));
1292 + else
1293 + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
1294 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
1295 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
1296 }
1297 @@ -6183,14 +6156,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
1298 {
1299 struct intel_unpin_work *work =
1300 container_of(__work, struct intel_unpin_work, work);
1301 + struct drm_device *dev = work->crtc->dev;
1302
1303 - mutex_lock(&work->dev->struct_mutex);
1304 + mutex_lock(&dev->struct_mutex);
1305 intel_unpin_fb_obj(work->old_fb_obj);
1306 drm_gem_object_unreference(&work->pending_flip_obj->base);
1307 drm_gem_object_unreference(&work->old_fb_obj->base);
1308
1309 - intel_update_fbc(work->dev);
1310 - mutex_unlock(&work->dev->struct_mutex);
1311 + intel_update_fbc(dev);
1312 + mutex_unlock(&dev->struct_mutex);
1313 +
1314 + BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
1315 + atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
1316 +
1317 kfree(work);
1318 }
1319
1320 @@ -6211,11 +6189,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
1321
1322 spin_lock_irqsave(&dev->event_lock, flags);
1323 work = intel_crtc->unpin_work;
1324 - if (work == NULL || !work->pending) {
1325 +
1326 + /* Ensure we don't miss a work->pending update ... */
1327 + smp_rmb();
1328 +
1329 + if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
1330 spin_unlock_irqrestore(&dev->event_lock, flags);
1331 return;
1332 }
1333
1334 + /* and that the unpin work is consistent wrt ->pending. */
1335 + smp_rmb();
1336 +
1337 intel_crtc->unpin_work = NULL;
1338
1339 if (work->event) {
1340 @@ -6238,9 +6223,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
1341
1342 atomic_clear_mask(1 << intel_crtc->plane,
1343 &obj->pending_flip.counter);
1344 -
1345 wake_up(&dev_priv->pending_flip_queue);
1346 - schedule_work(&work->work);
1347 +
1348 + queue_work(dev_priv->wq, &work->work);
1349
1350 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
1351 }
1352 @@ -6268,16 +6253,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
1353 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
1354 unsigned long flags;
1355
1356 + /* NB: An MMIO update of the plane base pointer will also
1357 + * generate a page-flip completion irq, i.e. every modeset
1358 + * is also accompanied by a spurious intel_prepare_page_flip().
1359 + */
1360 spin_lock_irqsave(&dev->event_lock, flags);
1361 - if (intel_crtc->unpin_work) {
1362 - if ((++intel_crtc->unpin_work->pending) > 1)
1363 - DRM_ERROR("Prepared flip multiple times\n");
1364 - } else {
1365 - DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
1366 - }
1367 + if (intel_crtc->unpin_work)
1368 + atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
1369 spin_unlock_irqrestore(&dev->event_lock, flags);
1370 }
1371
1372 +inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
1373 +{
1374 + /* Ensure that the work item is consistent when activating it ... */
1375 + smp_wmb();
1376 + atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
1377 + /* and that it is marked active as soon as the irq could fire. */
1378 + smp_wmb();
1379 +}
1380 +
1381 static int intel_gen2_queue_flip(struct drm_device *dev,
1382 struct drm_crtc *crtc,
1383 struct drm_framebuffer *fb,
1384 @@ -6311,6 +6305,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
1385 intel_ring_emit(ring, fb->pitches[0]);
1386 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1387 intel_ring_emit(ring, 0); /* aux display base address, unused */
1388 +
1389 + intel_mark_page_flip_active(intel_crtc);
1390 intel_ring_advance(ring);
1391 return 0;
1392
1393 @@ -6351,6 +6347,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
1394 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1395 intel_ring_emit(ring, MI_NOOP);
1396
1397 + intel_mark_page_flip_active(intel_crtc);
1398 intel_ring_advance(ring);
1399 return 0;
1400
1401 @@ -6397,6 +6394,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
1402 pf = 0;
1403 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
1404 intel_ring_emit(ring, pf | pipesrc);
1405 +
1406 + intel_mark_page_flip_active(intel_crtc);
1407 intel_ring_advance(ring);
1408 return 0;
1409
1410 @@ -6439,6 +6438,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
1411 pf = 0;
1412 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
1413 intel_ring_emit(ring, pf | pipesrc);
1414 +
1415 + intel_mark_page_flip_active(intel_crtc);
1416 intel_ring_advance(ring);
1417 return 0;
1418
1419 @@ -6493,6 +6494,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
1420 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
1421 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
1422 intel_ring_emit(ring, (MI_NOOP));
1423 +
1424 + intel_mark_page_flip_active(intel_crtc);
1425 intel_ring_advance(ring);
1426 return 0;
1427
1428 @@ -6541,7 +6544,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1429 return -ENOMEM;
1430
1431 work->event = event;
1432 - work->dev = crtc->dev;
1433 + work->crtc = crtc;
1434 intel_fb = to_intel_framebuffer(crtc->fb);
1435 work->old_fb_obj = intel_fb->obj;
1436 INIT_WORK(&work->work, intel_unpin_work_fn);
1437 @@ -6566,6 +6569,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1438 intel_fb = to_intel_framebuffer(fb);
1439 obj = intel_fb->obj;
1440
1441 + if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
1442 + flush_workqueue(dev_priv->wq);
1443 +
1444 ret = i915_mutex_lock_interruptible(dev);
1445 if (ret)
1446 goto cleanup;
1447 @@ -6584,6 +6590,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1448 * the flip occurs and the object is no longer visible.
1449 */
1450 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
1451 + atomic_inc(&intel_crtc->unpin_work_count);
1452
1453 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
1454 if (ret)
1455 @@ -6598,6 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1456 return 0;
1457
1458 cleanup_pending:
1459 + atomic_dec(&intel_crtc->unpin_work_count);
1460 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
1461 drm_gem_object_unreference(&work->old_fb_obj->base);
1462 drm_gem_object_unreference(&obj->base);
1463 @@ -7259,10 +7267,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
1464 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
1465 config->mode_changed = true;
1466 }
1467 -
1468 - /* Disable all disconnected encoders. */
1469 - if (connector->base.status == connector_status_disconnected)
1470 - connector->new_encoder = NULL;
1471 }
1472 /* connector->new_encoder is now updated for all connectors. */
1473
1474 @@ -8244,9 +8248,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
1475 * the crtc fixup. */
1476 }
1477
1478 +static void i915_redisable_vga(struct drm_device *dev)
1479 +{
1480 + struct drm_i915_private *dev_priv = dev->dev_private;
1481 + u32 vga_reg;
1482 +
1483 + if (HAS_PCH_SPLIT(dev))
1484 + vga_reg = CPU_VGACNTRL;
1485 + else
1486 + vga_reg = VGACNTRL;
1487 +
1488 + if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
1489 + DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
1490 + I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1491 + POSTING_READ(vga_reg);
1492 + }
1493 +}
1494 +
1495 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
1496 * and i915 state tracking structures. */
1497 -void intel_modeset_setup_hw_state(struct drm_device *dev)
1498 +void intel_modeset_setup_hw_state(struct drm_device *dev,
1499 + bool force_restore)
1500 {
1501 struct drm_i915_private *dev_priv = dev->dev_private;
1502 enum pipe pipe;
1503 @@ -8317,7 +8339,17 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
1504 intel_sanitize_crtc(crtc);
1505 }
1506
1507 - intel_modeset_update_staged_output_state(dev);
1508 + if (force_restore) {
1509 + for_each_pipe(pipe) {
1510 + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1511 + intel_set_mode(&crtc->base, &crtc->base.mode,
1512 + crtc->base.x, crtc->base.y, crtc->base.fb);
1513 + }
1514 +
1515 + i915_redisable_vga(dev);
1516 + } else {
1517 + intel_modeset_update_staged_output_state(dev);
1518 + }
1519
1520 intel_modeset_check_state(dev);
1521 }
1522 @@ -8328,7 +8360,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
1523
1524 intel_setup_overlay(dev);
1525
1526 - intel_modeset_setup_hw_state(dev);
1527 + intel_modeset_setup_hw_state(dev, false);
1528 }
1529
1530 void intel_modeset_cleanup(struct drm_device *dev)
1531 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1532 index fe71425..016e375 100644
1533 --- a/drivers/gpu/drm/i915/intel_drv.h
1534 +++ b/drivers/gpu/drm/i915/intel_drv.h
1535 @@ -198,6 +198,8 @@ struct intel_crtc {
1536 struct intel_unpin_work *unpin_work;
1537 int fdi_lanes;
1538
1539 + atomic_t unpin_work_count;
1540 +
1541 /* Display surface base address adjustement for pageflips. Note that on
1542 * gen4+ this only adjusts up to a tile, offsets within a tile are
1543 * handled in the hw itself (with the TILEOFF register). */
1544 @@ -380,11 +382,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
1545
1546 struct intel_unpin_work {
1547 struct work_struct work;
1548 - struct drm_device *dev;
1549 + struct drm_crtc *crtc;
1550 struct drm_i915_gem_object *old_fb_obj;
1551 struct drm_i915_gem_object *pending_flip_obj;
1552 struct drm_pending_vblank_event *event;
1553 - int pending;
1554 + atomic_t pending;
1555 +#define INTEL_FLIP_INACTIVE 0
1556 +#define INTEL_FLIP_PENDING 1
1557 +#define INTEL_FLIP_COMPLETE 2
1558 bool enable_stall_check;
1559 };
1560
1561 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1562 index edba93b..d4d9a6f 100644
1563 --- a/drivers/gpu/drm/i915/intel_lvds.c
1564 +++ b/drivers/gpu/drm/i915/intel_lvds.c
1565 @@ -526,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
1566 dev_priv->modeset_on_lid = 0;
1567
1568 mutex_lock(&dev->mode_config.mutex);
1569 - intel_modeset_check_state(dev);
1570 + intel_modeset_setup_hw_state(dev, true);
1571 mutex_unlock(&dev->mode_config.mutex);
1572
1573 return NOTIFY_OK;
1574 @@ -763,14 +763,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
1575 },
1576 {
1577 .callback = intel_no_lvds_dmi_callback,
1578 - .ident = "ZOTAC ZBOXSD-ID12/ID13",
1579 - .matches = {
1580 - DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
1581 - DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
1582 - },
1583 - },
1584 - {
1585 - .callback = intel_no_lvds_dmi_callback,
1586 .ident = "Gigabyte GA-D525TUD",
1587 .matches = {
1588 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
1589 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1590 index 442968f..eaaff3c 100644
1591 --- a/drivers/gpu/drm/i915/intel_pm.c
1592 +++ b/drivers/gpu/drm/i915/intel_pm.c
1593 @@ -44,6 +44,14 @@
1594 * i915.i915_enable_fbc parameter
1595 */
1596
1597 +static bool intel_crtc_active(struct drm_crtc *crtc)
1598 +{
1599 + /* Be paranoid as we can arrive here with only partial
1600 + * state retrieved from the hardware during setup.
1601 + */
1602 + return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
1603 +}
1604 +
1605 static void i8xx_disable_fbc(struct drm_device *dev)
1606 {
1607 struct drm_i915_private *dev_priv = dev->dev_private;
1608 @@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
1609 * - going to an unsupported config (interlace, pixel multiply, etc.)
1610 */
1611 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1612 - if (tmp_crtc->enabled &&
1613 - !to_intel_crtc(tmp_crtc)->primary_disabled &&
1614 - tmp_crtc->fb) {
1615 + if (intel_crtc_active(tmp_crtc) &&
1616 + !to_intel_crtc(tmp_crtc)->primary_disabled) {
1617 if (crtc) {
1618 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1619 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1620 @@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1621 struct drm_crtc *crtc, *enabled = NULL;
1622
1623 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1624 - if (crtc->enabled && crtc->fb) {
1625 + if (intel_crtc_active(crtc)) {
1626 if (enabled)
1627 return NULL;
1628 enabled = crtc;
1629 @@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1630 int entries, tlb_miss;
1631
1632 crtc = intel_get_crtc_for_plane(dev, plane);
1633 - if (crtc->fb == NULL || !crtc->enabled) {
1634 + if (!intel_crtc_active(crtc)) {
1635 *cursor_wm = cursor->guard_size;
1636 *plane_wm = display->guard_size;
1637 return false;
1638 @@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1639 int entries;
1640
1641 crtc = intel_get_crtc_for_plane(dev, plane);
1642 - if (crtc->fb == NULL || !crtc->enabled)
1643 + if (!intel_crtc_active(crtc))
1644 return false;
1645
1646 clock = crtc->mode.clock; /* VESA DOT Clock */
1647 @@ -1478,7 +1485,7 @@ static void i9xx_update_wm(struct drm_device *dev)
1648
1649 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1650 crtc = intel_get_crtc_for_plane(dev, 1);
1651 - if (crtc->enabled && crtc->fb) {
1652 + if (intel_crtc_active(crtc)) {
1653 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1654 wm_info, fifo_size,
1655 crtc->fb->bits_per_pixel / 8,
1656 @@ -1923,7 +1930,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1657 int entries, tlb_miss;
1658
1659 crtc = intel_get_crtc_for_plane(dev, plane);
1660 - if (crtc->fb == NULL || !crtc->enabled) {
1661 + if (!intel_crtc_active(crtc)) {
1662 *sprite_wm = display->guard_size;
1663 return false;
1664 }
1665 diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1666 index c345097..b2f3d4d 100644
1667 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1668 +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
1669 @@ -38,6 +38,8 @@ enum nvbios_pll_type {
1670 PLL_UNK42 = 0x42,
1671 PLL_VPLL0 = 0x80,
1672 PLL_VPLL1 = 0x81,
1673 + PLL_VPLL2 = 0x82,
1674 + PLL_VPLL3 = 0x83,
1675 PLL_MAX = 0xff
1676 };
1677
1678 diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1679 index f6962c9..7c96262 100644
1680 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1681 +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
1682 @@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
1683 switch (info.type) {
1684 case PLL_VPLL0:
1685 case PLL_VPLL1:
1686 + case PLL_VPLL2:
1687 + case PLL_VPLL3:
1688 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
1689 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
1690 nv_wr32(priv, info.reg + 0x10, fN << 16);
1691 diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1692 index 9f59f2b..73bedff 100644
1693 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1694 +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
1695 @@ -86,14 +86,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
1696 mem->memtype = type;
1697 mem->size = size;
1698
1699 - mutex_lock(&mm->mutex);
1700 + mutex_lock(&pfb->base.mutex);
1701 do {
1702 if (back)
1703 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
1704 else
1705 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
1706 if (ret) {
1707 - mutex_unlock(&mm->mutex);
1708 + mutex_unlock(&pfb->base.mutex);
1709 pfb->ram.put(pfb, &mem);
1710 return ret;
1711 }
1712 @@ -101,7 +101,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
1713 list_add_tail(&r->rl_entry, &mem->regions);
1714 size -= r->length;
1715 } while (size);
1716 - mutex_unlock(&mm->mutex);
1717 + mutex_unlock(&pfb->base.mutex);
1718
1719 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
1720 mem->offset = (u64)r->offset << 12;
1721 diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1722 index 1188227..6565f3d 100644
1723 --- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1724 +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
1725 @@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
1726 if (ret)
1727 return ret;
1728
1729 + mutex_lock(&imem->base.mutex);
1730 list_add(&iobj->head, &imem->list);
1731 + mutex_unlock(&imem->base.mutex);
1732 return 0;
1733 }
1734
1735 void
1736 nouveau_instobj_destroy(struct nouveau_instobj *iobj)
1737 {
1738 - if (iobj->head.prev)
1739 - list_del(&iobj->head);
1740 + struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
1741 +
1742 + mutex_lock(&subdev->mutex);
1743 + list_del(&iobj->head);
1744 + mutex_unlock(&subdev->mutex);
1745 +
1746 return nouveau_object_destroy(&iobj->base);
1747 }
1748
1749 @@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
1750 if (ret)
1751 return ret;
1752
1753 + mutex_lock(&imem->base.mutex);
1754 +
1755 list_for_each_entry(iobj, &imem->list, head) {
1756 if (iobj->suspend) {
1757 for (i = 0; i < iobj->size; i += 4)
1758 @@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
1759 }
1760 }
1761
1762 + mutex_unlock(&imem->base.mutex);
1763 +
1764 return 0;
1765 }
1766
1767 @@ -104,17 +114,26 @@ int
1768 nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
1769 {
1770 struct nouveau_instobj *iobj;
1771 - int i;
1772 + int i, ret = 0;
1773
1774 if (suspend) {
1775 + mutex_lock(&imem->base.mutex);
1776 +
1777 list_for_each_entry(iobj, &imem->list, head) {
1778 iobj->suspend = vmalloc(iobj->size);
1779 - if (iobj->suspend) {
1780 - for (i = 0; i < iobj->size; i += 4)
1781 - iobj->suspend[i / 4] = nv_ro32(iobj, i);
1782 - } else
1783 - return -ENOMEM;
1784 + if (!iobj->suspend) {
1785 + ret = -ENOMEM;
1786 + break;
1787 + }
1788 +
1789 + for (i = 0; i < iobj->size; i += 4)
1790 + iobj->suspend[i / 4] = nv_ro32(iobj, i);
1791 }
1792 +
1793 + mutex_unlock(&imem->base.mutex);
1794 +
1795 + if (ret)
1796 + return ret;
1797 }
1798
1799 return nouveau_subdev_fini(&imem->base, suspend);
1800 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
1801 index 35ac57f..5f0e7ef 100644
1802 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
1803 +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
1804 @@ -1279,7 +1279,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1805 if (drm->agp.stat == ENABLED) {
1806 mem->bus.offset = mem->start << PAGE_SHIFT;
1807 mem->bus.base = drm->agp.base;
1808 - mem->bus.is_iomem = true;
1809 + mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1810 }
1811 #endif
1812 break;
1813 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
1814 index bedafd1..cdb83ac 100644
1815 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h
1816 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
1817 @@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
1818 void nv10_fence_context_del(struct nouveau_channel *);
1819 void nv10_fence_destroy(struct nouveau_drm *);
1820 int nv10_fence_create(struct nouveau_drm *);
1821 +void nv17_fence_resume(struct nouveau_drm *drm);
1822
1823 int nv50_fence_create(struct nouveau_drm *);
1824 int nv84_fence_create(struct nouveau_drm *);
1825 diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
1826 index 366462c..4f604cd 100644
1827 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
1828 +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
1829 @@ -197,6 +197,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
1830 if (nvbo->gem) {
1831 if (nvbo->gem->dev == dev) {
1832 drm_gem_object_reference(nvbo->gem);
1833 + dma_buf_put(dma_buf);
1834 return nvbo->gem;
1835 }
1836 }
1837 diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
1838 index 184cdf8..39ffc07 100644
1839 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c
1840 +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
1841 @@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
1842
1843 static inline bool is_powersaving_dpms(int mode)
1844 {
1845 - return (mode != DRM_MODE_DPMS_ON);
1846 + return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
1847 }
1848
1849 static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
1850 diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
1851 index ce752bf..0b34d23 100644
1852 --- a/drivers/gpu/drm/nouveau/nv10_fence.c
1853 +++ b/drivers/gpu/drm/nouveau/nv10_fence.c
1854 @@ -160,6 +160,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
1855 kfree(priv);
1856 }
1857
1858 +void nv17_fence_resume(struct nouveau_drm *drm)
1859 +{
1860 + struct nv10_fence_priv *priv = drm->fence;
1861 +
1862 + nouveau_bo_wr32(priv->bo, 0, priv->sequence);
1863 +}
1864 +
1865 int
1866 nv10_fence_create(struct nouveau_drm *drm)
1867 {
1868 @@ -192,6 +199,7 @@ nv10_fence_create(struct nouveau_drm *drm)
1869 if (ret == 0) {
1870 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
1871 priv->base.sync = nv17_fence_sync;
1872 + priv->base.resume = nv17_fence_resume;
1873 }
1874 }
1875
1876 diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
1877 index e0763ea..ecd22f5 100644
1878 --- a/drivers/gpu/drm/nouveau/nv50_fence.c
1879 +++ b/drivers/gpu/drm/nouveau/nv50_fence.c
1880 @@ -119,6 +119,7 @@ nv50_fence_create(struct nouveau_drm *drm)
1881 if (ret == 0) {
1882 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
1883 priv->base.sync = nv17_fence_sync;
1884 + priv->base.resume = nv17_fence_resume;
1885 }
1886
1887 if (ret)
1888 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1889 index 24d932f..9175615 100644
1890 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1891 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1892 @@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1893 /* use frac fb div on APUs */
1894 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1895 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1896 + if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
1897 + radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1898 } else {
1899 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
1900
1901 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1902 index 010bae1..4552d4a 100644
1903 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1904 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1905 @@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
1906 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
1907 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
1908 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1909 - radeon_dp_set_link_config(connector, mode);
1910 + radeon_dp_set_link_config(connector, adjusted_mode);
1911 }
1912
1913 return true;
1914 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1915 index 219942c..18a5382 100644
1916 --- a/drivers/gpu/drm/radeon/evergreen.c
1917 +++ b/drivers/gpu/drm/radeon/evergreen.c
1918 @@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1919 case CHIP_SUMO:
1920 rdev->config.evergreen.num_ses = 1;
1921 rdev->config.evergreen.max_pipes = 4;
1922 - rdev->config.evergreen.max_tile_pipes = 2;
1923 + rdev->config.evergreen.max_tile_pipes = 4;
1924 if (rdev->pdev->device == 0x9648)
1925 rdev->config.evergreen.max_simds = 3;
1926 else if ((rdev->pdev->device == 0x9647) ||
1927 @@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1928 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1929 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1930 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1931 - gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1932 + gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
1933 break;
1934 case CHIP_SUMO2:
1935 rdev->config.evergreen.num_ses = 1;
1936 @@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1937 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1938 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1939 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1940 - gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1941 + gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
1942 break;
1943 case CHIP_BARTS:
1944 rdev->config.evergreen.num_ses = 2;
1945 @@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1946 break;
1947 case CHIP_CAICOS:
1948 rdev->config.evergreen.num_ses = 1;
1949 - rdev->config.evergreen.max_pipes = 4;
1950 + rdev->config.evergreen.max_pipes = 2;
1951 rdev->config.evergreen.max_tile_pipes = 2;
1952 rdev->config.evergreen.max_simds = 2;
1953 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1954 @@ -3093,6 +3093,16 @@ restart_ih:
1955 break;
1956 }
1957 break;
1958 + case 146:
1959 + case 147:
1960 + dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
1961 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1962 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
1963 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1964 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
1965 + /* reset addr and status */
1966 + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
1967 + break;
1968 case 176: /* CP_INT in ring buffer */
1969 case 177: /* CP_INT in IB1 */
1970 case 178: /* CP_INT in IB2 */
1971 diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
1972 index c042e49..69ffae2 100644
1973 --- a/drivers/gpu/drm/radeon/evergreen_cs.c
1974 +++ b/drivers/gpu/drm/radeon/evergreen_cs.c
1975 @@ -2724,6 +2724,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
1976
1977 /* check config regs */
1978 switch (reg) {
1979 + case WAIT_UNTIL:
1980 case GRBM_GFX_INDEX:
1981 case CP_STRMOUT_CNTL:
1982 case CP_COHER_CNTL:
1983 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
1984 index 2bc0f6a..442732f 100644
1985 --- a/drivers/gpu/drm/radeon/evergreend.h
1986 +++ b/drivers/gpu/drm/radeon/evergreend.h
1987 @@ -45,6 +45,8 @@
1988 #define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
1989 #define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
1990 #define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
1991 +#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
1992 +#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
1993
1994 /* Registers */
1995
1996 @@ -651,6 +653,7 @@
1997 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
1998 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
1999 #define VM_CONTEXT1_CNTL 0x1414
2000 +#define VM_CONTEXT1_CNTL2 0x1434
2001 #define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
2002 #define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
2003 #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
2004 @@ -672,6 +675,8 @@
2005 #define CACHE_UPDATE_MODE(x) ((x) << 6)
2006 #define VM_L2_STATUS 0x140C
2007 #define L2_BUSY (1 << 0)
2008 +#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
2009 +#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
2010
2011 #define WAIT_UNTIL 0x8040
2012
2013 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
2014 index 81e6a56..30c18a6 100644
2015 --- a/drivers/gpu/drm/radeon/ni.c
2016 +++ b/drivers/gpu/drm/radeon/ni.c
2017 @@ -784,10 +784,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
2018 /* enable context1-7 */
2019 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2020 (u32)(rdev->dummy_page.addr >> 12));
2021 - WREG32(VM_CONTEXT1_CNTL2, 0);
2022 - WREG32(VM_CONTEXT1_CNTL, 0);
2023 + WREG32(VM_CONTEXT1_CNTL2, 4);
2024 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2025 - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2026 + RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2027 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2028 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2029 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2030 + PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2031 + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2032 + VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2033 + VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2034 + READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2035 + READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2036 + WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2037 + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2038
2039 cayman_pcie_gart_tlb_flush(rdev);
2040 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2041 diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
2042 index cbef681..f5e54a7 100644
2043 --- a/drivers/gpu/drm/radeon/nid.h
2044 +++ b/drivers/gpu/drm/radeon/nid.h
2045 @@ -80,7 +80,18 @@
2046 #define VM_CONTEXT0_CNTL 0x1410
2047 #define ENABLE_CONTEXT (1 << 0)
2048 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
2049 +#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
2050 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
2051 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
2052 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
2053 +#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
2054 +#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
2055 +#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
2056 +#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
2057 +#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
2058 +#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
2059 +#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
2060 +#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
2061 #define VM_CONTEXT1_CNTL 0x1414
2062 #define VM_CONTEXT0_CNTL2 0x1430
2063 #define VM_CONTEXT1_CNTL2 0x1434
2064 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2065 index 8c42d54..b3f1459 100644
2066 --- a/drivers/gpu/drm/radeon/radeon.h
2067 +++ b/drivers/gpu/drm/radeon/radeon.h
2068 @@ -220,12 +220,13 @@ struct radeon_fence {
2069 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
2070 int radeon_fence_driver_init(struct radeon_device *rdev);
2071 void radeon_fence_driver_fini(struct radeon_device *rdev);
2072 +void radeon_fence_driver_force_completion(struct radeon_device *rdev);
2073 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
2074 void radeon_fence_process(struct radeon_device *rdev, int ring);
2075 bool radeon_fence_signaled(struct radeon_fence *fence);
2076 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
2077 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
2078 -void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
2079 +int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
2080 int radeon_fence_wait_any(struct radeon_device *rdev,
2081 struct radeon_fence **fences,
2082 bool intr);
2083 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
2084 index 45b660b..ced9a81 100644
2085 --- a/drivers/gpu/drm/radeon/radeon_combios.c
2086 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
2087 @@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2088 of_machine_is_compatible("PowerBook6,7")) {
2089 /* ibook */
2090 rdev->mode_info.connector_table = CT_IBOOK;
2091 + } else if (of_machine_is_compatible("PowerMac3,5")) {
2092 + /* PowerMac G4 Silver radeon 7500 */
2093 + rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
2094 } else if (of_machine_is_compatible("PowerMac4,4")) {
2095 /* emac */
2096 rdev->mode_info.connector_table = CT_EMAC;
2097 @@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2098 CONNECTOR_OBJECT_ID_SVIDEO,
2099 &hpd);
2100 break;
2101 + case CT_MAC_G4_SILVER:
2102 + DRM_INFO("Connector Table: %d (mac g4 silver)\n",
2103 + rdev->mode_info.connector_table);
2104 + /* DVI-I - tv dac, int tmds */
2105 + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2106 + hpd.hpd = RADEON_HPD_1; /* ??? */
2107 + radeon_add_legacy_encoder(dev,
2108 + radeon_get_encoder_enum(dev,
2109 + ATOM_DEVICE_DFP1_SUPPORT,
2110 + 0),
2111 + ATOM_DEVICE_DFP1_SUPPORT);
2112 + radeon_add_legacy_encoder(dev,
2113 + radeon_get_encoder_enum(dev,
2114 + ATOM_DEVICE_CRT2_SUPPORT,
2115 + 2),
2116 + ATOM_DEVICE_CRT2_SUPPORT);
2117 + radeon_add_legacy_connector(dev, 0,
2118 + ATOM_DEVICE_DFP1_SUPPORT |
2119 + ATOM_DEVICE_CRT2_SUPPORT,
2120 + DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2121 + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2122 + &hpd);
2123 + /* VGA - primary dac */
2124 + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2125 + hpd.hpd = RADEON_HPD_NONE;
2126 + radeon_add_legacy_encoder(dev,
2127 + radeon_get_encoder_enum(dev,
2128 + ATOM_DEVICE_CRT1_SUPPORT,
2129 + 1),
2130 + ATOM_DEVICE_CRT1_SUPPORT);
2131 + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
2132 + DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
2133 + CONNECTOR_OBJECT_ID_VGA,
2134 + &hpd);
2135 + /* TV - TV DAC */
2136 + ddc_i2c.valid = false;
2137 + hpd.hpd = RADEON_HPD_NONE;
2138 + radeon_add_legacy_encoder(dev,
2139 + radeon_get_encoder_enum(dev,
2140 + ATOM_DEVICE_TV1_SUPPORT,
2141 + 2),
2142 + ATOM_DEVICE_TV1_SUPPORT);
2143 + radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
2144 + DRM_MODE_CONNECTOR_SVIDEO,
2145 + &ddc_i2c,
2146 + CONNECTOR_OBJECT_ID_SVIDEO,
2147 + &hpd);
2148 + break;
2149 default:
2150 DRM_INFO("Connector table: %d (invalid)\n",
2151 rdev->mode_info.connector_table);
2152 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2153 index b884c36..810268b 100644
2154 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2155 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2156 @@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2157 ret = connector_status_disconnected;
2158
2159 if (radeon_connector->ddc_bus)
2160 - dret = radeon_ddc_probe(radeon_connector);
2161 + dret = radeon_ddc_probe(radeon_connector, false);
2162 if (dret) {
2163 radeon_connector->detected_by_load = false;
2164 if (radeon_connector->edid) {
2165 @@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2166 return connector->status;
2167
2168 if (radeon_connector->ddc_bus)
2169 - dret = radeon_ddc_probe(radeon_connector);
2170 + dret = radeon_ddc_probe(radeon_connector, false);
2171 if (dret) {
2172 radeon_connector->detected_by_load = false;
2173 if (radeon_connector->edid) {
2174 @@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2175 if (encoder) {
2176 /* setup ddc on the bridge */
2177 radeon_atom_ext_encoder_setup_ddc(encoder);
2178 - if (radeon_ddc_probe(radeon_connector)) /* try DDC */
2179 + /* bridge chips are always aux */
2180 + if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
2181 ret = connector_status_connected;
2182 else if (radeon_connector->dac_load_detect) { /* try load detection */
2183 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
2184 @@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2185 if (radeon_dp_getdpcd(radeon_connector))
2186 ret = connector_status_connected;
2187 } else {
2188 - if (radeon_ddc_probe(radeon_connector))
2189 + /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
2190 + if (radeon_ddc_probe(radeon_connector, false))
2191 ret = connector_status_connected;
2192 }
2193 }
2194 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2195 index e2f5f88..ad4c973 100644
2196 --- a/drivers/gpu/drm/radeon/radeon_device.c
2197 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2198 @@ -1163,6 +1163,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2199 struct drm_crtc *crtc;
2200 struct drm_connector *connector;
2201 int i, r;
2202 + bool force_completion = false;
2203
2204 if (dev == NULL || dev->dev_private == NULL) {
2205 return -ENODEV;
2206 @@ -1205,8 +1206,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2207
2208 mutex_lock(&rdev->ring_lock);
2209 /* wait for gpu to finish processing current batch */
2210 - for (i = 0; i < RADEON_NUM_RINGS; i++)
2211 - radeon_fence_wait_empty_locked(rdev, i);
2212 + for (i = 0; i < RADEON_NUM_RINGS; i++) {
2213 + r = radeon_fence_wait_empty_locked(rdev, i);
2214 + if (r) {
2215 + /* delay GPU reset to resume */
2216 + force_completion = true;
2217 + }
2218 + }
2219 + if (force_completion) {
2220 + radeon_fence_driver_force_completion(rdev);
2221 + }
2222 mutex_unlock(&rdev->ring_lock);
2223
2224 radeon_save_bios_scratch_regs(rdev);
2225 @@ -1337,7 +1346,6 @@ retry:
2226 }
2227
2228 radeon_restore_bios_scratch_regs(rdev);
2229 - drm_helper_resume_force_mode(rdev->ddev);
2230
2231 if (!r) {
2232 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
2233 @@ -1357,11 +1365,14 @@ retry:
2234 }
2235 }
2236 } else {
2237 + radeon_fence_driver_force_completion(rdev);
2238 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
2239 kfree(ring_data[i]);
2240 }
2241 }
2242
2243 + drm_helper_resume_force_mode(rdev->ddev);
2244 +
2245 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
2246 if (r) {
2247 /* bad news, how to tell it to userspace ? */
2248 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
2249 index bfa2a60..2bddddd 100644
2250 --- a/drivers/gpu/drm/radeon/radeon_display.c
2251 +++ b/drivers/gpu/drm/radeon/radeon_display.c
2252 @@ -695,10 +695,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
2253 if (radeon_connector->router.ddc_valid)
2254 radeon_router_select_ddc_port(radeon_connector);
2255
2256 - if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
2257 - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
2258 - (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
2259 - ENCODER_OBJECT_ID_NONE)) {
2260 + if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
2261 + ENCODER_OBJECT_ID_NONE) {
2262 + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2263 +
2264 + if (dig->dp_i2c_bus)
2265 + radeon_connector->edid = drm_get_edid(&radeon_connector->base,
2266 + &dig->dp_i2c_bus->adapter);
2267 + } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
2268 + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
2269 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2270
2271 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
2272 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
2273 index 22bd6c2..28c09b6 100644
2274 --- a/drivers/gpu/drm/radeon/radeon_fence.c
2275 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
2276 @@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
2277 * Returns 0 if the fences have passed, error for all other cases.
2278 * Caller must hold ring lock.
2279 */
2280 -void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
2281 +int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
2282 {
2283 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
2284 + int r;
2285
2286 - while(1) {
2287 - int r;
2288 - r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
2289 + r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
2290 + if (r) {
2291 if (r == -EDEADLK) {
2292 - mutex_unlock(&rdev->ring_lock);
2293 - r = radeon_gpu_reset(rdev);
2294 - mutex_lock(&rdev->ring_lock);
2295 - if (!r)
2296 - continue;
2297 - }
2298 - if (r) {
2299 - dev_err(rdev->dev, "error waiting for ring to become"
2300 - " idle (%d)\n", r);
2301 + return -EDEADLK;
2302 }
2303 - return;
2304 + dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
2305 + ring, r);
2306 }
2307 + return 0;
2308 }
2309
2310 /**
2311 @@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
2312 */
2313 void radeon_fence_driver_fini(struct radeon_device *rdev)
2314 {
2315 - int ring;
2316 + int ring, r;
2317
2318 mutex_lock(&rdev->ring_lock);
2319 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
2320 if (!rdev->fence_drv[ring].initialized)
2321 continue;
2322 - radeon_fence_wait_empty_locked(rdev, ring);
2323 + r = radeon_fence_wait_empty_locked(rdev, ring);
2324 + if (r) {
2325 + /* no need to trigger GPU reset as we are unloading */
2326 + radeon_fence_driver_force_completion(rdev);
2327 + }
2328 wake_up_all(&rdev->fence_queue);
2329 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
2330 rdev->fence_drv[ring].initialized = false;
2331 @@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
2332 mutex_unlock(&rdev->ring_lock);
2333 }
2334
2335 +/**
2336 + * radeon_fence_driver_force_completion - force all fence waiter to complete
2337 + *
2338 + * @rdev: radeon device pointer
2339 + *
2340 + * In case of GPU reset failure make sure no process keep waiting on fence
2341 + * that will never complete.
2342 + */
2343 +void radeon_fence_driver_force_completion(struct radeon_device *rdev)
2344 +{
2345 + int ring;
2346 +
2347 + for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
2348 + if (!rdev->fence_drv[ring].initialized)
2349 + continue;
2350 + radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
2351 + }
2352 +}
2353 +
2354
2355 /*
2356 * Fence debugfs
2357 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
2358 index c5bddd6..fc60b74 100644
2359 --- a/drivers/gpu/drm/radeon/radeon_i2c.c
2360 +++ b/drivers/gpu/drm/radeon/radeon_i2c.c
2361 @@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
2362 * radeon_ddc_probe
2363 *
2364 */
2365 -bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
2366 +bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
2367 {
2368 u8 out = 0x0;
2369 u8 buf[8];
2370 @@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
2371 if (radeon_connector->router.ddc_valid)
2372 radeon_router_select_ddc_port(radeon_connector);
2373
2374 - ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
2375 + if (use_aux) {
2376 + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
2377 + ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
2378 + } else {
2379 + ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
2380 + }
2381 +
2382 if (ret != 2)
2383 /* Couldn't find an accessible DDC on this connector */
2384 return false;
2385 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2386 index f5ba224..62cd512 100644
2387 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2388 +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
2389 @@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
2390 enum drm_connector_status found = connector_status_disconnected;
2391 bool color = true;
2392
2393 + /* just don't bother on RN50 those chip are often connected to remoting
2394 + * console hw and often we get failure to load detect those. So to make
2395 + * everyone happy report the encoder as always connected.
2396 + */
2397 + if (ASIC_IS_RN50(rdev)) {
2398 + return connector_status_connected;
2399 + }
2400 +
2401 /* save the regs we need */
2402 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
2403 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
2404 diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
2405 index 92c5f47..a9c3f06 100644
2406 --- a/drivers/gpu/drm/radeon/radeon_mode.h
2407 +++ b/drivers/gpu/drm/radeon/radeon_mode.h
2408 @@ -209,7 +209,8 @@ enum radeon_connector_table {
2409 CT_RN50_POWER,
2410 CT_MAC_X800,
2411 CT_MAC_G5_9600,
2412 - CT_SAM440EP
2413 + CT_SAM440EP,
2414 + CT_MAC_G4_SILVER
2415 };
2416
2417 enum radeon_dvo_chip {
2418 @@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
2419 u8 val);
2420 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
2421 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
2422 -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
2423 +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
2424 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
2425
2426 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
2427 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
2428 index aa14dbb..0bfa656 100644
2429 --- a/drivers/gpu/drm/radeon/radeon_pm.c
2430 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
2431 @@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
2432
2433 static void radeon_pm_set_clocks(struct radeon_device *rdev)
2434 {
2435 - int i;
2436 + int i, r;
2437
2438 /* no need to take locks, etc. if nothing's going to change */
2439 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
2440 @@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
2441 /* wait for the rings to drain */
2442 for (i = 0; i < RADEON_NUM_RINGS; i++) {
2443 struct radeon_ring *ring = &rdev->ring[i];
2444 - if (ring->ready)
2445 - radeon_fence_wait_empty_locked(rdev, i);
2446 + if (!ring->ready) {
2447 + continue;
2448 + }
2449 + r = radeon_fence_wait_empty_locked(rdev, i);
2450 + if (r) {
2451 + /* needs a GPU reset dont reset here */
2452 + mutex_unlock(&rdev->ring_lock);
2453 + up_write(&rdev->pm.mclk_lock);
2454 + mutex_unlock(&rdev->ddev->struct_mutex);
2455 + return;
2456 + }
2457 }
2458
2459 radeon_unmap_vram_bos(rdev);
2460 diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
2461 index e095218..26c23bb 100644
2462 --- a/drivers/gpu/drm/radeon/radeon_prime.c
2463 +++ b/drivers/gpu/drm/radeon/radeon_prime.c
2464 @@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
2465 bo = dma_buf->priv;
2466 if (bo->gem_base.dev == dev) {
2467 drm_gem_object_reference(&bo->gem_base);
2468 + dma_buf_put(dma_buf);
2469 return &bo->gem_base;
2470 }
2471 }
2472 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
2473 index 4422d63..c4d9eb6 100644
2474 --- a/drivers/gpu/drm/radeon/si.c
2475 +++ b/drivers/gpu/drm/radeon/si.c
2476 @@ -2426,9 +2426,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2477 /* enable context1-15 */
2478 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2479 (u32)(rdev->dummy_page.addr >> 12));
2480 - WREG32(VM_CONTEXT1_CNTL2, 0);
2481 + WREG32(VM_CONTEXT1_CNTL2, 4);
2482 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2483 - RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2484 + RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2485 + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2486 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2487 + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2488 + PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2489 + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2490 + VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2491 + VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2492 + READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2493 + READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2494 + WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2495 + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2496
2497 si_pcie_gart_tlb_flush(rdev);
2498 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
2499 @@ -3684,6 +3695,16 @@ restart_ih:
2500 break;
2501 }
2502 break;
2503 + case 146:
2504 + case 147:
2505 + dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
2506 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
2507 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
2508 + dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
2509 + RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
2510 + /* reset addr and status */
2511 + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
2512 + break;
2513 case 176: /* RINGID0 CP_INT */
2514 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
2515 break;
2516 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
2517 index a8871af..53b4d45 100644
2518 --- a/drivers/gpu/drm/radeon/sid.h
2519 +++ b/drivers/gpu/drm/radeon/sid.h
2520 @@ -91,7 +91,18 @@
2521 #define VM_CONTEXT0_CNTL 0x1410
2522 #define ENABLE_CONTEXT (1 << 0)
2523 #define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
2524 +#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
2525 #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
2526 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
2527 +#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
2528 +#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
2529 +#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
2530 +#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
2531 +#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
2532 +#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
2533 +#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
2534 +#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
2535 +#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
2536 #define VM_CONTEXT1_CNTL 0x1414
2537 #define VM_CONTEXT0_CNTL2 0x1430
2538 #define VM_CONTEXT1_CNTL2 0x1434
2539 @@ -104,6 +115,9 @@
2540 #define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
2541 #define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
2542
2543 +#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
2544 +#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
2545 +
2546 #define VM_INVALIDATE_REQUEST 0x1478
2547 #define VM_INVALIDATE_RESPONSE 0x147c
2548
2549 diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
2550 index b3b2ced..6d7acf4 100644
2551 --- a/drivers/gpu/drm/udl/udl_connector.c
2552 +++ b/drivers/gpu/drm/udl/udl_connector.c
2553 @@ -22,13 +22,17 @@
2554 static u8 *udl_get_edid(struct udl_device *udl)
2555 {
2556 u8 *block;
2557 - char rbuf[3];
2558 + char *rbuf;
2559 int ret, i;
2560
2561 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
2562 if (block == NULL)
2563 return NULL;
2564
2565 + rbuf = kmalloc(2, GFP_KERNEL);
2566 + if (rbuf == NULL)
2567 + goto error;
2568 +
2569 for (i = 0; i < EDID_LENGTH; i++) {
2570 ret = usb_control_msg(udl->ddev->usbdev,
2571 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
2572 @@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
2573 HZ);
2574 if (ret < 1) {
2575 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
2576 - i--;
2577 goto error;
2578 }
2579 block[i] = rbuf[1];
2580 }
2581
2582 + kfree(rbuf);
2583 return block;
2584
2585 error:
2586 kfree(block);
2587 + kfree(rbuf);
2588 return NULL;
2589 }
2590
2591 @@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
2592
2593 edid = (struct edid *)udl_get_edid(udl);
2594
2595 + /*
2596 + * We only read the main block, but if the monitor reports extension
2597 + * blocks then the drm edid code expects them to be present, so patch
2598 + * the extension count to 0.
2599 + */
2600 + edid->checksum += edid->extensions;
2601 + edid->extensions = 0;
2602 +
2603 drm_mode_connector_update_edid_property(connector, edid);
2604 ret = drm_add_edid_modes(connector, edid);
2605 kfree(edid);
2606 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2607 index 9d7a428..1ef9a9e 100644
2608 --- a/drivers/hid/hid-ids.h
2609 +++ b/drivers/hid/hid-ids.h
2610 @@ -696,6 +696,9 @@
2611 #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
2612 #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
2613
2614 +#define USB_VENDOR_ID_SIGMATEL 0x066F
2615 +#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
2616 +
2617 #define USB_VENDOR_ID_SKYCABLE 0x1223
2618 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
2619
2620 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2621 index 11c7932..0a1429f 100644
2622 --- a/drivers/hid/usbhid/hid-quirks.c
2623 +++ b/drivers/hid/usbhid/hid-quirks.c
2624 @@ -79,6 +79,7 @@ static const struct hid_blacklist {
2625 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
2626 { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
2627 { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
2628 + { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
2629 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
2630 { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
2631 { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
2632 diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
2633 index 8fa2632..7272176 100644
2634 --- a/drivers/hwmon/lm73.c
2635 +++ b/drivers/hwmon/lm73.c
2636 @@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
2637 struct i2c_client *client = to_i2c_client(dev);
2638 long temp;
2639 short value;
2640 + s32 err;
2641
2642 int status = kstrtol(buf, 10, &temp);
2643 if (status < 0)
2644 @@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
2645 /* Write value */
2646 value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
2647 (LM73_TEMP_MAX*4)) << 5;
2648 - i2c_smbus_write_word_swapped(client, attr->index, value);
2649 - return count;
2650 + err = i2c_smbus_write_word_swapped(client, attr->index, value);
2651 + return (err < 0) ? err : count;
2652 }
2653
2654 static ssize_t show_temp(struct device *dev, struct device_attribute *da,
2655 @@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
2656 {
2657 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
2658 struct i2c_client *client = to_i2c_client(dev);
2659 + int temp;
2660 +
2661 + s32 err = i2c_smbus_read_word_swapped(client, attr->index);
2662 + if (err < 0)
2663 + return err;
2664 +
2665 /* use integer division instead of equivalent right shift to
2666 guarantee arithmetic shift and preserve the sign */
2667 - int temp = ((s16) (i2c_smbus_read_word_swapped(client,
2668 - attr->index))*250) / 32;
2669 - return sprintf(buf, "%d\n", temp);
2670 + temp = (((s16) err) * 250) / 32;
2671 + return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
2672 }
2673
2674
2675 diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
2676 index 80079e5..dbc99d4 100644
2677 --- a/drivers/infiniband/hw/mlx4/cm.c
2678 +++ b/drivers/infiniband/hw/mlx4/cm.c
2679 @@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
2680 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
2681 unsigned long flags;
2682
2683 - spin_lock_irqsave(&sriov->going_down_lock, flags);
2684 spin_lock(&sriov->id_map_lock);
2685 + spin_lock_irqsave(&sriov->going_down_lock, flags);
2686 /*make sure that there is no schedule inside the scheduled work.*/
2687 if (!sriov->is_going_down) {
2688 id->scheduled_delete = 1;
2689 schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
2690 }
2691 - spin_unlock(&sriov->id_map_lock);
2692 spin_unlock_irqrestore(&sriov->going_down_lock, flags);
2693 + spin_unlock(&sriov->id_map_lock);
2694 }
2695
2696 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
2697 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
2698 index 5cac29e..33cc589 100644
2699 --- a/drivers/infiniband/hw/nes/nes.h
2700 +++ b/drivers/infiniband/hw/nes/nes.h
2701 @@ -532,6 +532,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
2702 int nes_destroy_cqp(struct nes_device *);
2703 int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
2704 void nes_recheck_link_status(struct work_struct *work);
2705 +void nes_terminate_timeout(unsigned long context);
2706
2707 /* nes_nic.c */
2708 struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
2709 diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
2710 index fe7965e..67647e2 100644
2711 --- a/drivers/infiniband/hw/nes/nes_hw.c
2712 +++ b/drivers/infiniband/hw/nes/nes_hw.c
2713 @@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2714 static void process_critical_error(struct nes_device *nesdev);
2715 static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
2716 static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
2717 -static void nes_terminate_timeout(unsigned long context);
2718 static void nes_terminate_start_timer(struct nes_qp *nesqp);
2719
2720 #ifdef CONFIG_INFINIBAND_NES_DEBUG
2721 @@ -3520,7 +3519,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
2722 }
2723
2724 /* Timeout routine in case terminate fails to complete */
2725 -static void nes_terminate_timeout(unsigned long context)
2726 +void nes_terminate_timeout(unsigned long context)
2727 {
2728 struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
2729
2730 @@ -3530,11 +3529,7 @@ static void nes_terminate_timeout(unsigned long context)
2731 /* Set a timer in case hw cannot complete the terminate sequence */
2732 static void nes_terminate_start_timer(struct nes_qp *nesqp)
2733 {
2734 - init_timer(&nesqp->terminate_timer);
2735 - nesqp->terminate_timer.function = nes_terminate_timeout;
2736 - nesqp->terminate_timer.expires = jiffies + HZ;
2737 - nesqp->terminate_timer.data = (unsigned long)nesqp;
2738 - add_timer(&nesqp->terminate_timer);
2739 + mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
2740 }
2741
2742 /**
2743 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
2744 index cd0ecb2..07e4fba 100644
2745 --- a/drivers/infiniband/hw/nes/nes_verbs.c
2746 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
2747 @@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
2748 }
2749
2750 nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
2751 + init_timer(&nesqp->terminate_timer);
2752 + nesqp->terminate_timer.function = nes_terminate_timeout;
2753 + nesqp->terminate_timer.data = (unsigned long)nesqp;
2754
2755 /* update the QP table */
2756 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
2757 @@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
2758 return &nesqp->ibqp;
2759 }
2760
2761 -
2762 /**
2763 * nes_clean_cq
2764 */
2765 @@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2766 return ibmr;
2767 case IWNES_MEMREG_TYPE_QP:
2768 case IWNES_MEMREG_TYPE_CQ:
2769 + if (!region->length) {
2770 + nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
2771 + ib_umem_release(region);
2772 + return ERR_PTR(-EINVAL);
2773 + }
2774 nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
2775 if (!nespbl) {
2776 nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
2777 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2778 index 0badfa4..9476c1b 100644
2779 --- a/drivers/iommu/intel-iommu.c
2780 +++ b/drivers/iommu/intel-iommu.c
2781 @@ -1827,10 +1827,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2782 if (!pte)
2783 return -ENOMEM;
2784 /* It is large page*/
2785 - if (largepage_lvl > 1)
2786 + if (largepage_lvl > 1) {
2787 pteval |= DMA_PTE_LARGE_PAGE;
2788 - else
2789 + /* Ensure that old small page tables are removed to make room
2790 + for superpage, if they exist. */
2791 + dma_pte_clear_range(domain, iov_pfn,
2792 + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2793 + dma_pte_free_pagetable(domain, iov_pfn,
2794 + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
2795 + } else {
2796 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2797 + }
2798
2799 }
2800 /* We don't need lock here, nobody else
2801 diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
2802 index e4e8415..aefb78e 100644
2803 --- a/drivers/md/dm-bio-prison.c
2804 +++ b/drivers/md/dm-bio-prison.c
2805 @@ -208,31 +208,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
2806 EXPORT_SYMBOL_GPL(dm_cell_release);
2807
2808 /*
2809 - * There are a couple of places where we put a bio into a cell briefly
2810 - * before taking it out again. In these situations we know that no other
2811 - * bio may be in the cell. This function releases the cell, and also does
2812 - * a sanity check.
2813 - */
2814 -static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
2815 -{
2816 - BUG_ON(cell->holder != bio);
2817 - BUG_ON(!bio_list_empty(&cell->bios));
2818 -
2819 - __cell_release(cell, NULL);
2820 -}
2821 -
2822 -void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
2823 -{
2824 - unsigned long flags;
2825 - struct dm_bio_prison *prison = cell->prison;
2826 -
2827 - spin_lock_irqsave(&prison->lock, flags);
2828 - __cell_release_singleton(cell, bio);
2829 - spin_unlock_irqrestore(&prison->lock, flags);
2830 -}
2831 -EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
2832 -
2833 -/*
2834 * Sometimes we don't want the holder, just the additional bios.
2835 */
2836 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
2837 diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
2838 index 4e0ac37..53d1a7a 100644
2839 --- a/drivers/md/dm-bio-prison.h
2840 +++ b/drivers/md/dm-bio-prison.h
2841 @@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
2842 struct bio *inmate, struct dm_bio_prison_cell **ref);
2843
2844 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
2845 -void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
2846 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
2847 void dm_cell_error(struct dm_bio_prison_cell *cell);
2848
2849 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
2850 index afd9598..a651d52 100644
2851 --- a/drivers/md/dm-ioctl.c
2852 +++ b/drivers/md/dm-ioctl.c
2853 @@ -1566,6 +1566,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
2854 if (copy_from_user(dmi, user, tmp.data_size))
2855 goto bad;
2856
2857 + /*
2858 + * Abort if something changed the ioctl data while it was being copied.
2859 + */
2860 + if (dmi->data_size != tmp.data_size) {
2861 + DMERR("rejecting ioctl: data size modified while processing parameters");
2862 + goto bad;
2863 + }
2864 +
2865 /* Wipe the user buffer so we do not return it to userspace */
2866 if (secure_data && clear_user(user, tmp.data_size))
2867 goto bad;
2868 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
2869 index 100368e..fa29557 100644
2870 --- a/drivers/md/dm-table.c
2871 +++ b/drivers/md/dm-table.c
2872 @@ -1445,6 +1445,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
2873 else
2874 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
2875
2876 + q->limits.max_write_same_sectors = 0;
2877 +
2878 dm_table_set_integrity(t);
2879
2880 /*
2881 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
2882 index 058acf3..41c9e81 100644
2883 --- a/drivers/md/dm-thin.c
2884 +++ b/drivers/md/dm-thin.c
2885 @@ -368,6 +368,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
2886 dm_thin_changed_this_transaction(tc->td);
2887 }
2888
2889 +static void inc_all_io_entry(struct pool *pool, struct bio *bio)
2890 +{
2891 + struct dm_thin_endio_hook *h;
2892 +
2893 + if (bio->bi_rw & REQ_DISCARD)
2894 + return;
2895 +
2896 + h = dm_get_mapinfo(bio)->ptr;
2897 + h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
2898 +}
2899 +
2900 static void issue(struct thin_c *tc, struct bio *bio)
2901 {
2902 struct pool *pool = tc->pool;
2903 @@ -513,8 +524,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
2904 }
2905
2906 /*
2907 - * Same as cell_defer above, except it omits one particular detainee,
2908 - * a write bio that covers the block and has already been processed.
2909 + * Same as cell_defer except it omits the original holder of the cell.
2910 */
2911 static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2912 {
2913 @@ -597,13 +607,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
2914 {
2915 struct thin_c *tc = m->tc;
2916
2917 + inc_all_io_entry(tc->pool, m->bio);
2918 + cell_defer_except(tc, m->cell);
2919 + cell_defer_except(tc, m->cell2);
2920 +
2921 if (m->pass_discard)
2922 remap_and_issue(tc, m->bio, m->data_block);
2923 else
2924 bio_endio(m->bio, 0);
2925
2926 - cell_defer_except(tc, m->cell);
2927 - cell_defer_except(tc, m->cell2);
2928 mempool_free(m, tc->pool->mapping_pool);
2929 }
2930
2931 @@ -711,6 +723,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
2932 h->overwrite_mapping = m;
2933 m->bio = bio;
2934 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
2935 + inc_all_io_entry(pool, bio);
2936 remap_and_issue(tc, bio, data_dest);
2937 } else {
2938 struct dm_io_region from, to;
2939 @@ -780,6 +793,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
2940 h->overwrite_mapping = m;
2941 m->bio = bio;
2942 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
2943 + inc_all_io_entry(pool, bio);
2944 remap_and_issue(tc, bio, data_block);
2945 } else {
2946 int r;
2947 @@ -936,7 +950,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2948 */
2949 build_data_key(tc->td, lookup_result.block, &key2);
2950 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
2951 - dm_cell_release_singleton(cell, bio);
2952 + cell_defer_except(tc, cell);
2953 break;
2954 }
2955
2956 @@ -962,13 +976,15 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2957 wake_worker(pool);
2958 }
2959 } else {
2960 + inc_all_io_entry(pool, bio);
2961 + cell_defer_except(tc, cell);
2962 + cell_defer_except(tc, cell2);
2963 +
2964 /*
2965 * The DM core makes sure that the discard doesn't span
2966 * a block boundary. So we submit the discard of a
2967 * partial block appropriately.
2968 */
2969 - dm_cell_release_singleton(cell, bio);
2970 - dm_cell_release_singleton(cell2, bio);
2971 if ((!lookup_result.shared) && pool->pf.discard_passdown)
2972 remap_and_issue(tc, bio, lookup_result.block);
2973 else
2974 @@ -980,13 +996,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
2975 /*
2976 * It isn't provisioned, just forget it.
2977 */
2978 - dm_cell_release_singleton(cell, bio);
2979 + cell_defer_except(tc, cell);
2980 bio_endio(bio, 0);
2981 break;
2982
2983 default:
2984 DMERR("discard: find block unexpectedly returned %d", r);
2985 - dm_cell_release_singleton(cell, bio);
2986 + cell_defer_except(tc, cell);
2987 bio_io_error(bio);
2988 break;
2989 }
2990 @@ -1040,8 +1056,9 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
2991 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
2992
2993 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
2994 + inc_all_io_entry(pool, bio);
2995 + cell_defer_except(tc, cell);
2996
2997 - dm_cell_release_singleton(cell, bio);
2998 remap_and_issue(tc, bio, lookup_result->block);
2999 }
3000 }
3001 @@ -1056,7 +1073,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
3002 * Remap empty bios (flushes) immediately, without provisioning.
3003 */
3004 if (!bio->bi_size) {
3005 - dm_cell_release_singleton(cell, bio);
3006 + inc_all_io_entry(tc->pool, bio);
3007 + cell_defer_except(tc, cell);
3008 +
3009 remap_and_issue(tc, bio, 0);
3010 return;
3011 }
3012 @@ -1066,7 +1085,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
3013 */
3014 if (bio_data_dir(bio) == READ) {
3015 zero_fill_bio(bio);
3016 - dm_cell_release_singleton(cell, bio);
3017 + cell_defer_except(tc, cell);
3018 bio_endio(bio, 0);
3019 return;
3020 }
3021 @@ -1111,26 +1130,22 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
3022 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
3023 switch (r) {
3024 case 0:
3025 - /*
3026 - * We can release this cell now. This thread is the only
3027 - * one that puts bios into a cell, and we know there were
3028 - * no preceding bios.
3029 - */
3030 - /*
3031 - * TODO: this will probably have to change when discard goes
3032 - * back in.
3033 - */
3034 - dm_cell_release_singleton(cell, bio);
3035 -
3036 - if (lookup_result.shared)
3037 + if (lookup_result.shared) {
3038 process_shared_bio(tc, bio, block, &lookup_result);
3039 - else
3040 + cell_defer_except(tc, cell);
3041 + } else {
3042 + inc_all_io_entry(tc->pool, bio);
3043 + cell_defer_except(tc, cell);
3044 +
3045 remap_and_issue(tc, bio, lookup_result.block);
3046 + }
3047 break;
3048
3049 case -ENODATA:
3050 if (bio_data_dir(bio) == READ && tc->origin_dev) {
3051 - dm_cell_release_singleton(cell, bio);
3052 + inc_all_io_entry(tc->pool, bio);
3053 + cell_defer_except(tc, cell);
3054 +
3055 remap_to_origin_and_issue(tc, bio);
3056 } else
3057 provision_block(tc, bio, block, cell);
3058 @@ -1138,7 +1153,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
3059
3060 default:
3061 DMERR("dm_thin_find_block() failed, error = %d", r);
3062 - dm_cell_release_singleton(cell, bio);
3063 + cell_defer_except(tc, cell);
3064 bio_io_error(bio);
3065 break;
3066 }
3067 @@ -1156,8 +1171,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
3068 case 0:
3069 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
3070 bio_io_error(bio);
3071 - else
3072 + else {
3073 + inc_all_io_entry(tc->pool, bio);
3074 remap_and_issue(tc, bio, lookup_result.block);
3075 + }
3076 break;
3077
3078 case -ENODATA:
3079 @@ -1167,6 +1184,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
3080 }
3081
3082 if (tc->origin_dev) {
3083 + inc_all_io_entry(tc->pool, bio);
3084 remap_to_origin_and_issue(tc, bio);
3085 break;
3086 }
3087 @@ -1347,7 +1365,7 @@ static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *b
3088
3089 h->tc = tc;
3090 h->shared_read_entry = NULL;
3091 - h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
3092 + h->all_io_entry = NULL;
3093 h->overwrite_mapping = NULL;
3094
3095 return h;
3096 @@ -1364,6 +1382,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
3097 dm_block_t block = get_bio_block(tc, bio);
3098 struct dm_thin_device *td = tc->td;
3099 struct dm_thin_lookup_result result;
3100 + struct dm_bio_prison_cell *cell1, *cell2;
3101 + struct dm_cell_key key;
3102
3103 map_context->ptr = thin_hook_bio(tc, bio);
3104
3105 @@ -1400,12 +1420,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
3106 * shared flag will be set in their case.
3107 */
3108 thin_defer_bio(tc, bio);
3109 - r = DM_MAPIO_SUBMITTED;
3110 - } else {
3111 - remap(tc, bio, result.block);
3112 - r = DM_MAPIO_REMAPPED;
3113 + return DM_MAPIO_SUBMITTED;
3114 }
3115 - break;
3116 +
3117 + build_virtual_key(tc->td, block, &key);
3118 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
3119 + return DM_MAPIO_SUBMITTED;
3120 +
3121 + build_data_key(tc->td, result.block, &key);
3122 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
3123 + cell_defer_except(tc, cell1);
3124 + return DM_MAPIO_SUBMITTED;
3125 + }
3126 +
3127 + inc_all_io_entry(tc->pool, bio);
3128 + cell_defer_except(tc, cell2);
3129 + cell_defer_except(tc, cell1);
3130 +
3131 + remap(tc, bio, result.block);
3132 + return DM_MAPIO_REMAPPED;
3133
3134 case -ENODATA:
3135 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
3136 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
3137 index 5709bfe..accbb05 100644
3138 --- a/drivers/md/persistent-data/dm-btree-internal.h
3139 +++ b/drivers/md/persistent-data/dm-btree-internal.h
3140 @@ -36,13 +36,13 @@ struct node_header {
3141 __le32 padding;
3142 } __packed;
3143
3144 -struct node {
3145 +struct btree_node {
3146 struct node_header header;
3147 __le64 keys[0];
3148 } __packed;
3149
3150
3151 -void inc_children(struct dm_transaction_manager *tm, struct node *n,
3152 +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
3153 struct dm_btree_value_type *vt);
3154
3155 int new_block(struct dm_btree_info *info, struct dm_block **result);
3156 @@ -64,7 +64,7 @@ struct ro_spine {
3157 void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
3158 int exit_ro_spine(struct ro_spine *s);
3159 int ro_step(struct ro_spine *s, dm_block_t new_child);
3160 -struct node *ro_node(struct ro_spine *s);
3161 +struct btree_node *ro_node(struct ro_spine *s);
3162
3163 struct shadow_spine {
3164 struct dm_btree_info *info;
3165 @@ -98,17 +98,17 @@ int shadow_root(struct shadow_spine *s);
3166 /*
3167 * Some inlines.
3168 */
3169 -static inline __le64 *key_ptr(struct node *n, uint32_t index)
3170 +static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
3171 {
3172 return n->keys + index;
3173 }
3174
3175 -static inline void *value_base(struct node *n)
3176 +static inline void *value_base(struct btree_node *n)
3177 {
3178 return &n->keys[le32_to_cpu(n->header.max_entries)];
3179 }
3180
3181 -static inline void *value_ptr(struct node *n, uint32_t index)
3182 +static inline void *value_ptr(struct btree_node *n, uint32_t index)
3183 {
3184 uint32_t value_size = le32_to_cpu(n->header.value_size);
3185 return value_base(n) + (value_size * index);
3186 @@ -117,7 +117,7 @@ static inline void *value_ptr(struct node *n, uint32_t index)
3187 /*
3188 * Assumes the values are suitably-aligned and converts to core format.
3189 */
3190 -static inline uint64_t value64(struct node *n, uint32_t index)
3191 +static inline uint64_t value64(struct btree_node *n, uint32_t index)
3192 {
3193 __le64 *values_le = value_base(n);
3194
3195 @@ -127,7 +127,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
3196 /*
3197 * Searching for a key within a single node.
3198 */
3199 -int lower_bound(struct node *n, uint64_t key);
3200 +int lower_bound(struct btree_node *n, uint64_t key);
3201
3202 extern struct dm_block_validator btree_node_validator;
3203
3204 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
3205 index aa71e23..c4f2813 100644
3206 --- a/drivers/md/persistent-data/dm-btree-remove.c
3207 +++ b/drivers/md/persistent-data/dm-btree-remove.c
3208 @@ -53,7 +53,7 @@
3209 /*
3210 * Some little utilities for moving node data around.
3211 */
3212 -static void node_shift(struct node *n, int shift)
3213 +static void node_shift(struct btree_node *n, int shift)
3214 {
3215 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
3216 uint32_t value_size = le32_to_cpu(n->header.value_size);
3217 @@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
3218 }
3219 }
3220
3221 -static void node_copy(struct node *left, struct node *right, int shift)
3222 +static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
3223 {
3224 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3225 uint32_t value_size = le32_to_cpu(left->header.value_size);
3226 @@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
3227 /*
3228 * Delete a specific entry from a leaf node.
3229 */
3230 -static void delete_at(struct node *n, unsigned index)
3231 +static void delete_at(struct btree_node *n, unsigned index)
3232 {
3233 unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
3234 unsigned nr_to_copy = nr_entries - (index + 1);
3235 @@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
3236 n->header.nr_entries = cpu_to_le32(nr_entries - 1);
3237 }
3238
3239 -static unsigned merge_threshold(struct node *n)
3240 +static unsigned merge_threshold(struct btree_node *n)
3241 {
3242 return le32_to_cpu(n->header.max_entries) / 3;
3243 }
3244 @@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
3245 struct child {
3246 unsigned index;
3247 struct dm_block *block;
3248 - struct node *n;
3249 + struct btree_node *n;
3250 };
3251
3252 static struct dm_btree_value_type le64_type = {
3253 @@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
3254 .equal = NULL
3255 };
3256
3257 -static int init_child(struct dm_btree_info *info, struct node *parent,
3258 +static int init_child(struct dm_btree_info *info, struct btree_node *parent,
3259 unsigned index, struct child *result)
3260 {
3261 int r, inc;
3262 @@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
3263 return dm_tm_unlock(info->tm, c->block);
3264 }
3265
3266 -static void shift(struct node *left, struct node *right, int count)
3267 +static void shift(struct btree_node *left, struct btree_node *right, int count)
3268 {
3269 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3270 uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
3271 @@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
3272 right->header.nr_entries = cpu_to_le32(nr_right + count);
3273 }
3274
3275 -static void __rebalance2(struct dm_btree_info *info, struct node *parent,
3276 +static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
3277 struct child *l, struct child *r)
3278 {
3279 - struct node *left = l->n;
3280 - struct node *right = r->n;
3281 + struct btree_node *left = l->n;
3282 + struct btree_node *right = r->n;
3283 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3284 uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
3285 unsigned threshold = 2 * merge_threshold(left) + 1;
3286 @@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
3287 unsigned left_index)
3288 {
3289 int r;
3290 - struct node *parent;
3291 + struct btree_node *parent;
3292 struct child left, right;
3293
3294 parent = dm_block_data(shadow_current(s));
3295 @@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
3296 * in right, then rebalance2. This wastes some cpu, but I want something
3297 * simple atm.
3298 */
3299 -static void delete_center_node(struct dm_btree_info *info, struct node *parent,
3300 +static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
3301 struct child *l, struct child *c, struct child *r,
3302 - struct node *left, struct node *center, struct node *right,
3303 + struct btree_node *left, struct btree_node *center, struct btree_node *right,
3304 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
3305 {
3306 uint32_t max_entries = le32_to_cpu(left->header.max_entries);
3307 @@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
3308 /*
3309 * Redistributes entries among 3 sibling nodes.
3310 */
3311 -static void redistribute3(struct dm_btree_info *info, struct node *parent,
3312 +static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
3313 struct child *l, struct child *c, struct child *r,
3314 - struct node *left, struct node *center, struct node *right,
3315 + struct btree_node *left, struct btree_node *center, struct btree_node *right,
3316 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
3317 {
3318 int s;
3319 @@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
3320 *key_ptr(parent, r->index) = right->keys[0];
3321 }
3322
3323 -static void __rebalance3(struct dm_btree_info *info, struct node *parent,
3324 +static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
3325 struct child *l, struct child *c, struct child *r)
3326 {
3327 - struct node *left = l->n;
3328 - struct node *center = c->n;
3329 - struct node *right = r->n;
3330 + struct btree_node *left = l->n;
3331 + struct btree_node *center = c->n;
3332 + struct btree_node *right = r->n;
3333
3334 uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
3335 uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
3336 @@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
3337 unsigned left_index)
3338 {
3339 int r;
3340 - struct node *parent = dm_block_data(shadow_current(s));
3341 + struct btree_node *parent = dm_block_data(shadow_current(s));
3342 struct child left, center, right;
3343
3344 /*
3345 @@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
3346 {
3347 int r;
3348 struct dm_block *block;
3349 - struct node *n;
3350 + struct btree_node *n;
3351
3352 r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
3353 if (r)
3354 @@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
3355 {
3356 int i, r, has_left_sibling, has_right_sibling;
3357 uint32_t child_entries;
3358 - struct node *n;
3359 + struct btree_node *n;
3360
3361 n = dm_block_data(shadow_current(s));
3362
3363 @@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
3364 return r;
3365 }
3366
3367 -static int do_leaf(struct node *n, uint64_t key, unsigned *index)
3368 +static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
3369 {
3370 int i = lower_bound(n, key);
3371
3372 @@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
3373 uint64_t key, unsigned *index)
3374 {
3375 int i = *index, r;
3376 - struct node *n;
3377 + struct btree_node *n;
3378
3379 for (;;) {
3380 r = shadow_step(s, root, vt);
3381 @@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
3382 unsigned level, last_level = info->levels - 1;
3383 int index = 0, r = 0;
3384 struct shadow_spine spine;
3385 - struct node *n;
3386 + struct btree_node *n;
3387
3388 init_shadow_spine(&spine, info);
3389 for (level = 0; level < info->levels; level++) {
3390 diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
3391 index d9a7912..2f0805c 100644
3392 --- a/drivers/md/persistent-data/dm-btree-spine.c
3393 +++ b/drivers/md/persistent-data/dm-btree-spine.c
3394 @@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
3395 struct dm_block *b,
3396 size_t block_size)
3397 {
3398 - struct node *n = dm_block_data(b);
3399 + struct btree_node *n = dm_block_data(b);
3400 struct node_header *h = &n->header;
3401
3402 h->blocknr = cpu_to_le64(dm_block_location(b));
3403 @@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v,
3404 struct dm_block *b,
3405 size_t block_size)
3406 {
3407 - struct node *n = dm_block_data(b);
3408 + struct btree_node *n = dm_block_data(b);
3409 struct node_header *h = &n->header;
3410 size_t value_size;
3411 __le32 csum_disk;
3412 @@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
3413 return r;
3414 }
3415
3416 -struct node *ro_node(struct ro_spine *s)
3417 +struct btree_node *ro_node(struct ro_spine *s)
3418 {
3419 struct dm_block *block;
3420
3421 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
3422 index d12b2cc..371f3d4 100644
3423 --- a/drivers/md/persistent-data/dm-btree.c
3424 +++ b/drivers/md/persistent-data/dm-btree.c
3425 @@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
3426 /*----------------------------------------------------------------*/
3427
3428 /* makes the assumption that no two keys are the same. */
3429 -static int bsearch(struct node *n, uint64_t key, int want_hi)
3430 +static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
3431 {
3432 int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
3433
3434 @@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
3435 return want_hi ? hi : lo;
3436 }
3437
3438 -int lower_bound(struct node *n, uint64_t key)
3439 +int lower_bound(struct btree_node *n, uint64_t key)
3440 {
3441 return bsearch(n, key, 0);
3442 }
3443
3444 -void inc_children(struct dm_transaction_manager *tm, struct node *n,
3445 +void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
3446 struct dm_btree_value_type *vt)
3447 {
3448 unsigned i;
3449 @@ -77,7 +77,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
3450 vt->inc(vt->context, value_ptr(n, i));
3451 }
3452
3453 -static int insert_at(size_t value_size, struct node *node, unsigned index,
3454 +static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
3455 uint64_t key, void *value)
3456 __dm_written_to_disk(value)
3457 {
3458 @@ -122,7 +122,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
3459 {
3460 int r;
3461 struct dm_block *b;
3462 - struct node *n;
3463 + struct btree_node *n;
3464 size_t block_size;
3465 uint32_t max_entries;
3466
3467 @@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
3468 #define MAX_SPINE_DEPTH 64
3469 struct frame {
3470 struct dm_block *b;
3471 - struct node *n;
3472 + struct btree_node *n;
3473 unsigned level;
3474 unsigned nr_children;
3475 unsigned current_child;
3476 @@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
3477 /*----------------------------------------------------------------*/
3478
3479 static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
3480 - int (*search_fn)(struct node *, uint64_t),
3481 + int (*search_fn)(struct btree_node *, uint64_t),
3482 uint64_t *result_key, void *v, size_t value_size)
3483 {
3484 int i, r;
3485 @@ -406,7 +406,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
3486 size_t size;
3487 unsigned nr_left, nr_right;
3488 struct dm_block *left, *right, *parent;
3489 - struct node *ln, *rn, *pn;
3490 + struct btree_node *ln, *rn, *pn;
3491 __le64 location;
3492
3493 left = shadow_current(s);
3494 @@ -491,7 +491,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
3495 size_t size;
3496 unsigned nr_left, nr_right;
3497 struct dm_block *left, *right, *new_parent;
3498 - struct node *pn, *ln, *rn;
3499 + struct btree_node *pn, *ln, *rn;
3500 __le64 val;
3501
3502 new_parent = shadow_current(s);
3503 @@ -576,7 +576,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
3504 uint64_t key, unsigned *index)
3505 {
3506 int r, i = *index, top = 1;
3507 - struct node *node;
3508 + struct btree_node *node;
3509
3510 for (;;) {
3511 r = shadow_step(s, root, vt);
3512 @@ -643,7 +643,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
3513 unsigned level, index = -1, last_level = info->levels - 1;
3514 dm_block_t block = root;
3515 struct shadow_spine spine;
3516 - struct node *n;
3517 + struct btree_node *n;
3518 struct dm_btree_value_type le64_type;
3519
3520 le64_type.context = NULL;
3521 diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
3522 index f8b7771..7604f4e 100644
3523 --- a/drivers/mfd/mfd-core.c
3524 +++ b/drivers/mfd/mfd-core.c
3525 @@ -21,6 +21,10 @@
3526 #include <linux/irqdomain.h>
3527 #include <linux/of.h>
3528
3529 +static struct device_type mfd_dev_type = {
3530 + .name = "mfd_device",
3531 +};
3532 +
3533 int mfd_cell_enable(struct platform_device *pdev)
3534 {
3535 const struct mfd_cell *cell = mfd_get_cell(pdev);
3536 @@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
3537 goto fail_device;
3538
3539 pdev->dev.parent = parent;
3540 + pdev->dev.type = &mfd_dev_type;
3541
3542 if (parent->of_node && cell->of_compatible) {
3543 for_each_child_of_node(parent->of_node, np) {
3544 @@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
3545
3546 static int mfd_remove_devices_fn(struct device *dev, void *c)
3547 {
3548 - struct platform_device *pdev = to_platform_device(dev);
3549 - const struct mfd_cell *cell = mfd_get_cell(pdev);
3550 + struct platform_device *pdev;
3551 + const struct mfd_cell *cell;
3552 atomic_t **usage_count = c;
3553
3554 + if (dev->type != &mfd_dev_type)
3555 + return 0;
3556 +
3557 + pdev = to_platform_device(dev);
3558 + cell = mfd_get_cell(pdev);
3559 +
3560 /* find the base address of usage_count pointers (for freeing) */
3561 if (!*usage_count || (cell->usage_count < *usage_count))
3562 *usage_count = cell->usage_count;
3563 diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
3564 index 8fefc96..f1ac288 100644
3565 --- a/drivers/mfd/wm8994-core.c
3566 +++ b/drivers/mfd/wm8994-core.c
3567 @@ -557,6 +557,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
3568 case 1:
3569 case 2:
3570 case 3:
3571 + case 4:
3572 regmap_patch = wm1811_reva_patch;
3573 patch_regs = ARRAY_SIZE(wm1811_reva_patch);
3574 break;
3575 diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
3576 index adb6c3e..2cdeab8 100644
3577 --- a/drivers/mtd/nand/cs553x_nand.c
3578 +++ b/drivers/mtd/nand/cs553x_nand.c
3579 @@ -237,6 +237,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
3580 this->ecc.hwctl = cs_enable_hwecc;
3581 this->ecc.calculate = cs_calculate_ecc;
3582 this->ecc.correct = nand_correct_data;
3583 + this->ecc.strength = 1;
3584
3585 /* Enable the following for a flash based bad block table */
3586 this->bbt_options = NAND_BBT_USE_FLASH;
3587 @@ -247,8 +248,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
3588 goto out_ior;
3589 }
3590
3591 - this->ecc.strength = 1;
3592 -
3593 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
3594
3595 cs553x_mtd[cs] = new_mtd;
3596 diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3597 index 3502acc..84f0526 100644
3598 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3599 +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
3600 @@ -166,6 +166,15 @@ int gpmi_init(struct gpmi_nand_data *this)
3601 if (ret)
3602 goto err_out;
3603
3604 + /*
3605 + * Reset BCH here, too. We got failures otherwise :(
3606 + * See later BCH reset for explanation of MX23 handling
3607 + */
3608 + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
3609 + if (ret)
3610 + goto err_out;
3611 +
3612 +
3613 /* Choose NAND mode. */
3614 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
3615
3616 diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
3617 index 3d1899f..c4c80f6 100644
3618 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
3619 +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
3620 @@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3621 u32 reply;
3622 u8 is_going_down = 0;
3623 int i;
3624 + unsigned long flags;
3625
3626 slave_state[slave].comm_toggle ^= 1;
3627 reply = (u32) slave_state[slave].comm_toggle << 31;
3628 @@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3629 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
3630 goto reset_slave;
3631 }
3632 - spin_lock(&priv->mfunc.master.slave_state_lock);
3633 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3634 if (!slave_state[slave].is_slave_going_down)
3635 slave_state[slave].last_cmd = cmd;
3636 else
3637 is_going_down = 1;
3638 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3639 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3640 if (is_going_down) {
3641 mlx4_warn(dev, "Slave is going down aborting command(%d)"
3642 " executing from slave:%d\n",
3643 @@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
3644 reset_slave:
3645 /* cleanup any slave resources */
3646 mlx4_delete_all_resources_for_slave(dev, slave);
3647 - spin_lock(&priv->mfunc.master.slave_state_lock);
3648 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3649 if (!slave_state[slave].is_slave_going_down)
3650 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
3651 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3652 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3653 /*with slave in the middle of flr, no need to clean resources again.*/
3654 inform_slave_state:
3655 memset(&slave_state[slave].event_eq, 0,
3656 diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
3657 index b84a88b..cda430b 100644
3658 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
3659 +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
3660 @@ -401,6 +401,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
3661 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
3662 int i;
3663 int err;
3664 + unsigned long flags;
3665
3666 mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
3667
3668 @@ -412,10 +413,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
3669
3670 mlx4_delete_all_resources_for_slave(dev, i);
3671 /*return the slave to running mode*/
3672 - spin_lock(&priv->mfunc.master.slave_state_lock);
3673 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3674 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
3675 slave_state[i].is_slave_going_down = 0;
3676 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3677 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3678 /*notify the FW:*/
3679 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
3680 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
3681 @@ -440,6 +441,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
3682 u8 update_slave_state;
3683 int i;
3684 enum slave_port_gen_event gen_event;
3685 + unsigned long flags;
3686
3687 while ((eqe = next_eqe_sw(eq))) {
3688 /*
3689 @@ -647,13 +649,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
3690 } else
3691 update_slave_state = 1;
3692
3693 - spin_lock(&priv->mfunc.master.slave_state_lock);
3694 + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
3695 if (update_slave_state) {
3696 priv->mfunc.master.slave_state[flr_slave].active = false;
3697 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
3698 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
3699 }
3700 - spin_unlock(&priv->mfunc.master.slave_state_lock);
3701 + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
3702 queue_work(priv->mfunc.master.comm_wq,
3703 &priv->mfunc.master.slave_flr_event_work);
3704 break;
3705 diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
3706 index 6650fde..9f1e947 100644
3707 --- a/drivers/net/wimax/i2400m/i2400m-usb.h
3708 +++ b/drivers/net/wimax/i2400m/i2400m-usb.h
3709 @@ -152,6 +152,9 @@ enum {
3710 /* Device IDs */
3711 USB_DEVICE_ID_I6050 = 0x0186,
3712 USB_DEVICE_ID_I6050_2 = 0x0188,
3713 + USB_DEVICE_ID_I6150 = 0x07d6,
3714 + USB_DEVICE_ID_I6150_2 = 0x07d7,
3715 + USB_DEVICE_ID_I6150_3 = 0x07d9,
3716 USB_DEVICE_ID_I6250 = 0x0187,
3717 };
3718
3719 diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
3720 index 713d033..080f363 100644
3721 --- a/drivers/net/wimax/i2400m/usb.c
3722 +++ b/drivers/net/wimax/i2400m/usb.c
3723 @@ -510,6 +510,9 @@ int i2400mu_probe(struct usb_interface *iface,
3724 switch (id->idProduct) {
3725 case USB_DEVICE_ID_I6050:
3726 case USB_DEVICE_ID_I6050_2:
3727 + case USB_DEVICE_ID_I6150:
3728 + case USB_DEVICE_ID_I6150_2:
3729 + case USB_DEVICE_ID_I6150_3:
3730 case USB_DEVICE_ID_I6250:
3731 i2400mu->i6050 = 1;
3732 break;
3733 @@ -759,6 +762,9 @@ static
3734 struct usb_device_id i2400mu_id_table[] = {
3735 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
3736 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
3737 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
3738 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
3739 + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
3740 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
3741 { USB_DEVICE(0x8086, 0x0181) },
3742 { USB_DEVICE(0x8086, 0x1403) },
3743 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
3744 index 9f31cfa..a4ee253 100644
3745 --- a/drivers/net/wireless/ath/ath5k/base.c
3746 +++ b/drivers/net/wireless/ath/ath5k/base.c
3747 @@ -848,7 +848,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
3748 return;
3749 dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
3750 DMA_TO_DEVICE);
3751 - dev_kfree_skb_any(bf->skb);
3752 + ieee80211_free_txskb(ah->hw, bf->skb);
3753 bf->skb = NULL;
3754 bf->skbaddr = 0;
3755 bf->desc->ds_data = 0;
3756 @@ -1575,7 +1575,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
3757 return;
3758
3759 drop_packet:
3760 - dev_kfree_skb_any(skb);
3761 + ieee80211_free_txskb(hw, skb);
3762 }
3763
3764 static void
3765 diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3766 index 7a28538..c1369ff 100644
3767 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3768 +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
3769 @@ -62,7 +62,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
3770 u16 qnum = skb_get_queue_mapping(skb);
3771
3772 if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
3773 - dev_kfree_skb_any(skb);
3774 + ieee80211_free_txskb(hw, skb);
3775 return;
3776 }
3777
3778 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3779 index 6f7cf49..262e1e0 100644
3780 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3781 +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
3782 @@ -534,98 +534,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
3783
3784 static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
3785 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
3786 - {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3787 - {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3788 - {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3789 + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3790 + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3791 + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3792 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3793 - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
3794 - {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3795 - {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
3796 - {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
3797 - {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
3798 - {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
3799 - {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
3800 - {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
3801 - {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
3802 - {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
3803 - {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
3804 - {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
3805 - {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
3806 - {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
3807 - {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
3808 - {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
3809 - {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
3810 - {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
3811 - {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
3812 - {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
3813 - {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
3814 - {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
3815 - {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
3816 - {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
3817 - {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
3818 - {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
3819 - {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3820 - {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3821 - {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3822 - {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3823 - {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3824 - {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3825 - {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
3826 - {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
3827 - {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
3828 - {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
3829 - {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
3830 - {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
3831 - {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
3832 - {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
3833 - {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
3834 - {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
3835 - {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
3836 - {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
3837 - {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
3838 - {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
3839 - {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
3840 - {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
3841 - {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
3842 - {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
3843 - {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
3844 - {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
3845 - {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
3846 - {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
3847 - {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
3848 - {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
3849 - {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
3850 - {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
3851 - {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3852 - {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3853 - {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3854 - {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3855 - {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3856 - {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3857 - {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
3858 + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
3859 + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
3860 + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
3861 + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
3862 + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
3863 + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
3864 + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
3865 + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
3866 + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
3867 + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
3868 + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
3869 + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
3870 + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
3871 + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
3872 + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
3873 + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
3874 + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
3875 + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
3876 + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
3877 + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
3878 + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
3879 + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
3880 + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
3881 + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
3882 + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
3883 + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
3884 + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3885 + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3886 + {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3887 + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3888 + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3889 + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3890 + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
3891 + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
3892 + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
3893 + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
3894 + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
3895 + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
3896 + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
3897 + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
3898 + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
3899 + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
3900 + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
3901 + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
3902 + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
3903 + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
3904 + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
3905 + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
3906 + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
3907 + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
3908 + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
3909 + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
3910 + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
3911 + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
3912 + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
3913 + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
3914 + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
3915 + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
3916 + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3917 + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3918 + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3919 + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3920 + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3921 + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3922 + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
3923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3926 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3927 - {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
3928 - {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
3929 - {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
3930 - {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
3931 - {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
3932 - {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
3933 - {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
3934 - {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3935 - {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3936 - {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3937 - {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3938 - {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
3939 - {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3940 - {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3941 - {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3942 + {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
3943 + {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
3944 + {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
3945 + {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
3946 + {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
3947 + {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
3948 + {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
3949 + {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3950 + {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3951 + {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3952 + {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3953 + {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
3954 + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3955 + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3956 + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3957 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3958 - {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
3959 - {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
3960 - {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
3961 + {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
3962 + {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
3963 + {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
3964 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
3965 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
3966 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
3967 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3968 index 41b1a75..54ba42f 100644
3969 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3970 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
3971 @@ -68,13 +68,13 @@
3972 #define AR9300_BASE_ADDR 0x3ff
3973 #define AR9300_BASE_ADDR_512 0x1ff
3974
3975 -#define AR9300_OTP_BASE 0x14000
3976 -#define AR9300_OTP_STATUS 0x15f18
3977 +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
3978 +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
3979 #define AR9300_OTP_STATUS_TYPE 0x7
3980 #define AR9300_OTP_STATUS_VALID 0x4
3981 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
3982 #define AR9300_OTP_STATUS_SM_BUSY 0x1
3983 -#define AR9300_OTP_READ_DATA 0x15f1c
3984 +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
3985
3986 enum targetPowerHTRates {
3987 HT_TARGET_RATE_0_8_16,
3988 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3989 index 1a36fa2..226dd13 100644
3990 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3991 +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
3992 @@ -219,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
3993
3994 /* Awake -> Sleep Setting */
3995 INIT_INI_ARRAY(&ah->iniPcieSerdes,
3996 - ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
3997 + ar9462_pciephy_clkreq_disable_L1_2p0);
3998 /* Sleep -> Awake Setting */
3999 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
4000 - ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
4001 + ar9462_pciephy_clkreq_disable_L1_2p0);
4002
4003 /* Fast clock modal settings */
4004 INIT_INI_ARRAY(&ah->iniModesFastClock,
4005 @@ -540,7 +540,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
4006 ar9340Common_rx_gain_table_1p0);
4007 else if (AR_SREV_9485_11(ah))
4008 INIT_INI_ARRAY(&ah->iniModesRxGain,
4009 - ar9485Common_wo_xlna_rx_gain_1_1);
4010 + ar9485_common_rx_gain_1_1);
4011 else if (AR_SREV_9550(ah)) {
4012 INIT_INI_ARRAY(&ah->iniModesRxGain,
4013 ar955x_1p0_common_rx_gain_table);
4014 diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
4015 index e5cceb0..bbd249d 100644
4016 --- a/drivers/net/wireless/ath/ath9k/calib.c
4017 +++ b/drivers/net/wireless/ath/ath9k/calib.c
4018 @@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
4019
4020 if (chan && chan->noisefloor) {
4021 s8 delta = chan->noisefloor -
4022 + ATH9K_NF_CAL_NOISE_THRESH -
4023 ath9k_hw_get_default_nf(ah, chan);
4024 if (delta > 0)
4025 noise += delta;
4026 diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
4027 index 1060c19..60dcb6c 100644
4028 --- a/drivers/net/wireless/ath/ath9k/calib.h
4029 +++ b/drivers/net/wireless/ath/ath9k/calib.h
4030 @@ -21,6 +21,9 @@
4031
4032 #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
4033
4034 +/* Internal noise floor can vary by about 6db depending on the frequency */
4035 +#define ATH9K_NF_CAL_NOISE_THRESH 6
4036 +
4037 #define NUM_NF_READINGS 6
4038 #define ATH9K_NF_CAL_HIST_MAX 5
4039
4040 diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
4041 index 24ac287..98f4010 100644
4042 --- a/drivers/net/wireless/ath/carl9170/fw.c
4043 +++ b/drivers/net/wireless/ath/carl9170/fw.c
4044 @@ -341,8 +341,12 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
4045 if (SUPP(CARL9170FW_WLANTX_CAB)) {
4046 if_comb_types |=
4047 BIT(NL80211_IFTYPE_AP) |
4048 - BIT(NL80211_IFTYPE_MESH_POINT) |
4049 BIT(NL80211_IFTYPE_P2P_GO);
4050 +
4051 +#ifdef CONFIG_MAC80211_MESH
4052 + if_comb_types |=
4053 + BIT(NL80211_IFTYPE_MESH_POINT);
4054 +#endif /* CONFIG_MAC80211_MESH */
4055 }
4056 }
4057
4058 diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
4059 index b298e5d..10e288d 100644
4060 --- a/drivers/net/wireless/b43/b43.h
4061 +++ b/drivers/net/wireless/b43/b43.h
4062 @@ -7,6 +7,7 @@
4063 #include <linux/hw_random.h>
4064 #include <linux/bcma/bcma.h>
4065 #include <linux/ssb/ssb.h>
4066 +#include <linux/completion.h>
4067 #include <net/mac80211.h>
4068
4069 #include "debugfs.h"
4070 @@ -722,6 +723,10 @@ enum b43_firmware_file_type {
4071 struct b43_request_fw_context {
4072 /* The device we are requesting the fw for. */
4073 struct b43_wldev *dev;
4074 + /* a completion event structure needed if this call is asynchronous */
4075 + struct completion fw_load_complete;
4076 + /* a pointer to the firmware object */
4077 + const struct firmware *blob;
4078 /* The type of firmware to request. */
4079 enum b43_firmware_file_type req_type;
4080 /* Error messages for each firmware type. */
4081 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
4082 index 2911e20..263667f 100644
4083 --- a/drivers/net/wireless/b43/main.c
4084 +++ b/drivers/net/wireless/b43/main.c
4085 @@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
4086 b43warn(wl, text);
4087 }
4088
4089 +static void b43_fw_cb(const struct firmware *firmware, void *context)
4090 +{
4091 + struct b43_request_fw_context *ctx = context;
4092 +
4093 + ctx->blob = firmware;
4094 + complete(&ctx->fw_load_complete);
4095 +}
4096 +
4097 int b43_do_request_fw(struct b43_request_fw_context *ctx,
4098 const char *name,
4099 - struct b43_firmware_file *fw)
4100 + struct b43_firmware_file *fw, bool async)
4101 {
4102 - const struct firmware *blob;
4103 struct b43_fw_header *hdr;
4104 u32 size;
4105 int err;
4106 @@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4107 B43_WARN_ON(1);
4108 return -ENOSYS;
4109 }
4110 - err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
4111 + if (async) {
4112 + /* do this part asynchronously */
4113 + init_completion(&ctx->fw_load_complete);
4114 + err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
4115 + ctx->dev->dev->dev, GFP_KERNEL,
4116 + ctx, b43_fw_cb);
4117 + if (err < 0) {
4118 + pr_err("Unable to load firmware\n");
4119 + return err;
4120 + }
4121 + /* stall here until fw ready */
4122 + wait_for_completion(&ctx->fw_load_complete);
4123 + if (ctx->blob)
4124 + goto fw_ready;
4125 + /* On some ARM systems, the async request will fail, but the next sync
4126 + * request works. For this reason, we fall through here
4127 + */
4128 + }
4129 + err = request_firmware(&ctx->blob, ctx->fwname,
4130 + ctx->dev->dev->dev);
4131 if (err == -ENOENT) {
4132 snprintf(ctx->errors[ctx->req_type],
4133 sizeof(ctx->errors[ctx->req_type]),
4134 - "Firmware file \"%s\" not found\n", ctx->fwname);
4135 + "Firmware file \"%s\" not found\n",
4136 + ctx->fwname);
4137 return err;
4138 } else if (err) {
4139 snprintf(ctx->errors[ctx->req_type],
4140 @@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4141 ctx->fwname, err);
4142 return err;
4143 }
4144 - if (blob->size < sizeof(struct b43_fw_header))
4145 +fw_ready:
4146 + if (ctx->blob->size < sizeof(struct b43_fw_header))
4147 goto err_format;
4148 - hdr = (struct b43_fw_header *)(blob->data);
4149 + hdr = (struct b43_fw_header *)(ctx->blob->data);
4150 switch (hdr->type) {
4151 case B43_FW_TYPE_UCODE:
4152 case B43_FW_TYPE_PCM:
4153 size = be32_to_cpu(hdr->size);
4154 - if (size != blob->size - sizeof(struct b43_fw_header))
4155 + if (size != ctx->blob->size - sizeof(struct b43_fw_header))
4156 goto err_format;
4157 /* fallthrough */
4158 case B43_FW_TYPE_IV:
4159 @@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
4160 goto err_format;
4161 }
4162
4163 - fw->data = blob;
4164 + fw->data = ctx->blob;
4165 fw->filename = name;
4166 fw->type = ctx->req_type;
4167
4168 @@ -2172,7 +2200,7 @@ err_format:
4169 snprintf(ctx->errors[ctx->req_type],
4170 sizeof(ctx->errors[ctx->req_type]),
4171 "Firmware file \"%s\" format error.\n", ctx->fwname);
4172 - release_firmware(blob);
4173 + release_firmware(ctx->blob);
4174
4175 return -EPROTO;
4176 }
4177 @@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4178 goto err_no_ucode;
4179 }
4180 }
4181 - err = b43_do_request_fw(ctx, filename, &fw->ucode);
4182 + err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
4183 if (err)
4184 goto err_load;
4185
4186 @@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4187 else
4188 goto err_no_pcm;
4189 fw->pcm_request_failed = false;
4190 - err = b43_do_request_fw(ctx, filename, &fw->pcm);
4191 + err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
4192 if (err == -ENOENT) {
4193 /* We did not find a PCM file? Not fatal, but
4194 * core rev <= 10 must do without hwcrypto then. */
4195 @@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4196 default:
4197 goto err_no_initvals;
4198 }
4199 - err = b43_do_request_fw(ctx, filename, &fw->initvals);
4200 + err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
4201 if (err)
4202 goto err_load;
4203
4204 @@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
4205 default:
4206 goto err_no_initvals;
4207 }
4208 - err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
4209 + err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
4210 if (err)
4211 goto err_load;
4212
4213 diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
4214 index 8c684cd..abac25e 100644
4215 --- a/drivers/net/wireless/b43/main.h
4216 +++ b/drivers/net/wireless/b43/main.h
4217 @@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
4218
4219
4220 struct b43_request_fw_context;
4221 -int b43_do_request_fw(struct b43_request_fw_context *ctx,
4222 - const char *name,
4223 - struct b43_firmware_file *fw);
4224 +int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
4225 + struct b43_firmware_file *fw, bool async);
4226 void b43_do_release_fw(struct b43_firmware_file *fw);
4227
4228 #endif /* B43_MAIN_H_ */
4229 diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4230 index 481345c..0caa4c3 100644
4231 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4232 +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
4233 @@ -3730,10 +3730,11 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
4234
4235 len = wpa_ie->len + TLV_HDR_LEN;
4236 data = (u8 *)wpa_ie;
4237 - offset = 0;
4238 + offset = TLV_HDR_LEN;
4239 if (!is_rsn_ie)
4240 offset += VS_IE_FIXED_HDR_LEN;
4241 - offset += WPA_IE_VERSION_LEN;
4242 + else
4243 + offset += WPA_IE_VERSION_LEN;
4244
4245 /* check for multicast cipher suite */
4246 if (offset + WPA_IE_MIN_OUI_LEN > len) {
4247 diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
4248 index f5ca73a..aecf1ce 100644
4249 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c
4250 +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
4251 @@ -1100,29 +1100,6 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
4252 }
4253 }
4254
4255 -static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
4256 - int txq_id, int ssn, struct sk_buff_head *skbs)
4257 -{
4258 - if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
4259 - tid != IWL_TID_NON_QOS &&
4260 - txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
4261 - /*
4262 - * FIXME: this is a uCode bug which need to be addressed,
4263 - * log the information and return for now.
4264 - * Since it is can possibly happen very often and in order
4265 - * not to fill the syslog, don't use IWL_ERR or IWL_WARN
4266 - */
4267 - IWL_DEBUG_TX_QUEUES(priv,
4268 - "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
4269 - txq_id, sta_id, tid,
4270 - priv->tid_data[sta_id][tid].agg.txq_id);
4271 - return 1;
4272 - }
4273 -
4274 - iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
4275 - return 0;
4276 -}
4277 -
4278 int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4279 struct iwl_device_cmd *cmd)
4280 {
4281 @@ -1177,16 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4282 next_reclaimed = ssn;
4283 }
4284
4285 - if (tid != IWL_TID_NON_QOS) {
4286 - priv->tid_data[sta_id][tid].next_reclaimed =
4287 - next_reclaimed;
4288 - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
4289 - next_reclaimed);
4290 - }
4291 + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
4292
4293 - /*we can free until ssn % q.n_bd not inclusive */
4294 - WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
4295 - txq_id, ssn, &skbs));
4296 iwlagn_check_ratid_empty(priv, sta_id, tid);
4297 freed = 0;
4298
4299 @@ -1235,11 +1204,28 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
4300 if (!is_agg)
4301 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
4302
4303 + /*
4304 + * W/A for FW bug - the seq_ctl isn't updated when the
4305 + * queues are flushed. Fetch it from the packet itself
4306 + */
4307 + if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
4308 + next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
4309 + next_reclaimed =
4310 + SEQ_TO_SN(next_reclaimed + 0x10);
4311 + }
4312 +
4313 is_offchannel_skb =
4314 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
4315 freed++;
4316 }
4317
4318 + if (tid != IWL_TID_NON_QOS) {
4319 + priv->tid_data[sta_id][tid].next_reclaimed =
4320 + next_reclaimed;
4321 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
4322 + next_reclaimed);
4323 + }
4324 +
4325 WARN_ON(!is_agg && freed != 1);
4326
4327 /*
4328 @@ -1311,16 +1297,27 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
4329 return 0;
4330 }
4331
4332 + if (unlikely(scd_flow != agg->txq_id)) {
4333 + /*
4334 + * FIXME: this is a uCode bug which need to be addressed,
4335 + * log the information and return for now.
4336 + * Since it can possibly happen very often and in order
4337 + * not to fill the syslog, don't use IWL_ERR or IWL_WARN
4338 + */
4339 + IWL_DEBUG_TX_QUEUES(priv,
4340 + "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
4341 + scd_flow, sta_id, tid, agg->txq_id);
4342 + spin_unlock(&priv->sta_lock);
4343 + return 0;
4344 + }
4345 +
4346 __skb_queue_head_init(&reclaimed_skbs);
4347
4348 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4349 * block-ack window (we assume that they've been successfully
4350 * transmitted ... if not, it's too late anyway). */
4351 - if (iwl_reclaim(priv, sta_id, tid, scd_flow,
4352 - ba_resp_scd_ssn, &reclaimed_skbs)) {
4353 - spin_unlock(&priv->sta_lock);
4354 - return 0;
4355 - }
4356 + iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
4357 + &reclaimed_skbs);
4358
4359 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
4360 "sta_id = %d\n",
4361 diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
4362 index bb69f8f..3654de2 100644
4363 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c
4364 +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
4365 @@ -927,12 +927,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
4366 * back-to-back ISRs and sporadic interrupts from our NIC.
4367 * If we have something to service, the tasklet will re-enable ints.
4368 * If we *don't* have something, we'll re-enable before leaving here. */
4369 - inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
4370 + inta_mask = iwl_read32(trans, CSR_INT_MASK);
4371 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
4372
4373 /* Discover which interrupts are active/pending */
4374 inta = iwl_read32(trans, CSR_INT);
4375
4376 + if (inta & (~inta_mask)) {
4377 + IWL_DEBUG_ISR(trans,
4378 + "We got a masked interrupt (0x%08x)...Ack and ignore\n",
4379 + inta & (~inta_mask));
4380 + iwl_write32(trans, CSR_INT, inta & (~inta_mask));
4381 + inta &= inta_mask;
4382 + }
4383 +
4384 /* Ignore interrupt if there's nothing in NIC to service.
4385 * This may be due to IRQ shared with another device,
4386 * or due to sporadic interrupts thrown from our NIC. */
4387 @@ -963,6 +971,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
4388 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
4389 !trans_pcie->inta)
4390 iwl_enable_interrupts(trans);
4391 + return IRQ_HANDLED;
4392
4393 none:
4394 /* re-enable interrupts here since we don't have anything to service. */
4395 @@ -1015,7 +1024,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
4396 * If we have something to service, the tasklet will re-enable ints.
4397 * If we *don't* have something, we'll re-enable before leaving here.
4398 */
4399 - inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
4400 + inta_mask = iwl_read32(trans, CSR_INT_MASK);
4401 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
4402
4403
4404 diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
4405 index 0c9f70b..786bc11 100644
4406 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c
4407 +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
4408 @@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
4409 */
4410 int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
4411 {
4412 - bool cancel_flag = false;
4413 int status;
4414 struct cmd_ctrl_node *cmd_queued;
4415
4416 @@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
4417 atomic_inc(&adapter->cmd_pending);
4418
4419 /* Wait for completion */
4420 - wait_event_interruptible(adapter->cmd_wait_q.wait,
4421 - *(cmd_queued->condition));
4422 - if (!*(cmd_queued->condition))
4423 - cancel_flag = true;
4424 -
4425 - if (cancel_flag) {
4426 - mwifiex_cancel_pending_ioctl(adapter);
4427 - dev_dbg(adapter->dev, "cmd cancel\n");
4428 + status = wait_event_interruptible(adapter->cmd_wait_q.wait,
4429 + *(cmd_queued->condition));
4430 + if (status) {
4431 + dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
4432 + return status;
4433 }
4434
4435 status = adapter->cmd_wait_q.status;
4436 @@ -480,8 +476,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
4437 return false;
4438 }
4439
4440 - wait_event_interruptible(adapter->hs_activate_wait_q,
4441 - adapter->hs_activate_wait_q_woken);
4442 + if (wait_event_interruptible(adapter->hs_activate_wait_q,
4443 + adapter->hs_activate_wait_q_woken)) {
4444 + dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
4445 + return false;
4446 + }
4447
4448 return true;
4449 }
4450 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
4451 index 59474ae..175a9b9 100644
4452 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
4453 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
4454 @@ -5036,7 +5036,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
4455 IEEE80211_HW_SUPPORTS_PS |
4456 IEEE80211_HW_PS_NULLFUNC_STACK |
4457 IEEE80211_HW_AMPDU_AGGREGATION |
4458 - IEEE80211_HW_REPORTS_TX_ACK_STATUS;
4459 + IEEE80211_HW_REPORTS_TX_ACK_STATUS |
4460 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
4461
4462 /*
4463 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
4464 diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
4465 index 69097d1..ee38e4c 100644
4466 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
4467 +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
4468 @@ -391,10 +391,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
4469 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
4470 tx_info->status.ampdu_len = 1;
4471 tx_info->status.ampdu_ack_len = success ? 1 : 0;
4472 - /*
4473 - * TODO: Need to tear down BA session here
4474 - * if not successful.
4475 - */
4476 +
4477 + if (!success)
4478 + tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
4479 }
4480
4481 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
4482 @@ -1123,6 +1122,9 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
4483 struct ieee80211_iface_limit *if_limit;
4484 struct ieee80211_iface_combination *if_combination;
4485
4486 + if (rt2x00dev->ops->max_ap_intf < 2)
4487 + return;
4488 +
4489 /*
4490 * Build up AP interface limits structure.
4491 */
4492 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
4493 index fc24eb9..472c5c1 100644
4494 --- a/drivers/net/xen-netfront.c
4495 +++ b/drivers/net/xen-netfront.c
4496 @@ -1015,29 +1015,10 @@ err:
4497 i = xennet_fill_frags(np, skb, &tmpq);
4498
4499 /*
4500 - * Truesize approximates the size of true data plus
4501 - * any supervisor overheads. Adding hypervisor
4502 - * overheads has been shown to significantly reduce
4503 - * achievable bandwidth with the default receive
4504 - * buffer size. It is therefore not wise to account
4505 - * for it here.
4506 - *
4507 - * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
4508 - * to RX_COPY_THRESHOLD + the supervisor
4509 - * overheads. Here, we add the size of the data pulled
4510 - * in xennet_fill_frags().
4511 - *
4512 - * We also adjust for any unused space in the main
4513 - * data area by subtracting (RX_COPY_THRESHOLD -
4514 - * len). This is especially important with drivers
4515 - * which split incoming packets into header and data,
4516 - * using only 66 bytes of the main data area (see the
4517 - * e1000 driver for example.) On such systems,
4518 - * without this last adjustement, our achievable
4519 - * receive throughout using the standard receive
4520 - * buffer size was cut by 25%(!!!).
4521 - */
4522 - skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
4523 + * Truesize is the actual allocation size, even if the
4524 + * allocation is only partially used.
4525 + */
4526 + skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
4527 skb->len += skb->data_len;
4528
4529 if (rx->flags & XEN_NETRXF_csum_blank)
4530 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
4531 index dd90d15..71623a2 100644
4532 --- a/drivers/platform/x86/samsung-laptop.c
4533 +++ b/drivers/platform/x86/samsung-laptop.c
4534 @@ -1523,6 +1523,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
4535 },
4536 .driver_data = &samsung_broken_acpi_video,
4537 },
4538 + {
4539 + .callback = samsung_dmi_matched,
4540 + .ident = "N250P",
4541 + .matches = {
4542 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
4543 + DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
4544 + DMI_MATCH(DMI_BOARD_NAME, "N250P"),
4545 + },
4546 + .driver_data = &samsung_broken_acpi_video,
4547 + },
4548 { },
4549 };
4550 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
4551 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
4552 index daaddec..b8ad71f 100644
4553 --- a/drivers/platform/x86/sony-laptop.c
4554 +++ b/drivers/platform/x86/sony-laptop.c
4555 @@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
4556 static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
4557 void *buffer, size_t buflen)
4558 {
4559 + int ret = 0;
4560 size_t len = len;
4561 union acpi_object *object = __call_snc_method(handle, name, value);
4562
4563 if (!object)
4564 return -EINVAL;
4565
4566 - if (object->type == ACPI_TYPE_BUFFER)
4567 + if (object->type == ACPI_TYPE_BUFFER) {
4568 len = MIN(buflen, object->buffer.length);
4569 + memcpy(buffer, object->buffer.pointer, len);
4570
4571 - else if (object->type == ACPI_TYPE_INTEGER)
4572 + } else if (object->type == ACPI_TYPE_INTEGER) {
4573 len = MIN(buflen, sizeof(object->integer.value));
4574 + memcpy(buffer, &object->integer.value, len);
4575
4576 - else {
4577 + } else {
4578 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
4579 ACPI_TYPE_BUFFER, object->type);
4580 - kfree(object);
4581 - return -EINVAL;
4582 + ret = -EINVAL;
4583 }
4584
4585 - memcpy(buffer, object->buffer.pointer, len);
4586 kfree(object);
4587 - return 0;
4588 + return ret;
4589 }
4590
4591 struct sony_nc_handles {
4592 diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
4593 index e39a0c7..70cd467 100644
4594 --- a/drivers/regulator/max8997.c
4595 +++ b/drivers/regulator/max8997.c
4596 @@ -69,26 +69,26 @@ struct voltage_map_desc {
4597 int step;
4598 };
4599
4600 -/* Voltage maps in mV */
4601 +/* Voltage maps in uV */
4602 static const struct voltage_map_desc ldo_voltage_map_desc = {
4603 - .min = 800, .max = 3950, .step = 50,
4604 + .min = 800000, .max = 3950000, .step = 50000,
4605 }; /* LDO1 ~ 18, 21 all */
4606
4607 static const struct voltage_map_desc buck1245_voltage_map_desc = {
4608 - .min = 650, .max = 2225, .step = 25,
4609 + .min = 650000, .max = 2225000, .step = 25000,
4610 }; /* Buck1, 2, 4, 5 */
4611
4612 static const struct voltage_map_desc buck37_voltage_map_desc = {
4613 - .min = 750, .max = 3900, .step = 50,
4614 + .min = 750000, .max = 3900000, .step = 50000,
4615 }; /* Buck3, 7 */
4616
4617 -/* current map in mA */
4618 +/* current map in uA */
4619 static const struct voltage_map_desc charger_current_map_desc = {
4620 - .min = 200, .max = 950, .step = 50,
4621 + .min = 200000, .max = 950000, .step = 50000,
4622 };
4623
4624 static const struct voltage_map_desc topoff_current_map_desc = {
4625 - .min = 50, .max = 200, .step = 10,
4626 + .min = 50000, .max = 200000, .step = 10000,
4627 };
4628
4629 static const struct voltage_map_desc *reg_voltage_map[] = {
4630 @@ -192,7 +192,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
4631 if (val > desc->max)
4632 return -EINVAL;
4633
4634 - return val * 1000;
4635 + return val;
4636 }
4637
4638 static int max8997_get_enable_register(struct regulator_dev *rdev,
4639 @@ -483,7 +483,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
4640 {
4641 struct max8997_data *max8997 = rdev_get_drvdata(rdev);
4642 struct i2c_client *i2c = max8997->iodev->i2c;
4643 - int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
4644 const struct voltage_map_desc *desc;
4645 int rid = rdev_get_id(rdev);
4646 int i, reg, shift, mask, ret;
4647 @@ -507,7 +506,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
4648
4649 desc = reg_voltage_map[rid];
4650
4651 - i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
4652 + i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
4653 if (i < 0)
4654 return i;
4655
4656 @@ -555,7 +554,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
4657 case MAX8997_BUCK4:
4658 case MAX8997_BUCK5:
4659 return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
4660 - max8997->ramp_delay);
4661 + max8997->ramp_delay * 1000);
4662 }
4663
4664 return 0;
4665 @@ -654,7 +653,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
4666 const struct voltage_map_desc *desc;
4667 int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
4668 bool gpio_dvs_mode = false;
4669 - int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
4670
4671 if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
4672 return -EINVAL;
4673 @@ -679,7 +677,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
4674 selector);
4675
4676 desc = reg_voltage_map[rid];
4677 - new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
4678 + new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
4679 if (new_val < 0)
4680 return new_val;
4681
4682 @@ -977,8 +975,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4683 max8997->buck1_vol[i] = ret =
4684 max8997_get_voltage_proper_val(
4685 &buck1245_voltage_map_desc,
4686 - pdata->buck1_voltage[i] / 1000,
4687 - pdata->buck1_voltage[i] / 1000 +
4688 + pdata->buck1_voltage[i],
4689 + pdata->buck1_voltage[i] +
4690 buck1245_voltage_map_desc.step);
4691 if (ret < 0)
4692 goto err_out;
4693 @@ -986,8 +984,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4694 max8997->buck2_vol[i] = ret =
4695 max8997_get_voltage_proper_val(
4696 &buck1245_voltage_map_desc,
4697 - pdata->buck2_voltage[i] / 1000,
4698 - pdata->buck2_voltage[i] / 1000 +
4699 + pdata->buck2_voltage[i],
4700 + pdata->buck2_voltage[i] +
4701 buck1245_voltage_map_desc.step);
4702 if (ret < 0)
4703 goto err_out;
4704 @@ -995,8 +993,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
4705 max8997->buck5_vol[i] = ret =
4706 max8997_get_voltage_proper_val(
4707 &buck1245_voltage_map_desc,
4708 - pdata->buck5_voltage[i] / 1000,
4709 - pdata->buck5_voltage[i] / 1000 +
4710 + pdata->buck5_voltage[i],
4711 + pdata->buck5_voltage[i] +
4712 buck1245_voltage_map_desc.step);
4713 if (ret < 0)
4714 goto err_out;
4715 diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
4716 index 5dfa920..6a20019 100644
4717 --- a/drivers/regulator/max8998.c
4718 +++ b/drivers/regulator/max8998.c
4719 @@ -51,39 +51,39 @@ struct voltage_map_desc {
4720 int step;
4721 };
4722
4723 -/* Voltage maps */
4724 +/* Voltage maps in uV*/
4725 static const struct voltage_map_desc ldo23_voltage_map_desc = {
4726 - .min = 800, .step = 50, .max = 1300,
4727 + .min = 800000, .step = 50000, .max = 1300000,
4728 };
4729 static const struct voltage_map_desc ldo456711_voltage_map_desc = {
4730 - .min = 1600, .step = 100, .max = 3600,
4731 + .min = 1600000, .step = 100000, .max = 3600000,
4732 };
4733 static const struct voltage_map_desc ldo8_voltage_map_desc = {
4734 - .min = 3000, .step = 100, .max = 3600,
4735 + .min = 3000000, .step = 100000, .max = 3600000,
4736 };
4737 static const struct voltage_map_desc ldo9_voltage_map_desc = {
4738 - .min = 2800, .step = 100, .max = 3100,
4739 + .min = 2800000, .step = 100000, .max = 3100000,
4740 };
4741 static const struct voltage_map_desc ldo10_voltage_map_desc = {
4742 - .min = 950, .step = 50, .max = 1300,
4743 + .min = 950000, .step = 50000, .max = 1300000,
4744 };
4745 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
4746 - .min = 800, .step = 100, .max = 3300,
4747 + .min = 800000, .step = 100000, .max = 3300000,
4748 };
4749 static const struct voltage_map_desc ldo1415_voltage_map_desc = {
4750 - .min = 1200, .step = 100, .max = 3300,
4751 + .min = 1200000, .step = 100000, .max = 3300000,
4752 };
4753 static const struct voltage_map_desc ldo1617_voltage_map_desc = {
4754 - .min = 1600, .step = 100, .max = 3600,
4755 + .min = 1600000, .step = 100000, .max = 3600000,
4756 };
4757 static const struct voltage_map_desc buck12_voltage_map_desc = {
4758 - .min = 750, .step = 25, .max = 1525,
4759 + .min = 750000, .step = 25000, .max = 1525000,
4760 };
4761 static const struct voltage_map_desc buck3_voltage_map_desc = {
4762 - .min = 1600, .step = 100, .max = 3600,
4763 + .min = 1600000, .step = 100000, .max = 3600000,
4764 };
4765 static const struct voltage_map_desc buck4_voltage_map_desc = {
4766 - .min = 800, .step = 100, .max = 2300,
4767 + .min = 800000, .step = 100000, .max = 2300000,
4768 };
4769
4770 static const struct voltage_map_desc *ldo_voltage_map[] = {
4771 @@ -445,9 +445,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
4772 if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
4773 return 0;
4774
4775 - difference = (new_selector - old_selector) * desc->step;
4776 + difference = (new_selector - old_selector) * desc->step / 1000;
4777 if (difference > 0)
4778 - return difference / ((val & 0x0f) + 1);
4779 + return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
4780
4781 return 0;
4782 }
4783 @@ -702,7 +702,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4784 i = 0;
4785 while (buck12_voltage_map_desc.min +
4786 buck12_voltage_map_desc.step*i
4787 - < (pdata->buck1_voltage1 / 1000))
4788 + < pdata->buck1_voltage1)
4789 i++;
4790 max8998->buck1_vol[0] = i;
4791 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
4792 @@ -713,7 +713,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4793 i = 0;
4794 while (buck12_voltage_map_desc.min +
4795 buck12_voltage_map_desc.step*i
4796 - < (pdata->buck1_voltage2 / 1000))
4797 + < pdata->buck1_voltage2)
4798 i++;
4799
4800 max8998->buck1_vol[1] = i;
4801 @@ -725,7 +725,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4802 i = 0;
4803 while (buck12_voltage_map_desc.min +
4804 buck12_voltage_map_desc.step*i
4805 - < (pdata->buck1_voltage3 / 1000))
4806 + < pdata->buck1_voltage3)
4807 i++;
4808
4809 max8998->buck1_vol[2] = i;
4810 @@ -737,7 +737,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4811 i = 0;
4812 while (buck12_voltage_map_desc.min +
4813 buck12_voltage_map_desc.step*i
4814 - < (pdata->buck1_voltage4 / 1000))
4815 + < pdata->buck1_voltage4)
4816 i++;
4817
4818 max8998->buck1_vol[3] = i;
4819 @@ -763,7 +763,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4820 i = 0;
4821 while (buck12_voltage_map_desc.min +
4822 buck12_voltage_map_desc.step*i
4823 - < (pdata->buck2_voltage1 / 1000))
4824 + < pdata->buck2_voltage1)
4825 i++;
4826 max8998->buck2_vol[0] = i;
4827 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
4828 @@ -774,7 +774,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4829 i = 0;
4830 while (buck12_voltage_map_desc.min +
4831 buck12_voltage_map_desc.step*i
4832 - < (pdata->buck2_voltage2 / 1000))
4833 + < pdata->buck2_voltage2)
4834 i++;
4835 max8998->buck2_vol[1] = i;
4836 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
4837 @@ -792,8 +792,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
4838 int count = (desc->max - desc->min) / desc->step + 1;
4839
4840 regulators[index].n_voltages = count;
4841 - regulators[index].min_uV = desc->min * 1000;
4842 - regulators[index].uV_step = desc->step * 1000;
4843 + regulators[index].min_uV = desc->min;
4844 + regulators[index].uV_step = desc->step;
4845 }
4846
4847 config.dev = max8998->dev;
4848 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
4849 index 926f9c8..3fd1b88 100644
4850 --- a/drivers/regulator/s2mps11.c
4851 +++ b/drivers/regulator/s2mps11.c
4852 @@ -269,16 +269,16 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
4853
4854 if (ramp_enable) {
4855 if (s2mps11->buck2_ramp)
4856 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
4857 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) << 6;
4858 if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
4859 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
4860 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) << 4;
4861 sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
4862 }
4863
4864 ramp_reg &= 0x00;
4865 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
4866 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
4867 - ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
4868 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) << 6;
4869 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) << 4;
4870 + ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) << 2;
4871 ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
4872 sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
4873
4874 diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
4875 index 782c228..416fe0a 100644
4876 --- a/drivers/regulator/wm831x-dcdc.c
4877 +++ b/drivers/regulator/wm831x-dcdc.c
4878 @@ -290,7 +290,7 @@ static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
4879 if (vsel > dcdc->dvs_vsel) {
4880 ret = wm831x_set_bits(wm831x, dvs_reg,
4881 WM831X_DC1_DVS_VSEL_MASK,
4882 - dcdc->dvs_vsel);
4883 + vsel);
4884 if (ret == 0)
4885 dcdc->dvs_vsel = vsel;
4886 else
4887 diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
4888 index 368368f..908d287 100644
4889 --- a/drivers/s390/cio/device_pgid.c
4890 +++ b/drivers/s390/cio/device_pgid.c
4891 @@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
4892 * Determine pathgroup state from PGID data.
4893 */
4894 static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
4895 - int *mismatch, int *reserved, u8 *reset)
4896 + int *mismatch, u8 *reserved, u8 *reset)
4897 {
4898 struct pgid *pgid = &cdev->private->pgid[0];
4899 struct pgid *first = NULL;
4900 @@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
4901 if ((cdev->private->pgid_valid_mask & lpm) == 0)
4902 continue;
4903 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
4904 - *reserved = 1;
4905 + *reserved |= lpm;
4906 if (pgid_is_reset(pgid)) {
4907 *reset |= lpm;
4908 continue;
4909 @@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
4910 struct subchannel *sch = to_subchannel(cdev->dev.parent);
4911 struct pgid *pgid;
4912 int mismatch = 0;
4913 - int reserved = 0;
4914 + u8 reserved = 0;
4915 u8 reset = 0;
4916 u8 donepm;
4917
4918 if (rc)
4919 goto out;
4920 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
4921 - if (reserved)
4922 + if (reserved == cdev->private->pgid_valid_mask)
4923 rc = -EUSERS;
4924 else if (mismatch)
4925 rc = -EOPNOTSUPP;
4926 @@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
4927 }
4928 out:
4929 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
4930 - "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
4931 + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
4932 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
4933 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
4934 switch (rc) {
4935 diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
4936 index 8f7eb4f..487aa6f 100644
4937 --- a/drivers/scsi/mvsas/mv_94xx.h
4938 +++ b/drivers/scsi/mvsas/mv_94xx.h
4939 @@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
4940 #define SPI_ADDR_VLD_94XX (1U << 1)
4941 #define SPI_CTRL_SpiStart_94XX (1U << 0)
4942
4943 -#define mv_ffc(x) ffz(x)
4944 -
4945 static inline int
4946 mv_ffc64(u64 v)
4947 {
4948 - int i;
4949 - i = mv_ffc((u32)v);
4950 - if (i >= 0)
4951 - return i;
4952 - i = mv_ffc((u32)(v>>32));
4953 -
4954 - if (i != 0)
4955 - return 32 + i;
4956 -
4957 - return -1;
4958 + u64 x = ~v;
4959 + return x ? __ffs64(x) : -1;
4960 }
4961
4962 #define r_reg_set_enable(i) \
4963 diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
4964 index c04a4f5..da24955 100644
4965 --- a/drivers/scsi/mvsas/mv_sas.h
4966 +++ b/drivers/scsi/mvsas/mv_sas.h
4967 @@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
4968 #define DEV_IS_EXPANDER(type) \
4969 ((type == EDGE_DEV) || (type == FANOUT_DEV))
4970
4971 -#define bit(n) ((u32)1 << n)
4972 +#define bit(n) ((u64)1 << n)
4973
4974 #define for_each_phy(__lseq_mask, __mc, __lseq) \
4975 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
4976 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
4977 index 1c28215..83d7984 100644
4978 --- a/drivers/scsi/qla2xxx/qla_attr.c
4979 +++ b/drivers/scsi/qla2xxx/qla_attr.c
4980 @@ -1615,8 +1615,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
4981 * At this point all fcport's software-states are cleared. Perform any
4982 * final cleanup of firmware resources (PCBs and XCBs).
4983 */
4984 - if (fcport->loop_id != FC_NO_LOOP_ID &&
4985 - !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
4986 + if (fcport->loop_id != FC_NO_LOOP_ID) {
4987 if (IS_FWI2_CAPABLE(fcport->vha->hw))
4988 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
4989 fcport->loop_id, fcport->d_id.b.domain,
4990 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
4991 index 2f9bddd..9f34ded 100644
4992 --- a/drivers/scsi/qla2xxx/qla_bsg.c
4993 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
4994 @@ -219,7 +219,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
4995 break;
4996 }
4997 exit_fcp_prio_cfg:
4998 - bsg_job->job_done(bsg_job);
4999 + if (!ret)
5000 + bsg_job->job_done(bsg_job);
5001 return ret;
5002 }
5003
5004 @@ -741,9 +742,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5005 if (qla81xx_get_port_config(vha, config)) {
5006 ql_log(ql_log_warn, vha, 0x701f,
5007 "Get port config failed.\n");
5008 - bsg_job->reply->result = (DID_ERROR << 16);
5009 rval = -EPERM;
5010 - goto done_free_dma_req;
5011 + goto done_free_dma_rsp;
5012 }
5013
5014 ql_dbg(ql_dbg_user, vha, 0x70c0,
5015 @@ -761,9 +761,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5016 new_config, elreq.options);
5017
5018 if (rval) {
5019 - bsg_job->reply->result = (DID_ERROR << 16);
5020 rval = -EPERM;
5021 - goto done_free_dma_req;
5022 + goto done_free_dma_rsp;
5023 }
5024
5025 type = "FC_BSG_HST_VENDOR_LOOPBACK";
5026 @@ -795,9 +794,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5027 "MPI reset failed.\n");
5028 }
5029
5030 - bsg_job->reply->result = (DID_ERROR << 16);
5031 rval = -EIO;
5032 - goto done_free_dma_req;
5033 + goto done_free_dma_rsp;
5034 }
5035 } else {
5036 type = "FC_BSG_HST_VENDOR_LOOPBACK";
5037 @@ -812,34 +810,27 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
5038 ql_log(ql_log_warn, vha, 0x702c,
5039 "Vendor request %s failed.\n", type);
5040
5041 - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5042 - sizeof(struct fc_bsg_reply);
5043 -
5044 - memcpy(fw_sts_ptr, response, sizeof(response));
5045 - fw_sts_ptr += sizeof(response);
5046 - *fw_sts_ptr = command_sent;
5047 rval = 0;
5048 bsg_job->reply->result = (DID_ERROR << 16);
5049 + bsg_job->reply->reply_payload_rcv_len = 0;
5050 } else {
5051 ql_dbg(ql_dbg_user, vha, 0x702d,
5052 "Vendor request %s completed.\n", type);
5053 -
5054 - bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5055 - sizeof(response) + sizeof(uint8_t);
5056 - bsg_job->reply->reply_payload_rcv_len =
5057 - bsg_job->reply_payload.payload_len;
5058 - fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5059 - sizeof(struct fc_bsg_reply);
5060 - memcpy(fw_sts_ptr, response, sizeof(response));
5061 - fw_sts_ptr += sizeof(response);
5062 - *fw_sts_ptr = command_sent;
5063 - bsg_job->reply->result = DID_OK;
5064 + bsg_job->reply->result = (DID_OK << 16);
5065 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
5066 bsg_job->reply_payload.sg_cnt, rsp_data,
5067 rsp_data_len);
5068 }
5069 - bsg_job->job_done(bsg_job);
5070
5071 + bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5072 + sizeof(response) + sizeof(uint8_t);
5073 + fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
5074 + sizeof(struct fc_bsg_reply);
5075 + memcpy(fw_sts_ptr, response, sizeof(response));
5076 + fw_sts_ptr += sizeof(response);
5077 + *fw_sts_ptr = command_sent;
5078 +
5079 +done_free_dma_rsp:
5080 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
5081 rsp_data, rsp_data_dma);
5082 done_free_dma_req:
5083 @@ -853,6 +844,8 @@ done_unmap_req_sg:
5084 dma_unmap_sg(&ha->pdev->dev,
5085 bsg_job->request_payload.sg_list,
5086 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
5087 + if (!rval)
5088 + bsg_job->job_done(bsg_job);
5089 return rval;
5090 }
5091
5092 @@ -877,16 +870,15 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
5093 if (rval) {
5094 ql_log(ql_log_warn, vha, 0x7030,
5095 "Vendor request 84xx reset failed.\n");
5096 - rval = 0;
5097 - bsg_job->reply->result = (DID_ERROR << 16);
5098 + rval = (DID_ERROR << 16);
5099
5100 } else {
5101 ql_dbg(ql_dbg_user, vha, 0x7031,
5102 "Vendor request 84xx reset completed.\n");
5103 bsg_job->reply->result = DID_OK;
5104 + bsg_job->job_done(bsg_job);
5105 }
5106
5107 - bsg_job->job_done(bsg_job);
5108 return rval;
5109 }
5110
5111 @@ -976,8 +968,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
5112 ql_log(ql_log_warn, vha, 0x7037,
5113 "Vendor request 84xx updatefw failed.\n");
5114
5115 - rval = 0;
5116 - bsg_job->reply->result = (DID_ERROR << 16);
5117 + rval = (DID_ERROR << 16);
5118 } else {
5119 ql_dbg(ql_dbg_user, vha, 0x7038,
5120 "Vendor request 84xx updatefw completed.\n");
5121 @@ -986,7 +977,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
5122 bsg_job->reply->result = DID_OK;
5123 }
5124
5125 - bsg_job->job_done(bsg_job);
5126 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
5127
5128 done_free_fw_buf:
5129 @@ -996,6 +986,8 @@ done_unmap_sg:
5130 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
5131 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
5132
5133 + if (!rval)
5134 + bsg_job->job_done(bsg_job);
5135 return rval;
5136 }
5137
5138 @@ -1163,8 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
5139 ql_log(ql_log_warn, vha, 0x7043,
5140 "Vendor request 84xx mgmt failed.\n");
5141
5142 - rval = 0;
5143 - bsg_job->reply->result = (DID_ERROR << 16);
5144 + rval = (DID_ERROR << 16);
5145
5146 } else {
5147 ql_dbg(ql_dbg_user, vha, 0x7044,
5148 @@ -1184,8 +1175,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
5149 }
5150 }
5151
5152 - bsg_job->job_done(bsg_job);
5153 -
5154 done_unmap_sg:
5155 if (mgmt_b)
5156 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
5157 @@ -1200,6 +1189,8 @@ done_unmap_sg:
5158 exit_mgmt:
5159 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
5160
5161 + if (!rval)
5162 + bsg_job->job_done(bsg_job);
5163 return rval;
5164 }
5165
5166 @@ -1276,9 +1267,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
5167 fcport->port_name[3], fcport->port_name[4],
5168 fcport->port_name[5], fcport->port_name[6],
5169 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
5170 - rval = 0;
5171 - bsg_job->reply->result = (DID_ERROR << 16);
5172 -
5173 + rval = (DID_ERROR << 16);
5174 } else {
5175 if (!port_param->mode) {
5176 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
5177 @@ -1292,9 +1281,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
5178 }
5179
5180 bsg_job->reply->result = DID_OK;
5181 + bsg_job->job_done(bsg_job);
5182 }
5183
5184 - bsg_job->job_done(bsg_job);
5185 return rval;
5186 }
5187
5188 @@ -1887,8 +1876,6 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
5189 return qla24xx_process_bidir_cmd(bsg_job);
5190
5191 default:
5192 - bsg_job->reply->result = (DID_ERROR << 16);
5193 - bsg_job->job_done(bsg_job);
5194 return -ENOSYS;
5195 }
5196 }
5197 @@ -1919,8 +1906,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
5198 ql_dbg(ql_dbg_user, vha, 0x709f,
5199 "BSG: ISP abort active/needed -- cmd=%d.\n",
5200 bsg_job->request->msgcode);
5201 - bsg_job->reply->result = (DID_ERROR << 16);
5202 - bsg_job->job_done(bsg_job);
5203 return -EBUSY;
5204 }
5205
5206 @@ -1943,7 +1928,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
5207 case FC_BSG_RPT_CT:
5208 default:
5209 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
5210 - bsg_job->reply->result = ret;
5211 break;
5212 }
5213 return ret;
5214 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
5215 index d501bf5..f4b1fc8 100644
5216 --- a/drivers/scsi/qla2xxx/qla_os.c
5217 +++ b/drivers/scsi/qla2xxx/qla_os.c
5218 @@ -2755,6 +2755,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
5219
5220 ha->flags.host_shutting_down = 1;
5221
5222 + set_bit(UNLOADING, &base_vha->dpc_flags);
5223 mutex_lock(&ha->vport_lock);
5224 while (ha->cur_vport_count) {
5225 struct Scsi_Host *scsi_host;
5226 @@ -2784,8 +2785,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
5227 "Error while clearing DRV-Presence.\n");
5228 }
5229
5230 - set_bit(UNLOADING, &base_vha->dpc_flags);
5231 -
5232 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
5233
5234 qla2x00_dfs_remove(base_vha);
5235 @@ -4505,9 +4504,9 @@ qla2x00_do_dpc(void *data)
5236 "ISP abort end.\n");
5237 }
5238
5239 - if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
5240 + if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
5241 + &base_vha->dpc_flags)) {
5242 qla2x00_update_fcports(base_vha);
5243 - clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
5244 }
5245
5246 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
5247 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
5248 index 62aa558..661d33e 100644
5249 --- a/drivers/scsi/qla2xxx/qla_target.c
5250 +++ b/drivers/scsi/qla2xxx/qla_target.c
5251 @@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
5252 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
5253 {
5254 struct qla_hw_data *ha = vha->hw;
5255 + struct se_session *se_sess = sess->se_sess;
5256 struct qla_tgt_mgmt_cmd *mcmd;
5257 + struct se_cmd *se_cmd;
5258 + u32 lun = 0;
5259 int rc;
5260 + bool found_lun = false;
5261 +
5262 + spin_lock(&se_sess->sess_cmd_lock);
5263 + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
5264 + struct qla_tgt_cmd *cmd =
5265 + container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
5266 + if (cmd->tag == abts->exchange_addr_to_abort) {
5267 + lun = cmd->unpacked_lun;
5268 + found_lun = true;
5269 + break;
5270 + }
5271 + }
5272 + spin_unlock(&se_sess->sess_cmd_lock);
5273 +
5274 + if (!found_lun)
5275 + return -ENOENT;
5276
5277 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
5278 "qla_target(%d): task abort (tag=%d)\n",
5279 @@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
5280 mcmd->sess = sess;
5281 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
5282
5283 - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
5284 + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
5285 abts->exchange_addr_to_abort);
5286 if (rc != 0) {
5287 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
5288 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
5289 index ce5224c..931a7d9 100644
5290 --- a/drivers/scsi/scsi_sysfs.c
5291 +++ b/drivers/scsi/scsi_sysfs.c
5292 @@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
5293
5294 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
5295
5296 -static int check_reset_type(char *str)
5297 +static int check_reset_type(const char *str)
5298 {
5299 - if (strncmp(str, "adapter", 10) == 0)
5300 + if (sysfs_streq(str, "adapter"))
5301 return SCSI_ADAPTER_RESET;
5302 - else if (strncmp(str, "firmware", 10) == 0)
5303 + else if (sysfs_streq(str, "firmware"))
5304 return SCSI_FIRMWARE_RESET;
5305 else
5306 return 0;
5307 @@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
5308 struct Scsi_Host *shost = class_to_shost(dev);
5309 struct scsi_host_template *sht = shost->hostt;
5310 int ret = -EINVAL;
5311 - char str[10];
5312 int type;
5313
5314 - sscanf(buf, "%s", str);
5315 - type = check_reset_type(str);
5316 -
5317 + type = check_reset_type(buf);
5318 if (!type)
5319 goto exit_store_host_reset;
5320
5321 diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
5322 index 2093403..3464d14 100644
5323 --- a/drivers/staging/comedi/Kconfig
5324 +++ b/drivers/staging/comedi/Kconfig
5325 @@ -444,6 +444,7 @@ config COMEDI_ADQ12B
5326
5327 config COMEDI_NI_AT_A2150
5328 tristate "NI AT-A2150 ISA card support"
5329 + select COMEDI_FC
5330 depends on VIRT_TO_BUS
5331 ---help---
5332 Enable support for National Instruments AT-A2150 cards
5333 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
5334 index c2a32cf..1ab8037 100644
5335 --- a/drivers/staging/comedi/comedi_fops.c
5336 +++ b/drivers/staging/comedi/comedi_fops.c
5337 @@ -1546,6 +1546,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
5338 if (cmd == COMEDI_DEVCONFIG) {
5339 rc = do_devconfig_ioctl(dev,
5340 (struct comedi_devconfig __user *)arg);
5341 + if (rc == 0)
5342 + /* Evade comedi_auto_unconfig(). */
5343 + dev_file_info->hardware_device = NULL;
5344 goto done;
5345 }
5346
5347 diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
5348 index 7817def..ec7cf62 100644
5349 --- a/drivers/staging/comedi/drivers/comedi_test.c
5350 +++ b/drivers/staging/comedi/drivers/comedi_test.c
5351 @@ -372,7 +372,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
5352 struct waveform_private *devpriv = dev->private;
5353
5354 devpriv->timer_running = 0;
5355 - del_timer(&devpriv->timer);
5356 + del_timer_sync(&devpriv->timer);
5357 return 0;
5358 }
5359
5360 diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
5361 index f284a90..f3f5478 100644
5362 --- a/drivers/staging/comedi/drivers/ni_pcimio.c
5363 +++ b/drivers/staging/comedi/drivers/ni_pcimio.c
5364 @@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
5365 .ao_range_table = &range_ni_M_625x_ao,
5366 .reg_type = ni_reg_625x,
5367 .ao_unipolar = 0,
5368 - .ao_speed = 357,
5369 + .ao_speed = 350,
5370 .num_p0_dio_channels = 8,
5371 .caldac = {caldac_none},
5372 .has_8255 = 0,
5373 @@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
5374 .ao_range_table = &range_ni_M_625x_ao,
5375 .reg_type = ni_reg_625x,
5376 .ao_unipolar = 0,
5377 - .ao_speed = 357,
5378 + .ao_speed = 350,
5379 .num_p0_dio_channels = 8,
5380 .caldac = {caldac_none},
5381 .has_8255 = 0,
5382 @@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
5383 .ao_range_table = &range_ni_M_625x_ao,
5384 .reg_type = ni_reg_625x,
5385 .ao_unipolar = 0,
5386 - .ao_speed = 357,
5387 + .ao_speed = 350,
5388 .num_p0_dio_channels = 8,
5389 .caldac = {caldac_none},
5390 .has_8255 = 0,
5391 @@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
5392 .ao_range_table = &range_ni_M_625x_ao,
5393 .reg_type = ni_reg_625x,
5394 .ao_unipolar = 0,
5395 - .ao_speed = 357,
5396 + .ao_speed = 350,
5397 .num_p0_dio_channels = 32,
5398 .caldac = {caldac_none},
5399 .has_8255 = 0,
5400 @@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
5401 .ao_range_table = &range_ni_M_625x_ao,
5402 .reg_type = ni_reg_625x,
5403 .ao_unipolar = 0,
5404 - .ao_speed = 357,
5405 + .ao_speed = 350,
5406 .num_p0_dio_channels = 32,
5407 .caldac = {caldac_none},
5408 .has_8255 = 0,
5409 @@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
5410 .ao_range_table = &range_ni_M_628x_ao,
5411 .reg_type = ni_reg_628x,
5412 .ao_unipolar = 1,
5413 - .ao_speed = 357,
5414 + .ao_speed = 350,
5415 .num_p0_dio_channels = 8,
5416 .caldac = {caldac_none},
5417 .has_8255 = 0,
5418 @@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
5419 .ao_range_table = &range_ni_M_628x_ao,
5420 .reg_type = ni_reg_628x,
5421 .ao_unipolar = 1,
5422 - .ao_speed = 357,
5423 + .ao_speed = 350,
5424 .num_p0_dio_channels = 8,
5425 .caldac = {caldac_none},
5426 .has_8255 = 0,
5427 @@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
5428 .ao_range_table = &range_ni_M_628x_ao,
5429 .reg_type = ni_reg_628x,
5430 .ao_unipolar = 1,
5431 - .ao_speed = 357,
5432 + .ao_speed = 350,
5433 .num_p0_dio_channels = 32,
5434 .caldac = {caldac_none},
5435 .has_8255 = 0,
5436 diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/staging/omapdrm/omap_gem_dmabuf.c
5437 index c6f3ef6..784fa4d 100644
5438 --- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
5439 +++ b/drivers/staging/omapdrm/omap_gem_dmabuf.c
5440 @@ -207,7 +207,12 @@ struct drm_gem_object * omap_gem_prime_import(struct drm_device *dev,
5441 obj = buffer->priv;
5442 /* is it from our device? */
5443 if (obj->dev == dev) {
5444 + /*
5445 + * Importing dmabuf exported from out own gem increases
5446 + * refcount on gem itself instead of f_count of dmabuf.
5447 + */
5448 drm_gem_object_reference(obj);
5449 + dma_buf_put(buffer);
5450 return obj;
5451 }
5452 }
5453 diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
5454 index 6b73843..a96cd06 100644
5455 --- a/drivers/staging/rtl8712/usb_intf.c
5456 +++ b/drivers/staging/rtl8712/usb_intf.c
5457 @@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
5458 {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
5459 /* Belkin */
5460 {USB_DEVICE(0x050D, 0x945A)},
5461 + /* ISY IWL - Belkin clone */
5462 + {USB_DEVICE(0x050D, 0x11F1)},
5463 /* Corega */
5464 {USB_DEVICE(0x07AA, 0x0047)},
5465 /* D-Link */
5466 diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
5467 index df95337..7616f05 100644
5468 --- a/drivers/staging/speakup/synth.c
5469 +++ b/drivers/staging/speakup/synth.c
5470 @@ -342,7 +342,7 @@ int synth_init(char *synth_name)
5471
5472 mutex_lock(&spk_mutex);
5473 /* First, check if we already have it loaded. */
5474 - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
5475 + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
5476 if (strcmp(synths[i]->name, synth_name) == 0)
5477 synth = synths[i];
5478
5479 @@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
5480 int i;
5481 int status = 0;
5482 mutex_lock(&spk_mutex);
5483 - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
5484 + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
5485 /* synth_remove() is responsible for rotating the array down */
5486 if (in_synth == synths[i]) {
5487 mutex_unlock(&spk_mutex);
5488 diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
5489 index 28edf9e..16a229d 100644
5490 --- a/drivers/staging/vt6656/dpc.c
5491 +++ b/drivers/staging/vt6656/dpc.c
5492 @@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption (
5493
5494 PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
5495 *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
5496 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
5497 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
5498 if (byDecMode == KEY_CTL_TKIP) {
5499 *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
5500 } else {
5501 @@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption (
5502
5503 PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
5504 *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
5505 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
5506 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
5507
5508 if (byDecMode == KEY_CTL_TKIP) {
5509 *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
5510 diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
5511 index a61fcb9..bf24adb 100644
5512 --- a/drivers/staging/vt6656/key.c
5513 +++ b/drivers/staging/vt6656/key.c
5514 @@ -223,7 +223,7 @@ BOOL KeybSetKey(
5515 PSKeyManagement pTable,
5516 PBYTE pbyBSSID,
5517 DWORD dwKeyIndex,
5518 - unsigned long uKeyLength,
5519 + u32 uKeyLength,
5520 PQWORD pKeyRSC,
5521 PBYTE pbyKey,
5522 BYTE byKeyDecMode
5523 @@ -235,7 +235,8 @@ BOOL KeybSetKey(
5524 PSKeyItem pKey;
5525 unsigned int uKeyIdx;
5526
5527 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
5528 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5529 + "Enter KeybSetKey: %X\n", dwKeyIndex);
5530
5531 j = (MAX_KEY_TABLE-1);
5532 for (i=0;i<(MAX_KEY_TABLE-1);i++) {
5533 @@ -261,7 +262,9 @@ BOOL KeybSetKey(
5534 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5535 // Group transmit key
5536 pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
5537 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
5538 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5539 + "Group transmit key(R)[%X]: %d\n",
5540 + pTable->KeyTable[i].dwGTKeyIndex, i);
5541 }
5542 pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
5543 pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
5544 @@ -302,9 +305,12 @@ BOOL KeybSetKey(
5545 }
5546 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5547
5548 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
5549 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
5550 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
5551 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
5552 + pKey->dwTSC47_16);
5553 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ",
5554 + pKey->wTSC15_0);
5555 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
5556 + pKey->dwKeyIndex);
5557
5558 return (TRUE);
5559 }
5560 @@ -326,7 +332,9 @@ BOOL KeybSetKey(
5561 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5562 // Group transmit key
5563 pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
5564 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j);
5565 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5566 + "Group transmit key(N)[%X]: %d\n",
5567 + pTable->KeyTable[j].dwGTKeyIndex, j);
5568 }
5569 pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
5570 pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
5571 @@ -367,9 +375,11 @@ BOOL KeybSetKey(
5572 }
5573 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5574
5575 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
5576 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
5577 + pKey->dwTSC47_16);
5578 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
5579 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
5580 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
5581 + pKey->dwKeyIndex);
5582
5583 return (TRUE);
5584 }
5585 @@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
5586 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]);
5587 }
5588 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5589 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
5590 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
5591 + pTable->KeyTable[i].dwGTKeyIndex);
5592
5593 return (TRUE);
5594 }
5595 @@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey(
5596 void *pDeviceHandler,
5597 PSKeyManagement pTable,
5598 DWORD dwKeyIndex,
5599 - unsigned long uKeyLength,
5600 + u32 uKeyLength,
5601 PQWORD pKeyRSC,
5602 PBYTE pbyKey,
5603 BYTE byKeyDecMode
5604 @@ -696,7 +707,10 @@ BOOL KeybSetDefaultKey(
5605 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5606 // Group transmit key
5607 pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
5608 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1);
5609 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5610 + "Group transmit key(R)[%X]: %d\n",
5611 + pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
5612 + MAX_KEY_TABLE-1);
5613
5614 }
5615 pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
5616 @@ -747,9 +761,11 @@ BOOL KeybSetDefaultKey(
5617 }
5618 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
5619
5620 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
5621 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n",
5622 + pKey->dwTSC47_16);
5623 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
5624 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
5625 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
5626 + pKey->dwKeyIndex);
5627
5628 return (TRUE);
5629 }
5630 @@ -775,7 +791,7 @@ BOOL KeybSetAllGroupKey(
5631 void *pDeviceHandler,
5632 PSKeyManagement pTable,
5633 DWORD dwKeyIndex,
5634 - unsigned long uKeyLength,
5635 + u32 uKeyLength,
5636 PQWORD pKeyRSC,
5637 PBYTE pbyKey,
5638 BYTE byKeyDecMode
5639 @@ -787,7 +803,8 @@ BOOL KeybSetAllGroupKey(
5640 PSKeyItem pKey;
5641 unsigned int uKeyIdx;
5642
5643 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
5644 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
5645 + dwKeyIndex);
5646
5647
5648 if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
5649 @@ -804,7 +821,9 @@ BOOL KeybSetAllGroupKey(
5650 if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
5651 // Group transmit key
5652 pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
5653 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
5654 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
5655 + "Group transmit key(R)[%X]: %d\n",
5656 + pTable->KeyTable[i].dwGTKeyIndex, i);
5657
5658 }
5659 pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
5660 diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
5661 index f749c7a..bd35d39 100644
5662 --- a/drivers/staging/vt6656/key.h
5663 +++ b/drivers/staging/vt6656/key.h
5664 @@ -58,7 +58,7 @@
5665 typedef struct tagSKeyItem
5666 {
5667 BOOL bKeyValid;
5668 - unsigned long uKeyLength;
5669 + u32 uKeyLength;
5670 BYTE abyKey[MAX_KEY_LEN];
5671 QWORD KeyRSC;
5672 DWORD dwTSC47_16;
5673 @@ -107,7 +107,7 @@ BOOL KeybSetKey(
5674 PSKeyManagement pTable,
5675 PBYTE pbyBSSID,
5676 DWORD dwKeyIndex,
5677 - unsigned long uKeyLength,
5678 + u32 uKeyLength,
5679 PQWORD pKeyRSC,
5680 PBYTE pbyKey,
5681 BYTE byKeyDecMode
5682 @@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey(
5683 void *pDeviceHandler,
5684 PSKeyManagement pTable,
5685 DWORD dwKeyIndex,
5686 - unsigned long uKeyLength,
5687 + u32 uKeyLength,
5688 PQWORD pKeyRSC,
5689 PBYTE pbyKey,
5690 BYTE byKeyDecMode
5691 @@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey(
5692 void *pDeviceHandler,
5693 PSKeyManagement pTable,
5694 DWORD dwKeyIndex,
5695 - unsigned long uKeyLength,
5696 + u32 uKeyLength,
5697 PQWORD pKeyRSC,
5698 PBYTE pbyKey,
5699 BYTE byKeyDecMode
5700 diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
5701 index af4a29d..8fddc7b 100644
5702 --- a/drivers/staging/vt6656/mac.c
5703 +++ b/drivers/staging/vt6656/mac.c
5704 @@ -260,7 +260,8 @@ BYTE pbyData[24];
5705 dwData1 <<= 16;
5706 dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
5707
5708 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
5709 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
5710 + " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
5711
5712 //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
5713 //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
5714 @@ -277,7 +278,8 @@ BYTE pbyData[24];
5715 dwData2 <<= 8;
5716 dwData2 |= *(pbyAddr+0);
5717
5718 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2);
5719 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
5720 + wOffset, dwData2);
5721
5722 //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
5723 //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
5724 diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
5725 index 593cdc7..74c0598 100644
5726 --- a/drivers/staging/vt6656/rf.c
5727 +++ b/drivers/staging/vt6656/rf.c
5728 @@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr;
5729 return TRUE;
5730 }
5731
5732 + if (uCH == 0)
5733 + return -EINVAL;
5734 +
5735 switch (uRATE) {
5736 case RATE_1M:
5737 case RATE_2M:
5738 diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
5739 index 3390838..5c154e3 100644
5740 --- a/drivers/staging/vt6656/rxtx.c
5741 +++ b/drivers/staging/vt6656/rxtx.c
5742 @@ -375,7 +375,8 @@ s_vFillTxKey (
5743 *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
5744 // Append IV&ExtIV after Mac Header
5745 *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
5746 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
5747 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
5748 + *pdwExtIV);
5749
5750 } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
5751 pTransmitKey->wTSC15_0++;
5752 @@ -1751,7 +1752,8 @@ s_bPacketToWirelessUsb(
5753 MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
5754 dwMIC_Priority = 0;
5755 MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
5756 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
5757 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n",
5758 + dwMICKey0, dwMICKey1);
5759
5760 ///////////////////////////////////////////////////////////////////
5761
5762 @@ -2633,7 +2635,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
5763 MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
5764 dwMIC_Priority = 0;
5765 MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
5766 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
5767 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\
5768 + " %X, %X\n", dwMICKey0, dwMICKey1);
5769
5770 uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
5771
5772 @@ -2653,7 +2656,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
5773
5774 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
5775 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
5776 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
5777 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n",
5778 + *pdwMIC_L, *pdwMIC_R);
5779
5780 }
5781
5782 @@ -3027,7 +3031,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
5783 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
5784 }
5785 else {
5786 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
5787 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
5788 + pTransmitKey->dwKeyIndex);
5789 bNeedEncryption = TRUE;
5790 }
5791 }
5792 @@ -3041,7 +3046,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
5793 if (pDevice->bEnableHostWEP) {
5794 if ((uNodeIndex != 0) &&
5795 (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
5796 - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
5797 + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
5798 + pTransmitKey->dwKeyIndex);
5799 bNeedEncryption = TRUE;
5800 }
5801 }
5802 diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
5803 index 8e9450e..dfbf747 100644
5804 --- a/drivers/staging/vt6656/ttype.h
5805 +++ b/drivers/staging/vt6656/ttype.h
5806 @@ -29,6 +29,8 @@
5807 #ifndef __TTYPE_H__
5808 #define __TTYPE_H__
5809
5810 +#include <linux/types.h>
5811 +
5812 /******* Common definitions and typedefs ***********************************/
5813
5814 typedef int BOOL;
5815 @@ -42,17 +44,17 @@ typedef int BOOL;
5816
5817 /****** Simple typedefs ***************************************************/
5818
5819 -typedef unsigned char BYTE; // 8-bit
5820 -typedef unsigned short WORD; // 16-bit
5821 -typedef unsigned long DWORD; // 32-bit
5822 +typedef u8 BYTE;
5823 +typedef u16 WORD;
5824 +typedef u32 DWORD;
5825
5826 // QWORD is for those situation that we want
5827 // an 8-byte-aligned 8 byte long structure
5828 // which is NOT really a floating point number.
5829 typedef union tagUQuadWord {
5830 struct {
5831 - DWORD dwLowDword;
5832 - DWORD dwHighDword;
5833 + u32 dwLowDword;
5834 + u32 dwHighDword;
5835 } u;
5836 double DoNotUseThisField;
5837 } UQuadWord;
5838 @@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit
5839
5840 /****** Common pointer types ***********************************************/
5841
5842 -typedef unsigned long ULONG_PTR; // 32-bit
5843 -typedef unsigned long DWORD_PTR; // 32-bit
5844 +typedef u32 ULONG_PTR;
5845 +typedef u32 DWORD_PTR;
5846
5847 // boolean pointer
5848
5849 diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
5850 index 586fbe1..b854d7e 100644
5851 --- a/drivers/staging/vt6656/wcmd.c
5852 +++ b/drivers/staging/vt6656/wcmd.c
5853 @@ -316,17 +316,19 @@ s_MgrMakeProbeRequest(
5854 return pTxPacket;
5855 }
5856
5857 -void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
5858 +void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
5859 {
5860 - PSDevice pDevice = (PSDevice)hDeviceContext;
5861 + PSDevice pDevice = (PSDevice)hDeviceContext;
5862
5863 - init_timer(&pDevice->sTimerCommand);
5864 - pDevice->sTimerCommand.data = (unsigned long)pDevice;
5865 - pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
5866 - // RUN_AT :1 msec ~= (HZ/1024)
5867 - pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
5868 - add_timer(&pDevice->sTimerCommand);
5869 - return;
5870 + init_timer(&pDevice->sTimerCommand);
5871 +
5872 + pDevice->sTimerCommand.data = (unsigned long)pDevice;
5873 + pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
5874 + pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
5875 +
5876 + add_timer(&pDevice->sTimerCommand);
5877 +
5878 + return;
5879 }
5880
5881 void vRunCommand(void *hDeviceContext)
5882 diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
5883 index 46c2959..c359252 100644
5884 --- a/drivers/staging/vt6656/wpa2.h
5885 +++ b/drivers/staging/vt6656/wpa2.h
5886 @@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo {
5887 } PMKIDInfo, *PPMKIDInfo;
5888
5889 typedef struct tagSPMKIDCache {
5890 - unsigned long BSSIDInfoCount;
5891 - PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
5892 + u32 BSSIDInfoCount;
5893 + PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
5894 } SPMKIDCache, *PSPMKIDCache;
5895
5896
5897 diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
5898 index 6edefde..f2a73bd 100644
5899 --- a/drivers/staging/zram/zram_drv.c
5900 +++ b/drivers/staging/zram/zram_drv.c
5901 @@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
5902 return bvec->bv_len != PAGE_SIZE;
5903 }
5904
5905 -static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5906 - u32 index, int offset, struct bio *bio)
5907 +static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
5908 {
5909 - int ret;
5910 - size_t clen;
5911 - struct page *page;
5912 - unsigned char *user_mem, *cmem, *uncmem = NULL;
5913 -
5914 - page = bvec->bv_page;
5915 -
5916 - if (zram_test_flag(zram, index, ZRAM_ZERO)) {
5917 - handle_zero_page(bvec);
5918 - return 0;
5919 - }
5920 + int ret = LZO_E_OK;
5921 + size_t clen = PAGE_SIZE;
5922 + unsigned char *cmem;
5923 + unsigned long handle = zram->table[index].handle;
5924
5925 - /* Requested page is not present in compressed area */
5926 - if (unlikely(!zram->table[index].handle)) {
5927 - pr_debug("Read before write: sector=%lu, size=%u",
5928 - (ulong)(bio->bi_sector), bio->bi_size);
5929 - handle_zero_page(bvec);
5930 + if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
5931 + memset(mem, 0, PAGE_SIZE);
5932 return 0;
5933 }
5934
5935 - if (is_partial_io(bvec)) {
5936 - /* Use a temporary buffer to decompress the page */
5937 - uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
5938 - if (!uncmem) {
5939 - pr_info("Error allocating temp memory!\n");
5940 - return -ENOMEM;
5941 - }
5942 - }
5943 -
5944 - user_mem = kmap_atomic(page);
5945 - if (!is_partial_io(bvec))
5946 - uncmem = user_mem;
5947 - clen = PAGE_SIZE;
5948 -
5949 - cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
5950 - ZS_MM_RO);
5951 -
5952 - if (zram->table[index].size == PAGE_SIZE) {
5953 - memcpy(uncmem, cmem, PAGE_SIZE);
5954 - ret = LZO_E_OK;
5955 - } else {
5956 + cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
5957 + if (zram->table[index].size == PAGE_SIZE)
5958 + memcpy(mem, cmem, PAGE_SIZE);
5959 + else
5960 ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
5961 - uncmem, &clen);
5962 - }
5963 -
5964 - if (is_partial_io(bvec)) {
5965 - memcpy(user_mem + bvec->bv_offset, uncmem + offset,
5966 - bvec->bv_len);
5967 - kfree(uncmem);
5968 - }
5969 -
5970 - zs_unmap_object(zram->mem_pool, zram->table[index].handle);
5971 - kunmap_atomic(user_mem);
5972 + mem, &clen);
5973 + zs_unmap_object(zram->mem_pool, handle);
5974
5975 /* Should NEVER happen. Return bio error if it does. */
5976 if (unlikely(ret != LZO_E_OK)) {
5977 @@ -247,42 +210,62 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5978 return ret;
5979 }
5980
5981 - flush_dcache_page(page);
5982 -
5983 return 0;
5984 }
5985
5986 -static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
5987 +static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
5988 + u32 index, int offset, struct bio *bio)
5989 {
5990 int ret;
5991 - size_t clen = PAGE_SIZE;
5992 - unsigned char *cmem;
5993 - unsigned long handle = zram->table[index].handle;
5994 + struct page *page;
5995 + unsigned char *user_mem, *uncmem = NULL;
5996
5997 - if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
5998 - memset(mem, 0, PAGE_SIZE);
5999 + page = bvec->bv_page;
6000 +
6001 + if (unlikely(!zram->table[index].handle) ||
6002 + zram_test_flag(zram, index, ZRAM_ZERO)) {
6003 + handle_zero_page(bvec);
6004 return 0;
6005 }
6006
6007 - cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
6008 - ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
6009 - mem, &clen);
6010 - zs_unmap_object(zram->mem_pool, handle);
6011 + user_mem = kmap_atomic(page);
6012 + if (is_partial_io(bvec))
6013 + /* Use a temporary buffer to decompress the page */
6014 + uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
6015 + else
6016 + uncmem = user_mem;
6017 +
6018 + if (!uncmem) {
6019 + pr_info("Unable to allocate temp memory\n");
6020 + ret = -ENOMEM;
6021 + goto out_cleanup;
6022 + }
6023
6024 + ret = zram_decompress_page(zram, uncmem, index);
6025 /* Should NEVER happen. Return bio error if it does. */
6026 if (unlikely(ret != LZO_E_OK)) {
6027 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
6028 zram_stat64_inc(zram, &zram->stats.failed_reads);
6029 - return ret;
6030 + goto out_cleanup;
6031 }
6032
6033 - return 0;
6034 + if (is_partial_io(bvec))
6035 + memcpy(user_mem + bvec->bv_offset, uncmem + offset,
6036 + bvec->bv_len);
6037 +
6038 + flush_dcache_page(page);
6039 + ret = 0;
6040 +out_cleanup:
6041 + kunmap_atomic(user_mem);
6042 + if (is_partial_io(bvec))
6043 + kfree(uncmem);
6044 + return ret;
6045 }
6046
6047 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6048 int offset)
6049 {
6050 - int ret;
6051 + int ret = 0;
6052 size_t clen;
6053 unsigned long handle;
6054 struct page *page;
6055 @@ -302,11 +285,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6056 ret = -ENOMEM;
6057 goto out;
6058 }
6059 - ret = zram_read_before_write(zram, uncmem, index);
6060 - if (ret) {
6061 - kfree(uncmem);
6062 + ret = zram_decompress_page(zram, uncmem, index);
6063 + if (ret)
6064 goto out;
6065 - }
6066 }
6067
6068 /*
6069 @@ -319,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6070
6071 user_mem = kmap_atomic(page);
6072
6073 - if (is_partial_io(bvec))
6074 + if (is_partial_io(bvec)) {
6075 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
6076 bvec->bv_len);
6077 - else
6078 + kunmap_atomic(user_mem);
6079 + user_mem = NULL;
6080 + } else {
6081 uncmem = user_mem;
6082 + }
6083
6084 if (page_zero_filled(uncmem)) {
6085 - kunmap_atomic(user_mem);
6086 - if (is_partial_io(bvec))
6087 - kfree(uncmem);
6088 + if (!is_partial_io(bvec))
6089 + kunmap_atomic(user_mem);
6090 zram_stat_inc(&zram->stats.pages_zero);
6091 zram_set_flag(zram, index, ZRAM_ZERO);
6092 ret = 0;
6093 @@ -338,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6094 ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
6095 zram->compress_workmem);
6096
6097 - kunmap_atomic(user_mem);
6098 - if (is_partial_io(bvec))
6099 - kfree(uncmem);
6100 + if (!is_partial_io(bvec)) {
6101 + kunmap_atomic(user_mem);
6102 + user_mem = NULL;
6103 + uncmem = NULL;
6104 + }
6105
6106 if (unlikely(ret != LZO_E_OK)) {
6107 pr_err("Compression failed! err=%d\n", ret);
6108 @@ -349,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6109
6110 if (unlikely(clen > max_zpage_size)) {
6111 zram_stat_inc(&zram->stats.bad_compress);
6112 - src = uncmem;
6113 clen = PAGE_SIZE;
6114 + src = NULL;
6115 + if (is_partial_io(bvec))
6116 + src = uncmem;
6117 }
6118
6119 handle = zs_malloc(zram->mem_pool, clen);
6120 @@ -362,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6121 }
6122 cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
6123
6124 + if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
6125 + src = kmap_atomic(page);
6126 memcpy(cmem, src, clen);
6127 + if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
6128 + kunmap_atomic(src);
6129
6130 zs_unmap_object(zram->mem_pool, handle);
6131
6132 @@ -375,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
6133 if (clen <= PAGE_SIZE / 2)
6134 zram_stat_inc(&zram->stats.good_compress);
6135
6136 - return 0;
6137 -
6138 out:
6139 + if (is_partial_io(bvec))
6140 + kfree(uncmem);
6141 +
6142 if (ret)
6143 zram_stat64_inc(zram, &zram->stats.failed_writes);
6144 return ret;
6145 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
6146 index 035c2c7..bb34855 100644
6147 --- a/drivers/target/iscsi/iscsi_target.c
6148 +++ b/drivers/target/iscsi/iscsi_target.c
6149 @@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
6150 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
6151 spin_lock(&cmd->istate_lock);
6152 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
6153 - (cmd->stat_sn < exp_statsn)) {
6154 + iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
6155 cmd->i_state = ISTATE_REMOVE;
6156 spin_unlock(&cmd->istate_lock);
6157 iscsit_add_cmd_to_immediate_queue(cmd, conn,
6158 @@ -2360,7 +2360,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
6159 if (!conn_p)
6160 return;
6161
6162 - cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
6163 + cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
6164 if (!cmd) {
6165 iscsit_dec_conn_usage_count(conn_p);
6166 return;
6167 diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
6168 index 17d8c20..ba6091b 100644
6169 --- a/drivers/target/iscsi/iscsi_target_erl2.c
6170 +++ b/drivers/target/iscsi/iscsi_target_erl2.c
6171 @@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
6172 * made generic here.
6173 */
6174 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
6175 - (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
6176 + iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
6177 list_del(&cmd->i_conn_node);
6178 spin_unlock_bh(&conn->cmd_lock);
6179 iscsit_free_cmd(cmd);
6180 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
6181 index f8dbec0..10b40bb 100644
6182 --- a/drivers/target/iscsi/iscsi_target_login.c
6183 +++ b/drivers/target/iscsi/iscsi_target_login.c
6184 @@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
6185
6186 initiatorname_param = iscsi_find_param_from_key(
6187 INITIATORNAME, conn->param_list);
6188 - if (!initiatorname_param)
6189 - return -1;
6190 -
6191 sessiontype_param = iscsi_find_param_from_key(
6192 SESSIONTYPE, conn->param_list);
6193 - if (!sessiontype_param)
6194 + if (!initiatorname_param || !sessiontype_param) {
6195 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6196 + ISCSI_LOGIN_STATUS_MISSING_FIELDS);
6197 return -1;
6198 + }
6199
6200 sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
6201
6202 diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
6203 index e9053a0..9d902ae 100644
6204 --- a/drivers/target/iscsi/iscsi_target_nego.c
6205 +++ b/drivers/target/iscsi/iscsi_target_nego.c
6206 @@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
6207 login->req_buf,
6208 payload_length,
6209 conn);
6210 - if (ret < 0)
6211 + if (ret < 0) {
6212 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6213 + ISCSI_LOGIN_STATUS_INIT_ERR);
6214 return -1;
6215 + }
6216
6217 if (login->first_request)
6218 if (iscsi_target_check_first_request(conn, login) < 0)
6219 @@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
6220 login->rsp_buf,
6221 &login->rsp_length,
6222 conn->param_list);
6223 - if (ret < 0)
6224 + if (ret < 0) {
6225 + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
6226 + ISCSI_LOGIN_STATUS_INIT_ERR);
6227 return -1;
6228 + }
6229
6230 if (!login->auth_complete &&
6231 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
6232 diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
6233 index 4a99820..9d4417a 100644
6234 --- a/drivers/target/iscsi/iscsi_target_tmr.c
6235 +++ b/drivers/target/iscsi/iscsi_target_tmr.c
6236 @@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
6237 if (!ref_cmd) {
6238 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
6239 " %hu.\n", hdr->rtt, conn->cid);
6240 - return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
6241 - be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
6242 + return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
6243 + iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
6244 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
6245 }
6246 if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
6247 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
6248 index 0d6d7c1..f9e1e8a 100644
6249 --- a/drivers/target/sbp/sbp_target.c
6250 +++ b/drivers/target/sbp/sbp_target.c
6251 @@ -2207,20 +2207,23 @@ static struct se_portal_group *sbp_make_tpg(
6252 tport->mgt_agt = sbp_management_agent_register(tport);
6253 if (IS_ERR(tport->mgt_agt)) {
6254 ret = PTR_ERR(tport->mgt_agt);
6255 - kfree(tpg);
6256 - return ERR_PTR(ret);
6257 + goto out_free_tpg;
6258 }
6259
6260 ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
6261 &tpg->se_tpg, (void *)tpg,
6262 TRANSPORT_TPG_TYPE_NORMAL);
6263 - if (ret < 0) {
6264 - sbp_management_agent_unregister(tport->mgt_agt);
6265 - kfree(tpg);
6266 - return ERR_PTR(ret);
6267 - }
6268 + if (ret < 0)
6269 + goto out_unreg_mgt_agt;
6270
6271 return &tpg->se_tpg;
6272 +
6273 +out_unreg_mgt_agt:
6274 + sbp_management_agent_unregister(tport->mgt_agt);
6275 +out_free_tpg:
6276 + tport->tpg = NULL;
6277 + kfree(tpg);
6278 + return ERR_PTR(ret);
6279 }
6280
6281 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
6282 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
6283 index 0360383..c639b42 100644
6284 --- a/drivers/target/target_core_file.c
6285 +++ b/drivers/target/target_core_file.c
6286 @@ -260,7 +260,7 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
6287
6288 for_each_sg(sgl, sg, sgl_nents, i) {
6289 iov[i].iov_len = sg->length;
6290 - iov[i].iov_base = sg_virt(sg);
6291 + iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
6292 }
6293
6294 old_fs = get_fs();
6295 @@ -268,6 +268,8 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
6296 ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
6297 set_fs(old_fs);
6298
6299 + for_each_sg(sgl, sg, sgl_nents, i)
6300 + kunmap(sg_page(sg));
6301 kfree(iov);
6302 /*
6303 * Return zeros and GOOD status even if the READ did not return
6304 @@ -313,7 +315,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
6305
6306 for_each_sg(sgl, sg, sgl_nents, i) {
6307 iov[i].iov_len = sg->length;
6308 - iov[i].iov_base = sg_virt(sg);
6309 + iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
6310 }
6311
6312 old_fs = get_fs();
6313 @@ -321,6 +323,9 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
6314 ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
6315 set_fs(old_fs);
6316
6317 + for_each_sg(sgl, sg, sgl_nents, i)
6318 + kunmap(sg_page(sg));
6319 +
6320 kfree(iov);
6321
6322 if (ret < 0 || ret != cmd->data_length) {
6323 diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
6324 index 9585010..12d6fa2 100644
6325 --- a/drivers/target/tcm_fc/tfc_sess.c
6326 +++ b/drivers/target/tcm_fc/tfc_sess.c
6327 @@ -430,7 +430,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
6328 {
6329 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
6330
6331 - transport_deregister_session(sess->se_sess);
6332 kfree(sess);
6333 }
6334
6335 @@ -438,6 +437,7 @@ static void ft_sess_free(struct kref *kref)
6336 {
6337 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
6338
6339 + transport_deregister_session(sess->se_sess);
6340 call_rcu(&sess->rcu, ft_sess_rcu_free);
6341 }
6342
6343 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
6344 index 8d809a8..2d92cce 100644
6345 --- a/drivers/usb/class/cdc-acm.c
6346 +++ b/drivers/usb/class/cdc-acm.c
6347 @@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
6348 { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
6349 .driver_info = NO_UNION_NORMAL,
6350 },
6351 + { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
6352 + .driver_info = NO_UNION_NORMAL,
6353 + },
6354 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
6355 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
6356 },
6357 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
6358 index 1af04bd..e6cc4e6 100644
6359 --- a/drivers/usb/core/hub.c
6360 +++ b/drivers/usb/core/hub.c
6361 @@ -876,6 +876,60 @@ static int hub_hub_status(struct usb_hub *hub,
6362 return ret;
6363 }
6364
6365 +static int hub_set_port_link_state(struct usb_hub *hub, int port1,
6366 + unsigned int link_status)
6367 +{
6368 + return set_port_feature(hub->hdev,
6369 + port1 | (link_status << 3),
6370 + USB_PORT_FEAT_LINK_STATE);
6371 +}
6372 +
6373 +/*
6374 + * If USB 3.0 ports are placed into the Disabled state, they will no longer
6375 + * detect any device connects or disconnects. This is generally not what the
6376 + * USB core wants, since it expects a disabled port to produce a port status
6377 + * change event when a new device connects.
6378 + *
6379 + * Instead, set the link state to Disabled, wait for the link to settle into
6380 + * that state, clear any change bits, and then put the port into the RxDetect
6381 + * state.
6382 + */
6383 +static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
6384 +{
6385 + int ret;
6386 + int total_time;
6387 + u16 portchange, portstatus;
6388 +
6389 + if (!hub_is_superspeed(hub->hdev))
6390 + return -EINVAL;
6391 +
6392 + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
6393 + if (ret) {
6394 + dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
6395 + port1, ret);
6396 + return ret;
6397 + }
6398 +
6399 + /* Wait for the link to enter the disabled state. */
6400 + for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
6401 + ret = hub_port_status(hub, port1, &portstatus, &portchange);
6402 + if (ret < 0)
6403 + return ret;
6404 +
6405 + if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
6406 + USB_SS_PORT_LS_SS_DISABLED)
6407 + break;
6408 + if (total_time >= HUB_DEBOUNCE_TIMEOUT)
6409 + break;
6410 + msleep(HUB_DEBOUNCE_STEP);
6411 + }
6412 + if (total_time >= HUB_DEBOUNCE_TIMEOUT)
6413 + dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
6414 + port1, total_time);
6415 +
6416 + return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
6417 +}
6418 +
6419 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
6420 {
6421 struct usb_device *hdev = hub->hdev;
6422 @@ -884,8 +938,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
6423 if (hub->ports[port1 - 1]->child && set_state)
6424 usb_set_device_state(hub->ports[port1 - 1]->child,
6425 USB_STATE_NOTATTACHED);
6426 - if (!hub->error && !hub_is_superspeed(hub->hdev))
6427 - ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
6428 + if (!hub->error) {
6429 + if (hub_is_superspeed(hub->hdev))
6430 + ret = hub_usb3_port_disable(hub, port1);
6431 + else
6432 + ret = clear_port_feature(hdev, port1,
6433 + USB_PORT_FEAT_ENABLE);
6434 + }
6435 if (ret)
6436 dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
6437 port1, ret);
6438 @@ -2401,7 +2460,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
6439 #define HUB_SHORT_RESET_TIME 10
6440 #define HUB_BH_RESET_TIME 50
6441 #define HUB_LONG_RESET_TIME 200
6442 -#define HUB_RESET_TIMEOUT 500
6443 +#define HUB_RESET_TIMEOUT 800
6444
6445 static int hub_port_reset(struct usb_hub *hub, int port1,
6446 struct usb_device *udev, unsigned int delay, bool warm);
6447 @@ -2436,6 +2495,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6448 if (ret < 0)
6449 return ret;
6450
6451 + /* The port state is unknown until the reset completes. */
6452 + if ((portstatus & USB_PORT_STAT_RESET))
6453 + goto delay;
6454 +
6455 /*
6456 * Some buggy devices require a warm reset to be issued even
6457 * when the port appears not to be connected.
6458 @@ -2481,11 +2544,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6459 if ((portchange & USB_PORT_STAT_C_CONNECTION))
6460 return -ENOTCONN;
6461
6462 - /* if we`ve finished resetting, then break out of
6463 - * the loop
6464 - */
6465 - if (!(portstatus & USB_PORT_STAT_RESET) &&
6466 - (portstatus & USB_PORT_STAT_ENABLE)) {
6467 + if ((portstatus & USB_PORT_STAT_ENABLE)) {
6468 if (hub_is_wusb(hub))
6469 udev->speed = USB_SPEED_WIRELESS;
6470 else if (hub_is_superspeed(hub->hdev))
6471 @@ -2499,10 +2558,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
6472 return 0;
6473 }
6474 } else {
6475 - if (portchange & USB_PORT_STAT_C_BH_RESET)
6476 - return 0;
6477 + if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
6478 + hub_port_warm_reset_required(hub,
6479 + portstatus))
6480 + return -ENOTCONN;
6481 +
6482 + return 0;
6483 }
6484
6485 +delay:
6486 /* switch to the long delay after two short delay failures */
6487 if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
6488 delay = HUB_LONG_RESET_TIME;
6489 @@ -2526,14 +2590,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
6490 msleep(10 + 40);
6491 update_devnum(udev, 0);
6492 hcd = bus_to_hcd(udev->bus);
6493 - if (hcd->driver->reset_device) {
6494 - *status = hcd->driver->reset_device(hcd, udev);
6495 - if (*status < 0) {
6496 - dev_err(&udev->dev, "Cannot reset "
6497 - "HCD device state\n");
6498 - break;
6499 - }
6500 - }
6501 + /* The xHC may think the device is already reset,
6502 + * so ignore the status.
6503 + */
6504 + if (hcd->driver->reset_device)
6505 + hcd->driver->reset_device(hcd, udev);
6506 }
6507 /* FALL THROUGH */
6508 case -ENOTCONN:
6509 @@ -2541,16 +2602,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
6510 clear_port_feature(hub->hdev,
6511 port1, USB_PORT_FEAT_C_RESET);
6512 /* FIXME need disconnect() for NOTATTACHED device */
6513 - if (warm) {
6514 + if (hub_is_superspeed(hub->hdev)) {
6515 clear_port_feature(hub->hdev, port1,
6516 USB_PORT_FEAT_C_BH_PORT_RESET);
6517 clear_port_feature(hub->hdev, port1,
6518 USB_PORT_FEAT_C_PORT_LINK_STATE);
6519 - } else {
6520 + }
6521 + if (!warm)
6522 usb_set_device_state(udev, *status
6523 ? USB_STATE_NOTATTACHED
6524 : USB_STATE_DEFAULT);
6525 - }
6526 break;
6527 }
6528 }
6529 @@ -2899,7 +2960,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
6530 static int finish_port_resume(struct usb_device *udev)
6531 {
6532 int status = 0;
6533 - u16 devstatus;
6534 + u16 devstatus = 0;
6535
6536 /* caller owns the udev device lock */
6537 dev_dbg(&udev->dev, "%s\n",
6538 @@ -2944,7 +3005,13 @@ static int finish_port_resume(struct usb_device *udev)
6539 if (status) {
6540 dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
6541 status);
6542 - } else if (udev->actconfig) {
6543 + /*
6544 + * There are a few quirky devices which violate the standard
6545 + * by claiming to have remote wakeup enabled after a reset,
6546 + * which crash if the feature is cleared, hence check for
6547 + * udev->reset_resume
6548 + */
6549 + } else if (udev->actconfig && !udev->reset_resume) {
6550 le16_to_cpus(&devstatus);
6551 if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
6552 status = usb_control_msg(udev,
6553 @@ -4572,9 +4639,14 @@ static void hub_events(void)
6554 * SS.Inactive state.
6555 */
6556 if (hub_port_warm_reset_required(hub, portstatus)) {
6557 + int status;
6558 +
6559 dev_dbg(hub_dev, "warm reset port %d\n", i);
6560 - hub_port_reset(hub, i, NULL,
6561 + status = hub_port_reset(hub, i, NULL,
6562 HUB_BH_RESET_TIME, true);
6563 + if (status < 0)
6564 + hub_port_disable(hub, i, 1);
6565 + connect_change = 0;
6566 }
6567
6568 if (connect_change)
6569 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6570 index fdefd9c..3113c1d 100644
6571 --- a/drivers/usb/core/quirks.c
6572 +++ b/drivers/usb/core/quirks.c
6573 @@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
6574 /* Creative SB Audigy 2 NX */
6575 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
6576
6577 + /* Microsoft LifeCam-VX700 v2.0 */
6578 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
6579 +
6580 /* Logitech Quickcam Fusion */
6581 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
6582
6583 diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
6584 index 0f7541b..559b06c 100644
6585 --- a/drivers/usb/gadget/dummy_hcd.c
6586 +++ b/drivers/usb/gadget/dummy_hcd.c
6587 @@ -126,10 +126,7 @@ static const char ep0name[] = "ep0";
6588 static const char *const ep_name[] = {
6589 ep0name, /* everyone has ep0 */
6590
6591 - /* act like a net2280: high speed, six configurable endpoints */
6592 - "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
6593 -
6594 - /* or like pxa250: fifteen fixed function endpoints */
6595 + /* act like a pxa250: fifteen fixed function endpoints */
6596 "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
6597 "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
6598 "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
6599 @@ -137,6 +134,10 @@ static const char *const ep_name[] = {
6600
6601 /* or like sa1100: two fixed function endpoints */
6602 "ep1out-bulk", "ep2in-bulk",
6603 +
6604 + /* and now some generic EPs so we have enough in multi config */
6605 + "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
6606 + "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
6607 };
6608 #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
6609
6610 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
6611 index f42b68e..d2ea004 100644
6612 --- a/drivers/usb/host/ehci-pci.c
6613 +++ b/drivers/usb/host/ehci-pci.c
6614 @@ -192,6 +192,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
6615 break;
6616 }
6617
6618 + /* optional debug port, normally in the first BAR */
6619 + temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
6620 + if (temp) {
6621 + pci_read_config_dword(pdev, temp, &temp);
6622 + temp >>= 16;
6623 + if (((temp >> 13) & 7) == 1) {
6624 + u32 hcs_params = ehci_readl(ehci,
6625 + &ehci->caps->hcs_params);
6626 +
6627 + temp &= 0x1fff;
6628 + ehci->debug = hcd->regs + temp;
6629 + temp = ehci_readl(ehci, &ehci->debug->control);
6630 + ehci_info(ehci, "debug port %d%s\n",
6631 + HCS_DEBUG_PORT(hcs_params),
6632 + (temp & DBGP_ENABLED) ? " IN USE" : "");
6633 + if (!(temp & DBGP_ENABLED))
6634 + ehci->debug = NULL;
6635 + }
6636 + }
6637 +
6638 retval = ehci_setup(hcd);
6639 if (retval)
6640 return retval;
6641 @@ -226,25 +246,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
6642 break;
6643 }
6644
6645 - /* optional debug port, normally in the first BAR */
6646 - temp = pci_find_capability(pdev, 0x0a);
6647 - if (temp) {
6648 - pci_read_config_dword(pdev, temp, &temp);
6649 - temp >>= 16;
6650 - if ((temp & (3 << 13)) == (1 << 13)) {
6651 - temp &= 0x1fff;
6652 - ehci->debug = hcd->regs + temp;
6653 - temp = ehci_readl(ehci, &ehci->debug->control);
6654 - ehci_info(ehci, "debug port %d%s\n",
6655 - HCS_DEBUG_PORT(ehci->hcs_params),
6656 - (temp & DBGP_ENABLED)
6657 - ? " IN USE"
6658 - : "");
6659 - if (!(temp & DBGP_ENABLED))
6660 - ehci->debug = NULL;
6661 - }
6662 - }
6663 -
6664 /* at least the Genesys GL880S needs fixup here */
6665 temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
6666 temp &= 0x0f;
6667 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
6668 index a686cf4..6891442 100644
6669 --- a/drivers/usb/host/xhci-hub.c
6670 +++ b/drivers/usb/host/xhci-hub.c
6671 @@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
6672 break;
6673 case USB_PORT_FEAT_LINK_STATE:
6674 temp = xhci_readl(xhci, port_array[wIndex]);
6675 +
6676 + /* Disable port */
6677 + if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
6678 + xhci_dbg(xhci, "Disable port %d\n", wIndex);
6679 + temp = xhci_port_state_to_neutral(temp);
6680 + /*
6681 + * Clear all change bits, so that we get a new
6682 + * connection event.
6683 + */
6684 + temp |= PORT_CSC | PORT_PEC | PORT_WRC |
6685 + PORT_OCC | PORT_RC | PORT_PLC |
6686 + PORT_CEC;
6687 + xhci_writel(xhci, temp | PORT_PE,
6688 + port_array[wIndex]);
6689 + temp = xhci_readl(xhci, port_array[wIndex]);
6690 + break;
6691 + }
6692 +
6693 + /* Put link in RxDetect (enable port) */
6694 + if (link_state == USB_SS_PORT_LS_RX_DETECT) {
6695 + xhci_dbg(xhci, "Enable port %d\n", wIndex);
6696 + xhci_set_link_state(xhci, port_array, wIndex,
6697 + link_state);
6698 + temp = xhci_readl(xhci, port_array[wIndex]);
6699 + break;
6700 + }
6701 +
6702 /* Software should not attempt to set
6703 - * port link state above '5' (Rx.Detect) and the port
6704 + * port link state above '3' (U3) and the port
6705 * must be enabled.
6706 */
6707 if ((temp & PORT_PE) == 0 ||
6708 - (link_state > USB_SS_PORT_LS_RX_DETECT)) {
6709 + (link_state > USB_SS_PORT_LS_U3)) {
6710 xhci_warn(xhci, "Cannot set link state.\n");
6711 goto error;
6712 }
6713 @@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
6714 int max_ports;
6715 __le32 __iomem **port_array;
6716 struct xhci_bus_state *bus_state;
6717 + bool reset_change = false;
6718
6719 max_ports = xhci_get_ports(hcd, &port_array);
6720 bus_state = &xhci->bus_state[hcd_index(hcd)];
6721 @@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
6722 buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
6723 status = 1;
6724 }
6725 + if ((temp & PORT_RC))
6726 + reset_change = true;
6727 + }
6728 + if (!status && !reset_change) {
6729 + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
6730 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6731 }
6732 spin_unlock_irqrestore(&xhci->lock, flags);
6733 return status ? retval : 0;
6734 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
6735 index fb51c70..35616ff 100644
6736 --- a/drivers/usb/host/xhci-mem.c
6737 +++ b/drivers/usb/host/xhci-mem.c
6738 @@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
6739 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
6740 struct usb_host_endpoint *ep)
6741 {
6742 + if (ep->desc.bInterval == 0)
6743 + return 0;
6744 return xhci_microframes_to_exponent(udev, ep,
6745 ep->desc.bInterval, 0, 15);
6746 }
6747 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
6748 index 1189cf3..e80c49d 100644
6749 --- a/drivers/usb/host/xhci-ring.c
6750 +++ b/drivers/usb/host/xhci-ring.c
6751 @@ -1725,6 +1725,15 @@ cleanup:
6752 if (bogus_port_status)
6753 return;
6754
6755 + /*
6756 + * xHCI port-status-change events occur when the "or" of all the
6757 + * status-change bits in the portsc register changes from 0 to 1.
6758 + * New status changes won't cause an event if any other change
6759 + * bits are still set. When an event occurs, switch over to
6760 + * polling to avoid losing status changes.
6761 + */
6762 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
6763 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6764 spin_unlock(&xhci->lock);
6765 /* Pass this up to the core */
6766 usb_hcd_poll_rh_status(hcd);
6767 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
6768 index 389829e..c9b886e 100644
6769 --- a/drivers/usb/host/xhci.c
6770 +++ b/drivers/usb/host/xhci.c
6771 @@ -880,6 +880,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
6772 struct usb_hcd *hcd = xhci_to_hcd(xhci);
6773 u32 command;
6774
6775 + /* Don't poll the roothubs on bus suspend. */
6776 + xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
6777 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6778 + del_timer_sync(&hcd->rh_timer);
6779 +
6780 spin_lock_irq(&xhci->lock);
6781 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
6782 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
6783 @@ -1064,6 +1069,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
6784 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
6785 compliance_mode_recovery_timer_init(xhci);
6786
6787 + /* Re-enable port polling. */
6788 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
6789 + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
6790 + usb_hcd_poll_rh_status(hcd);
6791 +
6792 return retval;
6793 }
6794 #endif /* CONFIG_PM */
6795 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
6796 index bb56a0e..30e8551 100644
6797 --- a/drivers/usb/musb/musb_core.c
6798 +++ b/drivers/usb/musb/musb_core.c
6799 @@ -2351,10 +2351,7 @@ static int __init musb_init(void)
6800 if (usb_disabled())
6801 return 0;
6802
6803 - pr_info("%s: version " MUSB_VERSION ", "
6804 - "?dma?"
6805 - ", "
6806 - "otg (peripheral+host)",
6807 + pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
6808 musb_driver_name);
6809 return platform_driver_register(&musb_driver);
6810 }
6811 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
6812 index 2641d36..71e80ab 100644
6813 --- a/drivers/usb/serial/ftdi_sio.c
6814 +++ b/drivers/usb/serial/ftdi_sio.c
6815 @@ -876,6 +876,8 @@ static struct usb_device_id id_table_combined [] = {
6816 { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
6817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
6818 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
6819 + /* Crucible Devices */
6820 + { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
6821 { }, /* Optional parameter entry */
6822 { } /* Terminating entry */
6823 };
6824 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
6825 index 049b6e7..fa5d560 100644
6826 --- a/drivers/usb/serial/ftdi_sio_ids.h
6827 +++ b/drivers/usb/serial/ftdi_sio_ids.h
6828 @@ -1259,3 +1259,9 @@
6829 * ATI command output: Cinterion MC55i
6830 */
6831 #define FTDI_CINTERION_MC55I_PID 0xA951
6832 +
6833 +/*
6834 + * Product: Comet Caller ID decoder
6835 + * Manufacturer: Crucible Technologies
6836 + */
6837 +#define FTDI_CT_COMET_PID 0x8e08
6838 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
6839 index da36dc7..fd47369 100644
6840 --- a/drivers/usb/serial/option.c
6841 +++ b/drivers/usb/serial/option.c
6842 @@ -289,6 +289,7 @@ static void option_instat_callback(struct urb *urb);
6843 #define ALCATEL_VENDOR_ID 0x1bbb
6844 #define ALCATEL_PRODUCT_X060S_X200 0x0000
6845 #define ALCATEL_PRODUCT_X220_X500D 0x0017
6846 +#define ALCATEL_PRODUCT_L100V 0x011e
6847
6848 #define PIRELLI_VENDOR_ID 0x1266
6849 #define PIRELLI_PRODUCT_C100_1 0x1002
6850 @@ -430,9 +431,12 @@ static void option_instat_callback(struct urb *urb);
6851 #define MEDIATEK_VENDOR_ID 0x0e8d
6852 #define MEDIATEK_PRODUCT_DC_1COM 0x00a0
6853 #define MEDIATEK_PRODUCT_DC_4COM 0x00a5
6854 +#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
6855 #define MEDIATEK_PRODUCT_DC_5COM 0x00a4
6856 #define MEDIATEK_PRODUCT_7208_1COM 0x7101
6857 #define MEDIATEK_PRODUCT_7208_2COM 0x7102
6858 +#define MEDIATEK_PRODUCT_7103_2COM 0x7103
6859 +#define MEDIATEK_PRODUCT_7106_2COM 0x7106
6860 #define MEDIATEK_PRODUCT_FP_1COM 0x0003
6861 #define MEDIATEK_PRODUCT_FP_2COM 0x0023
6862 #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
6863 @@ -442,6 +446,10 @@ static void option_instat_callback(struct urb *urb);
6864 #define CELLIENT_VENDOR_ID 0x2692
6865 #define CELLIENT_PRODUCT_MEN200 0x9005
6866
6867 +/* Hyundai Petatel Inc. products */
6868 +#define PETATEL_VENDOR_ID 0x1ff4
6869 +#define PETATEL_PRODUCT_NP10T 0x600e
6870 +
6871 /* some devices interfaces need special handling due to a number of reasons */
6872 enum option_blacklist_reason {
6873 OPTION_BLACKLIST_NONE = 0,
6874 @@ -924,7 +932,8 @@ static const struct usb_device_id option_ids[] = {
6875 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
6876 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
6877 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
6878 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
6879 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
6880 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6881 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
6882 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
6883 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6884 @@ -1191,6 +1200,8 @@ static const struct usb_device_id option_ids[] = {
6885 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
6886 },
6887 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
6888 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
6889 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
6890 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
6891 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
6892 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
6893 @@ -1295,7 +1306,12 @@ static const struct usb_device_id option_ids[] = {
6894 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
6895 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
6896 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
6897 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
6898 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
6899 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
6900 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
6901 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
6902 + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
6903 { } /* Terminating entry */
6904 };
6905 MODULE_DEVICE_TABLE(usb, option_ids);
6906 diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
6907 index 49619b4..f2a49ef 100644
6908 --- a/drivers/video/mxsfb.c
6909 +++ b/drivers/video/mxsfb.c
6910 @@ -369,7 +369,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
6911 loop--;
6912 }
6913
6914 - writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
6915 + reg = readl(host->base + LCDC_VDCTRL4);
6916 + writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
6917
6918 clk_disable_unprepare(host->clk);
6919
6920 diff --git a/fs/buffer.c b/fs/buffer.c
6921 index ec0aca8..20c0aae 100644
6922 --- a/fs/buffer.c
6923 +++ b/fs/buffer.c
6924 @@ -2939,6 +2939,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
6925 void *kaddr = kmap_atomic(bh->b_page);
6926 memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
6927 kunmap_atomic(kaddr);
6928 + flush_dcache_page(bh->b_page);
6929 }
6930 }
6931
6932 diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
6933 index 6690269..d7293d6 100644
6934 --- a/fs/ceph/addr.c
6935 +++ b/fs/ceph/addr.c
6936 @@ -267,6 +267,14 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
6937 kfree(req->r_pages);
6938 }
6939
6940 +static void ceph_unlock_page_vector(struct page **pages, int num_pages)
6941 +{
6942 + int i;
6943 +
6944 + for (i = 0; i < num_pages; i++)
6945 + unlock_page(pages[i]);
6946 +}
6947 +
6948 /*
6949 * start an async read(ahead) operation. return nr_pages we submitted
6950 * a read for on success, or negative error code.
6951 @@ -347,6 +355,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
6952 return nr_pages;
6953
6954 out_pages:
6955 + ceph_unlock_page_vector(pages, nr_pages);
6956 ceph_release_page_vector(pages, nr_pages);
6957 out:
6958 ceph_osdc_put_request(req);
6959 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
6960 index 3251e9c..6be9bf7 100644
6961 --- a/fs/ceph/caps.c
6962 +++ b/fs/ceph/caps.c
6963 @@ -1349,11 +1349,15 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
6964 if (!ci->i_head_snapc)
6965 ci->i_head_snapc = ceph_get_snap_context(
6966 ci->i_snap_realm->cached_context);
6967 - dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
6968 - ci->i_head_snapc);
6969 + dout(" inode %p now dirty snapc %p auth cap %p\n",
6970 + &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
6971 BUG_ON(!list_empty(&ci->i_dirty_item));
6972 spin_lock(&mdsc->cap_dirty_lock);
6973 - list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
6974 + if (ci->i_auth_cap)
6975 + list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
6976 + else
6977 + list_add(&ci->i_dirty_item,
6978 + &mdsc->cap_dirty_migrating);
6979 spin_unlock(&mdsc->cap_dirty_lock);
6980 if (ci->i_flushing_caps == 0) {
6981 ihold(inode);
6982 @@ -2388,7 +2392,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
6983 &atime);
6984
6985 /* max size increase? */
6986 - if (max_size != ci->i_max_size) {
6987 + if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
6988 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
6989 ci->i_max_size = max_size;
6990 if (max_size >= ci->i_wanted_max_size) {
6991 @@ -2745,6 +2749,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
6992
6993 /* make sure we re-request max_size, if necessary */
6994 spin_lock(&ci->i_ceph_lock);
6995 + ci->i_wanted_max_size = 0; /* reset */
6996 ci->i_requested_max_size = 0;
6997 spin_unlock(&ci->i_ceph_lock);
6998 }
6999 @@ -2840,8 +2845,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
7000 case CEPH_CAP_OP_IMPORT:
7001 handle_cap_import(mdsc, inode, h, session,
7002 snaptrace, snaptrace_len);
7003 - ceph_check_caps(ceph_inode(inode), 0, session);
7004 - goto done_unlocked;
7005 }
7006
7007 /* the rest require a cap */
7008 @@ -2858,6 +2861,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
7009 switch (op) {
7010 case CEPH_CAP_OP_REVOKE:
7011 case CEPH_CAP_OP_GRANT:
7012 + case CEPH_CAP_OP_IMPORT:
7013 handle_cap_grant(inode, h, session, cap, msg->middle);
7014 goto done_unlocked;
7015
7016 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
7017 index ba95eea..2971eaa 100644
7018 --- a/fs/ceph/inode.c
7019 +++ b/fs/ceph/inode.c
7020 @@ -1466,7 +1466,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
7021 {
7022 struct ceph_inode_info *ci = ceph_inode(inode);
7023 u64 to;
7024 - int wrbuffer_refs, wake = 0;
7025 + int wrbuffer_refs, finish = 0;
7026
7027 retry:
7028 spin_lock(&ci->i_ceph_lock);
7029 @@ -1498,15 +1498,18 @@ retry:
7030 truncate_inode_pages(inode->i_mapping, to);
7031
7032 spin_lock(&ci->i_ceph_lock);
7033 - ci->i_truncate_pending--;
7034 - if (ci->i_truncate_pending == 0)
7035 - wake = 1;
7036 + if (to == ci->i_truncate_size) {
7037 + ci->i_truncate_pending = 0;
7038 + finish = 1;
7039 + }
7040 spin_unlock(&ci->i_ceph_lock);
7041 + if (!finish)
7042 + goto retry;
7043
7044 if (wrbuffer_refs == 0)
7045 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
7046 - if (wake)
7047 - wake_up_all(&ci->i_cap_wq);
7048 +
7049 + wake_up_all(&ci->i_cap_wq);
7050 }
7051
7052
7053 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
7054 index 1bcf712..0d9864f 100644
7055 --- a/fs/ceph/mds_client.c
7056 +++ b/fs/ceph/mds_client.c
7057 @@ -1876,9 +1876,14 @@ finish:
7058 static void __wake_requests(struct ceph_mds_client *mdsc,
7059 struct list_head *head)
7060 {
7061 - struct ceph_mds_request *req, *nreq;
7062 + struct ceph_mds_request *req;
7063 + LIST_HEAD(tmp_list);
7064 +
7065 + list_splice_init(head, &tmp_list);
7066
7067 - list_for_each_entry_safe(req, nreq, head, r_wait) {
7068 + while (!list_empty(&tmp_list)) {
7069 + req = list_entry(tmp_list.next,
7070 + struct ceph_mds_request, r_wait);
7071 list_del_init(&req->r_wait);
7072 __do_request(mdsc, req);
7073 }
7074 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
7075 index 2eb43f2..e079899 100644
7076 --- a/fs/ceph/super.c
7077 +++ b/fs/ceph/super.c
7078 @@ -403,8 +403,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
7079 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
7080 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
7081 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
7082 - if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
7083 - seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
7084 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
7085 seq_printf(m, ",osdkeepalivetimeout=%d",
7086 opt->osd_keepalive_timeout);
7087 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
7088 index cd96649..39573ee 100644
7089 --- a/fs/eventpoll.c
7090 +++ b/fs/eventpoll.c
7091 @@ -1285,7 +1285,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
7092 * otherwise we might miss an event that happens between the
7093 * f_op->poll() call and the new event set registering.
7094 */
7095 - epi->event.events = event->events;
7096 + epi->event.events = event->events; /* need barrier below */
7097 pt._key = event->events;
7098 epi->event.data = event->data; /* protected by mtx */
7099 if (epi->event.events & EPOLLWAKEUP) {
7100 @@ -1296,6 +1296,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
7101 }
7102
7103 /*
7104 + * The following barrier has two effects:
7105 + *
7106 + * 1) Flush epi changes above to other CPUs. This ensures
7107 + * we do not miss events from ep_poll_callback if an
7108 + * event occurs immediately after we call f_op->poll().
7109 + * We need this because we did not take ep->lock while
7110 + * changing epi above (but ep_poll_callback does take
7111 + * ep->lock).
7112 + *
7113 + * 2) We also need to ensure we do not miss _past_ events
7114 + * when calling f_op->poll(). This barrier also
7115 + * pairs with the barrier in wq_has_sleeper (see
7116 + * comments for wq_has_sleeper).
7117 + *
7118 + * This barrier will now guarantee ep_poll_callback or f_op->poll
7119 + * (or both) will notice the readiness of an item.
7120 + */
7121 + smp_mb();
7122 +
7123 + /*
7124 * Get current event bits. We can safely use the file* here because
7125 * its usage count has been increased by the caller of this function.
7126 */
7127 diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
7128 index d3c5b88..e6e0d98 100644
7129 --- a/fs/ext4/acl.c
7130 +++ b/fs/ext4/acl.c
7131 @@ -423,8 +423,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
7132
7133 retry:
7134 handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
7135 - if (IS_ERR(handle))
7136 - return PTR_ERR(handle);
7137 + if (IS_ERR(handle)) {
7138 + error = PTR_ERR(handle);
7139 + goto release_and_out;
7140 + }
7141 error = ext4_set_acl(handle, inode, type, acl);
7142 ext4_journal_stop(handle);
7143 if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
7144 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7145 index 7011ac9..19bb769 100644
7146 --- a/fs/ext4/extents.c
7147 +++ b/fs/ext4/extents.c
7148 @@ -2190,13 +2190,14 @@ errout:
7149 * removes index from the index block.
7150 */
7151 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
7152 - struct ext4_ext_path *path)
7153 + struct ext4_ext_path *path, int depth)
7154 {
7155 int err;
7156 ext4_fsblk_t leaf;
7157
7158 /* free index block */
7159 - path--;
7160 + depth--;
7161 + path = path + depth;
7162 leaf = ext4_idx_pblock(path->p_idx);
7163 if (unlikely(path->p_hdr->eh_entries == 0)) {
7164 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
7165 @@ -2221,6 +2222,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
7166
7167 ext4_free_blocks(handle, inode, NULL, leaf, 1,
7168 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
7169 +
7170 + while (--depth >= 0) {
7171 + if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
7172 + break;
7173 + path--;
7174 + err = ext4_ext_get_access(handle, inode, path);
7175 + if (err)
7176 + break;
7177 + path->p_idx->ei_block = (path+1)->p_idx->ei_block;
7178 + err = ext4_ext_dirty(handle, inode, path);
7179 + if (err)
7180 + break;
7181 + }
7182 return err;
7183 }
7184
7185 @@ -2557,7 +2571,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
7186 /* if this leaf is free, then we should
7187 * remove it from index block above */
7188 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
7189 - err = ext4_ext_rm_idx(handle, inode, path + depth);
7190 + err = ext4_ext_rm_idx(handle, inode, path, depth);
7191
7192 out:
7193 return err;
7194 @@ -2760,7 +2774,7 @@ again:
7195 /* index is empty, remove it;
7196 * handle must be already prepared by the
7197 * truncatei_leaf() */
7198 - err = ext4_ext_rm_idx(handle, inode, path + i);
7199 + err = ext4_ext_rm_idx(handle, inode, path, i);
7200 }
7201 /* root level has p_bh == NULL, brelse() eats this */
7202 brelse(path[i].p_bh);
7203 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
7204 index 3a100e7..c7efa88 100644
7205 --- a/fs/ext4/ialloc.c
7206 +++ b/fs/ext4/ialloc.c
7207 @@ -762,7 +762,6 @@ got:
7208
7209 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
7210 err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
7211 - brelse(block_bitmap_bh);
7212
7213 /* recheck and clear flag under lock if we still need to */
7214 ext4_lock_group(sb, group);
7215 @@ -775,6 +774,7 @@ got:
7216 ext4_group_desc_csum_set(sb, group, gdp);
7217 }
7218 ext4_unlock_group(sb, group);
7219 + brelse(block_bitmap_bh);
7220
7221 if (err)
7222 goto fail;
7223 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
7224 index 6d600a6..8fa23b4 100644
7225 --- a/fs/ext4/namei.c
7226 +++ b/fs/ext4/namei.c
7227 @@ -725,7 +725,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
7228 ext4_warning(dir->i_sb, "Node failed checksum");
7229 brelse(bh);
7230 *err = ERR_BAD_DX_DIR;
7231 - goto fail;
7232 + goto fail2;
7233 }
7234 set_buffer_verified(bh);
7235
7236 @@ -2498,7 +2498,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
7237 struct ext4_iloc iloc;
7238 int err = 0;
7239
7240 - if (!EXT4_SB(inode->i_sb)->s_journal)
7241 + if ((!EXT4_SB(inode->i_sb)->s_journal) &&
7242 + !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
7243 return 0;
7244
7245 mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
7246 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
7247 index 80928f7..d59b351 100644
7248 --- a/fs/ext4/super.c
7249 +++ b/fs/ext4/super.c
7250 @@ -1650,9 +1650,7 @@ static int parse_options(char *options, struct super_block *sb,
7251 unsigned int *journal_ioprio,
7252 int is_remount)
7253 {
7254 -#ifdef CONFIG_QUOTA
7255 struct ext4_sb_info *sbi = EXT4_SB(sb);
7256 -#endif
7257 char *p;
7258 substring_t args[MAX_OPT_ARGS];
7259 int token;
7260 @@ -1701,6 +1699,16 @@ static int parse_options(char *options, struct super_block *sb,
7261 }
7262 }
7263 #endif
7264 + if (test_opt(sb, DIOREAD_NOLOCK)) {
7265 + int blocksize =
7266 + BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
7267 +
7268 + if (blocksize < PAGE_CACHE_SIZE) {
7269 + ext4_msg(sb, KERN_ERR, "can't mount with "
7270 + "dioread_nolock if block size != PAGE_SIZE");
7271 + return 0;
7272 + }
7273 + }
7274 return 1;
7275 }
7276
7277 @@ -2217,7 +2225,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
7278 __func__, inode->i_ino, inode->i_size);
7279 jbd_debug(2, "truncating inode %lu to %lld bytes\n",
7280 inode->i_ino, inode->i_size);
7281 + mutex_lock(&inode->i_mutex);
7282 ext4_truncate(inode);
7283 + mutex_unlock(&inode->i_mutex);
7284 nr_truncates++;
7285 } else {
7286 ext4_msg(sb, KERN_DEBUG,
7287 @@ -3446,15 +3456,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7288 clear_opt(sb, DELALLOC);
7289 }
7290
7291 - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
7292 - if (test_opt(sb, DIOREAD_NOLOCK)) {
7293 - if (blocksize < PAGE_SIZE) {
7294 - ext4_msg(sb, KERN_ERR, "can't mount with "
7295 - "dioread_nolock if block size != PAGE_SIZE");
7296 - goto failed_mount;
7297 - }
7298 - }
7299 -
7300 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
7301 (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
7302
7303 @@ -3496,6 +3497,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
7304 if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
7305 goto failed_mount;
7306
7307 + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
7308 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
7309 blocksize > EXT4_MAX_BLOCK_SIZE) {
7310 ext4_msg(sb, KERN_ERR,
7311 @@ -4729,7 +4731,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
7312 }
7313
7314 ext4_setup_system_zone(sb);
7315 - if (sbi->s_journal == NULL)
7316 + if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
7317 ext4_commit_super(sb, 1);
7318
7319 #ifdef CONFIG_QUOTA
7320 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
7321 index a74ba46..6873d24 100644
7322 --- a/fs/jbd2/transaction.c
7323 +++ b/fs/jbd2/transaction.c
7324 @@ -209,7 +209,8 @@ repeat:
7325 if (!new_transaction)
7326 goto alloc_transaction;
7327 write_lock(&journal->j_state_lock);
7328 - if (!journal->j_running_transaction) {
7329 + if (!journal->j_running_transaction &&
7330 + !journal->j_barrier_count) {
7331 jbd2_get_transaction(journal, new_transaction);
7332 new_transaction = NULL;
7333 }
7334 diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
7335 index 0c96eb5..0331072 100644
7336 --- a/fs/jffs2/nodemgmt.c
7337 +++ b/fs/jffs2/nodemgmt.c
7338 @@ -417,14 +417,16 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
7339 spin_unlock(&c->erase_completion_lock);
7340
7341 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
7342 - if (ret)
7343 - return ret;
7344 +
7345 /* Just lock it again and continue. Nothing much can change because
7346 we hold c->alloc_sem anyway. In fact, it's not entirely clear why
7347 we hold c->erase_completion_lock in the majority of this function...
7348 but that's a question for another (more caffeine-rich) day. */
7349 spin_lock(&c->erase_completion_lock);
7350
7351 + if (ret)
7352 + return ret;
7353 +
7354 waste = jeb->free_size;
7355 jffs2_link_node_ref(c, jeb,
7356 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
7357 diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
7358 index 1a4f6da..bdd840d 100644
7359 --- a/fs/pstore/ram.c
7360 +++ b/fs/pstore/ram.c
7361 @@ -374,10 +374,14 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
7362 goto fail_out;
7363 }
7364
7365 - pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
7366 - pdata->record_size = rounddown_pow_of_two(pdata->record_size);
7367 - pdata->console_size = rounddown_pow_of_two(pdata->console_size);
7368 - pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
7369 + if (!is_power_of_2(pdata->mem_size))
7370 + pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
7371 + if (!is_power_of_2(pdata->record_size))
7372 + pdata->record_size = rounddown_pow_of_two(pdata->record_size);
7373 + if (!is_power_of_2(pdata->console_size))
7374 + pdata->console_size = rounddown_pow_of_two(pdata->console_size);
7375 + if (!is_power_of_2(pdata->ftrace_size))
7376 + pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
7377
7378 cxt->dump_read_cnt = 0;
7379 cxt->size = pdata->mem_size;
7380 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
7381 index df88b95..8266f2e 100644
7382 --- a/fs/udf/inode.c
7383 +++ b/fs/udf/inode.c
7384 @@ -601,6 +601,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7385 struct udf_inode_info *iinfo = UDF_I(inode);
7386 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
7387 int lastblock = 0;
7388 + bool isBeyondEOF;
7389
7390 *err = 0;
7391 *new = 0;
7392 @@ -680,7 +681,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7393 /* Are we beyond EOF? */
7394 if (etype == -1) {
7395 int ret;
7396 -
7397 + isBeyondEOF = 1;
7398 if (count) {
7399 if (c)
7400 laarr[0] = laarr[1];
7401 @@ -723,6 +724,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7402 endnum = c + 1;
7403 lastblock = 1;
7404 } else {
7405 + isBeyondEOF = 0;
7406 endnum = startnum = ((count > 2) ? 2 : count);
7407
7408 /* if the current extent is in position 0,
7409 @@ -765,10 +767,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7410 goal, err);
7411 if (!newblocknum) {
7412 brelse(prev_epos.bh);
7413 + brelse(cur_epos.bh);
7414 + brelse(next_epos.bh);
7415 *err = -ENOSPC;
7416 return 0;
7417 }
7418 - iinfo->i_lenExtents += inode->i_sb->s_blocksize;
7419 + if (isBeyondEOF)
7420 + iinfo->i_lenExtents += inode->i_sb->s_blocksize;
7421 }
7422
7423 /* if the extent the requsted block is located in contains multiple
7424 @@ -795,6 +800,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
7425 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
7426
7427 brelse(prev_epos.bh);
7428 + brelse(cur_epos.bh);
7429 + brelse(next_epos.bh);
7430
7431 newblock = udf_get_pblock(inode->i_sb, newblocknum,
7432 iinfo->i_location.partitionReferenceNum, 0);
7433 diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
7434 index 06d7f79..a1b66b7 100644
7435 --- a/include/drm/drm_mm.h
7436 +++ b/include/drm/drm_mm.h
7437 @@ -70,7 +70,7 @@ struct drm_mm {
7438 unsigned long scan_color;
7439 unsigned long scan_size;
7440 unsigned long scan_hit_start;
7441 - unsigned scan_hit_size;
7442 + unsigned long scan_hit_end;
7443 unsigned scanned_blocks;
7444 unsigned long scan_start;
7445 unsigned long scan_end;
7446 diff --git a/include/linux/audit.h b/include/linux/audit.h
7447 index bce729a..9d5104d 100644
7448 --- a/include/linux/audit.h
7449 +++ b/include/linux/audit.h
7450 @@ -157,7 +157,8 @@ void audit_core_dumps(long signr);
7451
7452 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
7453 {
7454 - if (unlikely(!audit_dummy_context()))
7455 + /* Force a record to be reported if a signal was delivered. */
7456 + if (signr || unlikely(!audit_dummy_context()))
7457 __audit_seccomp(syscall, signr, code);
7458 }
7459
7460 diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
7461 index 6470792..084d3c6 100644
7462 --- a/include/linux/ceph/libceph.h
7463 +++ b/include/linux/ceph/libceph.h
7464 @@ -43,7 +43,6 @@ struct ceph_options {
7465 struct ceph_entity_addr my_addr;
7466 int mount_timeout;
7467 int osd_idle_ttl;
7468 - int osd_timeout;
7469 int osd_keepalive_timeout;
7470
7471 /*
7472 @@ -63,7 +62,6 @@ struct ceph_options {
7473 * defaults
7474 */
7475 #define CEPH_MOUNT_TIMEOUT_DEFAULT 60
7476 -#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
7477 #define CEPH_OSD_KEEPALIVE_DEFAULT 5
7478 #define CEPH_OSD_IDLE_TTL_DEFAULT 60
7479
7480 diff --git a/include/linux/compaction.h b/include/linux/compaction.h
7481 index 6ecb6dc..cc7bdde 100644
7482 --- a/include/linux/compaction.h
7483 +++ b/include/linux/compaction.h
7484 @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
7485 extern int fragmentation_index(struct zone *zone, unsigned int order);
7486 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
7487 int order, gfp_t gfp_mask, nodemask_t *mask,
7488 - bool sync, bool *contended, struct page **page);
7489 + bool sync, bool *contended);
7490 extern int compact_pgdat(pg_data_t *pgdat, int order);
7491 extern void reset_isolation_suitable(pg_data_t *pgdat);
7492 extern unsigned long compaction_suitable(struct zone *zone, int order);
7493 @@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
7494 #else
7495 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
7496 int order, gfp_t gfp_mask, nodemask_t *nodemask,
7497 - bool sync, bool *contended, struct page **page)
7498 + bool sync, bool *contended)
7499 {
7500 return COMPACT_CONTINUE;
7501 }
7502 diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
7503 index c96ad68..956afa4 100644
7504 --- a/include/linux/mfd/da9055/core.h
7505 +++ b/include/linux/mfd/da9055/core.h
7506 @@ -1,4 +1,4 @@
7507 -/*
7508 +/*
7509 * da9055 declarations for DA9055 PMICs.
7510 *
7511 * Copyright(c) 2012 Dialog Semiconductor Ltd.
7512 diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
7513 index 147293b..b9b204e 100644
7514 --- a/include/linux/mfd/da9055/pdata.h
7515 +++ b/include/linux/mfd/da9055/pdata.h
7516 @@ -1,4 +1,4 @@
7517 -/* Copyright (C) 2012 Dialog Semiconductor Ltd.
7518 +/* Copyright (C) 2012 Dialog Semiconductor Ltd.
7519 *
7520 * This program is free software; you can redistribute it and/or modify
7521 * it under the terms of the GNU General Public License as published by
7522 diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
7523 index df237ee..2b592e0 100644
7524 --- a/include/linux/mfd/da9055/reg.h
7525 +++ b/include/linux/mfd/da9055/reg.h
7526 @@ -1,4 +1,4 @@
7527 -/*
7528 +/*
7529 * DA9055 declarations for DA9055 PMICs.
7530 *
7531 * Copyright(c) 2012 Dialog Semiconductor Ltd.
7532 diff --git a/include/linux/mm.h b/include/linux/mm.h
7533 index bcaab4e..280dae5 100644
7534 --- a/include/linux/mm.h
7535 +++ b/include/linux/mm.h
7536 @@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
7537
7538 void split_page(struct page *page, unsigned int order);
7539 int split_free_page(struct page *page);
7540 -int capture_free_page(struct page *page, int alloc_order, int migratetype);
7541
7542 /*
7543 * Compound pages have a destructor function. Provide a
7544 diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
7545 index f792794..5dc9ee4 100644
7546 --- a/include/linux/sunrpc/cache.h
7547 +++ b/include/linux/sunrpc/cache.h
7548 @@ -217,6 +217,8 @@ extern int qword_get(char **bpp, char *dest, int bufsize);
7549 static inline int get_int(char **bpp, int *anint)
7550 {
7551 char buf[50];
7552 + char *ep;
7553 + int rv;
7554 int len = qword_get(bpp, buf, sizeof(buf));
7555
7556 if (len < 0)
7557 @@ -224,9 +226,11 @@ static inline int get_int(char **bpp, int *anint)
7558 if (len == 0)
7559 return -ENOENT;
7560
7561 - if (kstrtoint(buf, 0, anint))
7562 + rv = simple_strtol(buf, &ep, 0);
7563 + if (*ep)
7564 return -EINVAL;
7565
7566 + *anint = rv;
7567 return 0;
7568 }
7569
7570 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
7571 index 82558c8..d481cc6 100644
7572 --- a/include/net/mac80211.h
7573 +++ b/include/net/mac80211.h
7574 @@ -1253,6 +1253,10 @@ struct ieee80211_tx_control {
7575 * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
7576 * P2P Interface. This will be honoured even if more than one interface
7577 * is supported.
7578 + *
7579 + * @IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL: On this hardware TX BA session
7580 + * should be tear down once BAR frame will not be acked.
7581 + *
7582 */
7583 enum ieee80211_hw_flags {
7584 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
7585 @@ -1281,6 +1285,7 @@ enum ieee80211_hw_flags {
7586 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
7587 IEEE80211_HW_SCAN_WHILE_IDLE = 1<<24,
7588 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
7589 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL = 1<<26,
7590 };
7591
7592 /**
7593 diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
7594 index 76352ac..09a2d94 100644
7595 --- a/include/uapi/linux/audit.h
7596 +++ b/include/uapi/linux/audit.h
7597 @@ -106,6 +106,7 @@
7598 #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
7599 #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
7600 #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
7601 +#define AUDIT_SECCOMP 1326 /* Secure Computing event */
7602
7603 #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
7604 #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
7605 diff --git a/include/video/omap-panel-tfp410.h b/include/video/omap-panel-tfp410.h
7606 index 68c31d7..aef35e4 100644
7607 --- a/include/video/omap-panel-tfp410.h
7608 +++ b/include/video/omap-panel-tfp410.h
7609 @@ -28,7 +28,7 @@ struct omap_dss_device;
7610 * @power_down_gpio: gpio number for PD pin (or -1 if not available)
7611 */
7612 struct tfp410_platform_data {
7613 - u16 i2c_bus_num;
7614 + int i2c_bus_num;
7615 int power_down_gpio;
7616 };
7617
7618 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
7619 index 2f186ed..157e989 100644
7620 --- a/kernel/auditsc.c
7621 +++ b/kernel/auditsc.c
7622 @@ -2735,7 +2735,7 @@ void __audit_mmap_fd(int fd, int flags)
7623 context->type = AUDIT_MMAP;
7624 }
7625
7626 -static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7627 +static void audit_log_task(struct audit_buffer *ab)
7628 {
7629 kuid_t auid, uid;
7630 kgid_t gid;
7631 @@ -2753,6 +2753,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7632 audit_log_task_context(ab);
7633 audit_log_format(ab, " pid=%d comm=", current->pid);
7634 audit_log_untrustedstring(ab, current->comm);
7635 +}
7636 +
7637 +static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
7638 +{
7639 + audit_log_task(ab);
7640 audit_log_format(ab, " reason=");
7641 audit_log_string(ab, reason);
7642 audit_log_format(ab, " sig=%ld", signr);
7643 @@ -2783,8 +2788,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
7644 {
7645 struct audit_buffer *ab;
7646
7647 - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
7648 - audit_log_abend(ab, "seccomp", signr);
7649 + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
7650 + if (unlikely(!ab))
7651 + return;
7652 + audit_log_task(ab);
7653 + audit_log_format(ab, " sig=%ld", signr);
7654 audit_log_format(ab, " syscall=%ld", syscall);
7655 audit_log_format(ab, " compat=%d", is_compat_task());
7656 audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
7657 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
7658 index c8c21be..762081c 100644
7659 --- a/kernel/watchdog.c
7660 +++ b/kernel/watchdog.c
7661 @@ -343,6 +343,10 @@ static void watchdog_enable(unsigned int cpu)
7662 {
7663 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
7664
7665 + /* kick off the timer for the hardlockup detector */
7666 + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7667 + hrtimer->function = watchdog_timer_fn;
7668 +
7669 if (!watchdog_enabled) {
7670 kthread_park(current);
7671 return;
7672 @@ -351,10 +355,6 @@ static void watchdog_enable(unsigned int cpu)
7673 /* Enable the perf event */
7674 watchdog_nmi_enable(cpu);
7675
7676 - /* kick off the timer for the hardlockup detector */
7677 - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7678 - hrtimer->function = watchdog_timer_fn;
7679 -
7680 /* done here because hrtimer_start can only pin to smp_processor_id() */
7681 hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
7682 HRTIMER_MODE_REL_PINNED);
7683 @@ -368,9 +368,6 @@ static void watchdog_disable(unsigned int cpu)
7684 {
7685 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
7686
7687 - if (!watchdog_enabled)
7688 - return;
7689 -
7690 watchdog_set_prio(SCHED_NORMAL, 0);
7691 hrtimer_cancel(hrtimer);
7692 /* disable the perf event */
7693 diff --git a/mm/bootmem.c b/mm/bootmem.c
7694 index f468185..af3d5af 100644
7695 --- a/mm/bootmem.c
7696 +++ b/mm/bootmem.c
7697 @@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
7698
7699 while (start < end) {
7700 unsigned long *map, idx, vec;
7701 + unsigned shift;
7702
7703 map = bdata->node_bootmem_map;
7704 idx = start - bdata->node_min_pfn;
7705 + shift = idx & (BITS_PER_LONG - 1);
7706 + /*
7707 + * vec holds at most BITS_PER_LONG map bits,
7708 + * bit 0 corresponds to start.
7709 + */
7710 vec = ~map[idx / BITS_PER_LONG];
7711 +
7712 + if (shift) {
7713 + vec >>= shift;
7714 + if (end - start >= BITS_PER_LONG)
7715 + vec |= ~map[idx / BITS_PER_LONG + 1] <<
7716 + (BITS_PER_LONG - shift);
7717 + }
7718 /*
7719 * If we have a properly aligned and fully unreserved
7720 * BITS_PER_LONG block of pages in front of us, free
7721 @@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
7722 count += BITS_PER_LONG;
7723 start += BITS_PER_LONG;
7724 } else {
7725 - unsigned long off = 0;
7726 + unsigned long cur = start;
7727
7728 - vec >>= start & (BITS_PER_LONG - 1);
7729 - while (vec) {
7730 + start = ALIGN(start + 1, BITS_PER_LONG);
7731 + while (vec && cur != start) {
7732 if (vec & 1) {
7733 - page = pfn_to_page(start + off);
7734 + page = pfn_to_page(cur);
7735 __free_pages_bootmem(page, 0);
7736 count++;
7737 }
7738 vec >>= 1;
7739 - off++;
7740 + ++cur;
7741 }
7742 - start = ALIGN(start + 1, BITS_PER_LONG);
7743 }
7744 }
7745
7746 diff --git a/mm/compaction.c b/mm/compaction.c
7747 index 694eaab..027ebb9 100644
7748 --- a/mm/compaction.c
7749 +++ b/mm/compaction.c
7750 @@ -214,60 +214,6 @@ static bool suitable_migration_target(struct page *page)
7751 return false;
7752 }
7753
7754 -static void compact_capture_page(struct compact_control *cc)
7755 -{
7756 - unsigned long flags;
7757 - int mtype, mtype_low, mtype_high;
7758 -
7759 - if (!cc->page || *cc->page)
7760 - return;
7761 -
7762 - /*
7763 - * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
7764 - * regardless of the migratetype of the freelist is is captured from.
7765 - * This is fine because the order for a high-order MIGRATE_MOVABLE
7766 - * allocation is typically at least a pageblock size and overall
7767 - * fragmentation is not impaired. Other allocation types must
7768 - * capture pages from their own migratelist because otherwise they
7769 - * could pollute other pageblocks like MIGRATE_MOVABLE with
7770 - * difficult to move pages and making fragmentation worse overall.
7771 - */
7772 - if (cc->migratetype == MIGRATE_MOVABLE) {
7773 - mtype_low = 0;
7774 - mtype_high = MIGRATE_PCPTYPES;
7775 - } else {
7776 - mtype_low = cc->migratetype;
7777 - mtype_high = cc->migratetype + 1;
7778 - }
7779 -
7780 - /* Speculatively examine the free lists without zone lock */
7781 - for (mtype = mtype_low; mtype < mtype_high; mtype++) {
7782 - int order;
7783 - for (order = cc->order; order < MAX_ORDER; order++) {
7784 - struct page *page;
7785 - struct free_area *area;
7786 - area = &(cc->zone->free_area[order]);
7787 - if (list_empty(&area->free_list[mtype]))
7788 - continue;
7789 -
7790 - /* Take the lock and attempt capture of the page */
7791 - if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
7792 - return;
7793 - if (!list_empty(&area->free_list[mtype])) {
7794 - page = list_entry(area->free_list[mtype].next,
7795 - struct page, lru);
7796 - if (capture_free_page(page, cc->order, mtype)) {
7797 - spin_unlock_irqrestore(&cc->zone->lock,
7798 - flags);
7799 - *cc->page = page;
7800 - return;
7801 - }
7802 - }
7803 - spin_unlock_irqrestore(&cc->zone->lock, flags);
7804 - }
7805 - }
7806 -}
7807 -
7808 /*
7809 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
7810 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
7811 @@ -831,6 +777,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
7812 static int compact_finished(struct zone *zone,
7813 struct compact_control *cc)
7814 {
7815 + unsigned int order;
7816 unsigned long watermark;
7817
7818 if (fatal_signal_pending(current))
7819 @@ -865,22 +812,16 @@ static int compact_finished(struct zone *zone,
7820 return COMPACT_CONTINUE;
7821
7822 /* Direct compactor: Is a suitable page free? */
7823 - if (cc->page) {
7824 - /* Was a suitable page captured? */
7825 - if (*cc->page)
7826 + for (order = cc->order; order < MAX_ORDER; order++) {
7827 + struct free_area *area = &zone->free_area[order];
7828 +
7829 + /* Job done if page is free of the right migratetype */
7830 + if (!list_empty(&area->free_list[cc->migratetype]))
7831 + return COMPACT_PARTIAL;
7832 +
7833 + /* Job done if allocation would set block type */
7834 + if (cc->order >= pageblock_order && area->nr_free)
7835 return COMPACT_PARTIAL;
7836 - } else {
7837 - unsigned int order;
7838 - for (order = cc->order; order < MAX_ORDER; order++) {
7839 - struct free_area *area = &zone->free_area[cc->order];
7840 - /* Job done if page is free of the right migratetype */
7841 - if (!list_empty(&area->free_list[cc->migratetype]))
7842 - return COMPACT_PARTIAL;
7843 -
7844 - /* Job done if allocation would set block type */
7845 - if (cc->order >= pageblock_order && area->nr_free)
7846 - return COMPACT_PARTIAL;
7847 - }
7848 }
7849
7850 return COMPACT_CONTINUE;
7851 @@ -1018,9 +959,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
7852 goto out;
7853 }
7854 }
7855 -
7856 - /* Capture a page now if it is a suitable size */
7857 - compact_capture_page(cc);
7858 }
7859
7860 out:
7861 @@ -1033,8 +971,7 @@ out:
7862
7863 static unsigned long compact_zone_order(struct zone *zone,
7864 int order, gfp_t gfp_mask,
7865 - bool sync, bool *contended,
7866 - struct page **page)
7867 + bool sync, bool *contended)
7868 {
7869 unsigned long ret;
7870 struct compact_control cc = {
7871 @@ -1044,7 +981,6 @@ static unsigned long compact_zone_order(struct zone *zone,
7872 .migratetype = allocflags_to_migratetype(gfp_mask),
7873 .zone = zone,
7874 .sync = sync,
7875 - .page = page,
7876 };
7877 INIT_LIST_HEAD(&cc.freepages);
7878 INIT_LIST_HEAD(&cc.migratepages);
7879 @@ -1074,7 +1010,7 @@ int sysctl_extfrag_threshold = 500;
7880 */
7881 unsigned long try_to_compact_pages(struct zonelist *zonelist,
7882 int order, gfp_t gfp_mask, nodemask_t *nodemask,
7883 - bool sync, bool *contended, struct page **page)
7884 + bool sync, bool *contended)
7885 {
7886 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
7887 int may_enter_fs = gfp_mask & __GFP_FS;
7888 @@ -1100,7 +1036,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
7889 int status;
7890
7891 status = compact_zone_order(zone, order, gfp_mask, sync,
7892 - contended, page);
7893 + contended);
7894 rc = max(status, rc);
7895
7896 /* If a normal allocation would succeed, stop compacting */
7897 @@ -1156,7 +1092,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
7898 struct compact_control cc = {
7899 .order = order,
7900 .sync = false,
7901 - .page = NULL,
7902 };
7903
7904 return __compact_pgdat(pgdat, &cc);
7905 @@ -1167,14 +1102,13 @@ static int compact_node(int nid)
7906 struct compact_control cc = {
7907 .order = -1,
7908 .sync = true,
7909 - .page = NULL,
7910 };
7911
7912 return __compact_pgdat(NODE_DATA(nid), &cc);
7913 }
7914
7915 /* Compact all nodes in the system */
7916 -static int compact_nodes(void)
7917 +static void compact_nodes(void)
7918 {
7919 int nid;
7920
7921 @@ -1183,8 +1117,6 @@ static int compact_nodes(void)
7922
7923 for_each_online_node(nid)
7924 compact_node(nid);
7925 -
7926 - return COMPACT_COMPLETE;
7927 }
7928
7929 /* The written value is actually unused, all memory is compacted */
7930 @@ -1195,7 +1127,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
7931 void __user *buffer, size_t *length, loff_t *ppos)
7932 {
7933 if (write)
7934 - return compact_nodes();
7935 + compact_nodes();
7936
7937 return 0;
7938 }
7939 diff --git a/mm/internal.h b/mm/internal.h
7940 index a4fa284..3c5197d 100644
7941 --- a/mm/internal.h
7942 +++ b/mm/internal.h
7943 @@ -130,7 +130,6 @@ struct compact_control {
7944 int migratetype; /* MOVABLE, RECLAIMABLE etc */
7945 struct zone *zone;
7946 bool contended; /* True if a lock was contended */
7947 - struct page **page; /* Page captured of requested size */
7948 };
7949
7950 unsigned long
7951 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7952 index 7e208f0..ceb4168 100644
7953 --- a/mm/page_alloc.c
7954 +++ b/mm/page_alloc.c
7955 @@ -1376,14 +1376,8 @@ void split_page(struct page *page, unsigned int order)
7956 set_page_refcounted(page + i);
7957 }
7958
7959 -/*
7960 - * Similar to the split_page family of functions except that the page
7961 - * required at the given order and being isolated now to prevent races
7962 - * with parallel allocators
7963 - */
7964 -int capture_free_page(struct page *page, int alloc_order, int migratetype)
7965 +static int __isolate_free_page(struct page *page, unsigned int order)
7966 {
7967 - unsigned int order;
7968 unsigned long watermark;
7969 struct zone *zone;
7970 int mt;
7971 @@ -1391,7 +1385,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
7972 BUG_ON(!PageBuddy(page));
7973
7974 zone = page_zone(page);
7975 - order = page_order(page);
7976
7977 /* Obey watermarks as if the page was being allocated */
7978 watermark = low_wmark_pages(zone) + (1 << order);
7979 @@ -1405,13 +1398,9 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
7980
7981 mt = get_pageblock_migratetype(page);
7982 if (unlikely(mt != MIGRATE_ISOLATE))
7983 - __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
7984 -
7985 - if (alloc_order != order)
7986 - expand(zone, page, alloc_order, order,
7987 - &zone->free_area[order], migratetype);
7988 + __mod_zone_freepage_state(zone, -(1UL << order), mt);
7989
7990 - /* Set the pageblock if the captured page is at least a pageblock */
7991 + /* Set the pageblock if the isolated page is at least a pageblock */
7992 if (order >= pageblock_order - 1) {
7993 struct page *endpage = page + (1 << order) - 1;
7994 for (; page < endpage; page += pageblock_nr_pages) {
7995 @@ -1422,7 +1411,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
7996 }
7997 }
7998
7999 - return 1UL << alloc_order;
8000 + return 1UL << order;
8001 }
8002
8003 /*
8004 @@ -1440,10 +1429,9 @@ int split_free_page(struct page *page)
8005 unsigned int order;
8006 int nr_pages;
8007
8008 - BUG_ON(!PageBuddy(page));
8009 order = page_order(page);
8010
8011 - nr_pages = capture_free_page(page, order, 0);
8012 + nr_pages = __isolate_free_page(page, order);
8013 if (!nr_pages)
8014 return 0;
8015
8016 @@ -2148,8 +2136,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8017 bool *contended_compaction, bool *deferred_compaction,
8018 unsigned long *did_some_progress)
8019 {
8020 - struct page *page = NULL;
8021 -
8022 if (!order)
8023 return NULL;
8024
8025 @@ -2161,16 +2147,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8026 current->flags |= PF_MEMALLOC;
8027 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
8028 nodemask, sync_migration,
8029 - contended_compaction, &page);
8030 + contended_compaction);
8031 current->flags &= ~PF_MEMALLOC;
8032
8033 - /* If compaction captured a page, prep and use it */
8034 - if (page) {
8035 - prep_new_page(page, order, gfp_mask);
8036 - goto got_page;
8037 - }
8038 -
8039 if (*did_some_progress != COMPACT_SKIPPED) {
8040 + struct page *page;
8041 +
8042 /* Page migration frees to the PCP lists but we want merging */
8043 drain_pages(get_cpu());
8044 put_cpu();
8045 @@ -2180,7 +2162,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
8046 alloc_flags & ~ALLOC_NO_WATERMARKS,
8047 preferred_zone, migratetype);
8048 if (page) {
8049 -got_page:
8050 preferred_zone->compact_blockskip_flush = false;
8051 preferred_zone->compact_considered = 0;
8052 preferred_zone->compact_defer_shift = 0;
8053 @@ -5506,7 +5487,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
8054 pfn &= (PAGES_PER_SECTION-1);
8055 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
8056 #else
8057 - pfn = pfn - zone->zone_start_pfn;
8058 + pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
8059 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
8060 #endif /* CONFIG_SPARSEMEM */
8061 }
8062 diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
8063 index a802029..ee71ea2 100644
8064 --- a/net/ceph/ceph_common.c
8065 +++ b/net/ceph/ceph_common.c
8066 @@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name,
8067
8068 /* start with defaults */
8069 opt->flags = CEPH_OPT_DEFAULT;
8070 - opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
8071 opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
8072 opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
8073 opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
8074 @@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name,
8075
8076 /* misc */
8077 case Opt_osdtimeout:
8078 - opt->osd_timeout = intval;
8079 + pr_warning("ignoring deprecated osdtimeout option\n");
8080 break;
8081 case Opt_osdkeepalivetimeout:
8082 opt->osd_keepalive_timeout = intval;
8083 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
8084 index 3ef1759..e9f2159 100644
8085 --- a/net/ceph/messenger.c
8086 +++ b/net/ceph/messenger.c
8087 @@ -506,6 +506,7 @@ static void reset_connection(struct ceph_connection *con)
8088 {
8089 /* reset connection, out_queue, msg_ and connect_seq */
8090 /* discard existing out_queue and msg_seq */
8091 + dout("reset_connection %p\n", con);
8092 ceph_msg_remove_list(&con->out_queue);
8093 ceph_msg_remove_list(&con->out_sent);
8094
8095 @@ -561,7 +562,7 @@ void ceph_con_open(struct ceph_connection *con,
8096 mutex_lock(&con->mutex);
8097 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
8098
8099 - BUG_ON(con->state != CON_STATE_CLOSED);
8100 + WARN_ON(con->state != CON_STATE_CLOSED);
8101 con->state = CON_STATE_PREOPEN;
8102
8103 con->peer_name.type = (__u8) entity_type;
8104 @@ -1506,13 +1507,6 @@ static int process_banner(struct ceph_connection *con)
8105 return 0;
8106 }
8107
8108 -static void fail_protocol(struct ceph_connection *con)
8109 -{
8110 - reset_connection(con);
8111 - BUG_ON(con->state != CON_STATE_NEGOTIATING);
8112 - con->state = CON_STATE_CLOSED;
8113 -}
8114 -
8115 static int process_connect(struct ceph_connection *con)
8116 {
8117 u64 sup_feat = con->msgr->supported_features;
8118 @@ -1530,7 +1524,7 @@ static int process_connect(struct ceph_connection *con)
8119 ceph_pr_addr(&con->peer_addr.in_addr),
8120 sup_feat, server_feat, server_feat & ~sup_feat);
8121 con->error_msg = "missing required protocol features";
8122 - fail_protocol(con);
8123 + reset_connection(con);
8124 return -1;
8125
8126 case CEPH_MSGR_TAG_BADPROTOVER:
8127 @@ -1541,7 +1535,7 @@ static int process_connect(struct ceph_connection *con)
8128 le32_to_cpu(con->out_connect.protocol_version),
8129 le32_to_cpu(con->in_reply.protocol_version));
8130 con->error_msg = "protocol version mismatch";
8131 - fail_protocol(con);
8132 + reset_connection(con);
8133 return -1;
8134
8135 case CEPH_MSGR_TAG_BADAUTHORIZER:
8136 @@ -1631,11 +1625,11 @@ static int process_connect(struct ceph_connection *con)
8137 ceph_pr_addr(&con->peer_addr.in_addr),
8138 req_feat, server_feat, req_feat & ~server_feat);
8139 con->error_msg = "missing required protocol features";
8140 - fail_protocol(con);
8141 + reset_connection(con);
8142 return -1;
8143 }
8144
8145 - BUG_ON(con->state != CON_STATE_NEGOTIATING);
8146 + WARN_ON(con->state != CON_STATE_NEGOTIATING);
8147 con->state = CON_STATE_OPEN;
8148
8149 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
8150 @@ -2132,7 +2126,6 @@ more:
8151 if (ret < 0)
8152 goto out;
8153
8154 - BUG_ON(con->state != CON_STATE_CONNECTING);
8155 con->state = CON_STATE_NEGOTIATING;
8156
8157 /*
8158 @@ -2160,7 +2153,7 @@ more:
8159 goto more;
8160 }
8161
8162 - BUG_ON(con->state != CON_STATE_OPEN);
8163 + WARN_ON(con->state != CON_STATE_OPEN);
8164
8165 if (con->in_base_pos < 0) {
8166 /*
8167 @@ -2262,6 +2255,35 @@ static void queue_con(struct ceph_connection *con)
8168 }
8169 }
8170
8171 +static bool con_sock_closed(struct ceph_connection *con)
8172 +{
8173 + if (!test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags))
8174 + return false;
8175 +
8176 +#define CASE(x) \
8177 + case CON_STATE_ ## x: \
8178 + con->error_msg = "socket closed (con state " #x ")"; \
8179 + break;
8180 +
8181 + switch (con->state) {
8182 + CASE(CLOSED);
8183 + CASE(PREOPEN);
8184 + CASE(CONNECTING);
8185 + CASE(NEGOTIATING);
8186 + CASE(OPEN);
8187 + CASE(STANDBY);
8188 + default:
8189 + pr_warning("%s con %p unrecognized state %lu\n",
8190 + __func__, con, con->state);
8191 + con->error_msg = "unrecognized con state";
8192 + BUG();
8193 + break;
8194 + }
8195 +#undef CASE
8196 +
8197 + return true;
8198 +}
8199 +
8200 /*
8201 * Do some work on a connection. Drop a connection ref when we're done.
8202 */
8203 @@ -2273,24 +2295,8 @@ static void con_work(struct work_struct *work)
8204
8205 mutex_lock(&con->mutex);
8206 restart:
8207 - if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
8208 - switch (con->state) {
8209 - case CON_STATE_CONNECTING:
8210 - con->error_msg = "connection failed";
8211 - break;
8212 - case CON_STATE_NEGOTIATING:
8213 - con->error_msg = "negotiation failed";
8214 - break;
8215 - case CON_STATE_OPEN:
8216 - con->error_msg = "socket closed";
8217 - break;
8218 - default:
8219 - dout("unrecognized con state %d\n", (int)con->state);
8220 - con->error_msg = "unrecognized con state";
8221 - BUG();
8222 - }
8223 + if (con_sock_closed(con))
8224 goto fault;
8225 - }
8226
8227 if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
8228 dout("con_work %p backing off\n", con);
8229 @@ -2356,12 +2362,12 @@ fault:
8230 static void ceph_fault(struct ceph_connection *con)
8231 __releases(con->mutex)
8232 {
8233 - pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
8234 + pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
8235 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
8236 dout("fault %p state %lu to peer %s\n",
8237 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
8238
8239 - BUG_ON(con->state != CON_STATE_CONNECTING &&
8240 + WARN_ON(con->state != CON_STATE_CONNECTING &&
8241 con->state != CON_STATE_NEGOTIATING &&
8242 con->state != CON_STATE_OPEN);
8243
8244 diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
8245 index c1d756c..eb9a444 100644
8246 --- a/net/ceph/osd_client.c
8247 +++ b/net/ceph/osd_client.c
8248 @@ -221,6 +221,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
8249 kref_init(&req->r_kref);
8250 init_completion(&req->r_completion);
8251 init_completion(&req->r_safe_completion);
8252 + RB_CLEAR_NODE(&req->r_node);
8253 INIT_LIST_HEAD(&req->r_unsafe_item);
8254 INIT_LIST_HEAD(&req->r_linger_item);
8255 INIT_LIST_HEAD(&req->r_linger_osd);
8256 @@ -580,7 +581,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
8257
8258 dout("__kick_osd_requests osd%d\n", osd->o_osd);
8259 err = __reset_osd(osdc, osd);
8260 - if (err == -EAGAIN)
8261 + if (err)
8262 return;
8263
8264 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
8265 @@ -607,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
8266 }
8267 }
8268
8269 -static void kick_osd_requests(struct ceph_osd_client *osdc,
8270 - struct ceph_osd *kickosd)
8271 -{
8272 - mutex_lock(&osdc->request_mutex);
8273 - __kick_osd_requests(osdc, kickosd);
8274 - mutex_unlock(&osdc->request_mutex);
8275 -}
8276 -
8277 /*
8278 * If the osd connection drops, we need to resubmit all requests.
8279 */
8280 @@ -628,7 +621,9 @@ static void osd_reset(struct ceph_connection *con)
8281 dout("osd_reset osd%d\n", osd->o_osd);
8282 osdc = osd->o_osdc;
8283 down_read(&osdc->map_sem);
8284 - kick_osd_requests(osdc, osd);
8285 + mutex_lock(&osdc->request_mutex);
8286 + __kick_osd_requests(osdc, osd);
8287 + mutex_unlock(&osdc->request_mutex);
8288 send_queued(osdc);
8289 up_read(&osdc->map_sem);
8290 }
8291 @@ -647,6 +642,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
8292 atomic_set(&osd->o_ref, 1);
8293 osd->o_osdc = osdc;
8294 osd->o_osd = onum;
8295 + RB_CLEAR_NODE(&osd->o_node);
8296 INIT_LIST_HEAD(&osd->o_requests);
8297 INIT_LIST_HEAD(&osd->o_linger_requests);
8298 INIT_LIST_HEAD(&osd->o_osd_lru);
8299 @@ -750,6 +746,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
8300 if (list_empty(&osd->o_requests) &&
8301 list_empty(&osd->o_linger_requests)) {
8302 __remove_osd(osdc, osd);
8303 + ret = -ENODEV;
8304 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
8305 &osd->o_con.peer_addr,
8306 sizeof(osd->o_con.peer_addr)) == 0 &&
8307 @@ -876,9 +873,9 @@ static void __unregister_request(struct ceph_osd_client *osdc,
8308 req->r_osd = NULL;
8309 }
8310
8311 + list_del_init(&req->r_req_lru_item);
8312 ceph_osdc_put_request(req);
8313
8314 - list_del_init(&req->r_req_lru_item);
8315 if (osdc->num_requests == 0) {
8316 dout(" no requests, canceling timeout\n");
8317 __cancel_osd_timeout(osdc);
8318 @@ -910,8 +907,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
8319 struct ceph_osd_request *req)
8320 {
8321 dout("__unregister_linger_request %p\n", req);
8322 + list_del_init(&req->r_linger_item);
8323 if (req->r_osd) {
8324 - list_del_init(&req->r_linger_item);
8325 list_del_init(&req->r_linger_osd);
8326
8327 if (list_empty(&req->r_osd->o_requests) &&
8328 @@ -1090,12 +1087,10 @@ static void handle_timeout(struct work_struct *work)
8329 {
8330 struct ceph_osd_client *osdc =
8331 container_of(work, struct ceph_osd_client, timeout_work.work);
8332 - struct ceph_osd_request *req, *last_req = NULL;
8333 + struct ceph_osd_request *req;
8334 struct ceph_osd *osd;
8335 - unsigned long timeout = osdc->client->options->osd_timeout * HZ;
8336 unsigned long keepalive =
8337 osdc->client->options->osd_keepalive_timeout * HZ;
8338 - unsigned long last_stamp = 0;
8339 struct list_head slow_osds;
8340 dout("timeout\n");
8341 down_read(&osdc->map_sem);
8342 @@ -1105,37 +1100,6 @@ static void handle_timeout(struct work_struct *work)
8343 mutex_lock(&osdc->request_mutex);
8344
8345 /*
8346 - * reset osds that appear to be _really_ unresponsive. this
8347 - * is a failsafe measure.. we really shouldn't be getting to
8348 - * this point if the system is working properly. the monitors
8349 - * should mark the osd as failed and we should find out about
8350 - * it from an updated osd map.
8351 - */
8352 - while (timeout && !list_empty(&osdc->req_lru)) {
8353 - req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
8354 - r_req_lru_item);
8355 -
8356 - /* hasn't been long enough since we sent it? */
8357 - if (time_before(jiffies, req->r_stamp + timeout))
8358 - break;
8359 -
8360 - /* hasn't been long enough since it was acked? */
8361 - if (req->r_request->ack_stamp == 0 ||
8362 - time_before(jiffies, req->r_request->ack_stamp + timeout))
8363 - break;
8364 -
8365 - BUG_ON(req == last_req && req->r_stamp == last_stamp);
8366 - last_req = req;
8367 - last_stamp = req->r_stamp;
8368 -
8369 - osd = req->r_osd;
8370 - BUG_ON(!osd);
8371 - pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
8372 - req->r_tid, osd->o_osd);
8373 - __kick_osd_requests(osdc, osd);
8374 - }
8375 -
8376 - /*
8377 * ping osds that are a bit slow. this ensures that if there
8378 * is a break in the TCP connection we will notice, and reopen
8379 * a connection with that osd (from the fault callback).
8380 @@ -1306,7 +1270,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
8381 * Requeue requests whose mapping to an OSD has changed. If requests map to
8382 * no osd, request a new map.
8383 *
8384 - * Caller should hold map_sem for read and request_mutex.
8385 + * Caller should hold map_sem for read.
8386 */
8387 static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8388 {
8389 @@ -1320,6 +1284,24 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8390 for (p = rb_first(&osdc->requests); p; ) {
8391 req = rb_entry(p, struct ceph_osd_request, r_node);
8392 p = rb_next(p);
8393 +
8394 + /*
8395 + * For linger requests that have not yet been
8396 + * registered, move them to the linger list; they'll
8397 + * be sent to the osd in the loop below. Unregister
8398 + * the request before re-registering it as a linger
8399 + * request to ensure the __map_request() below
8400 + * will decide it needs to be sent.
8401 + */
8402 + if (req->r_linger && list_empty(&req->r_linger_item)) {
8403 + dout("%p tid %llu restart on osd%d\n",
8404 + req, req->r_tid,
8405 + req->r_osd ? req->r_osd->o_osd : -1);
8406 + __unregister_request(osdc, req);
8407 + __register_linger_request(osdc, req);
8408 + continue;
8409 + }
8410 +
8411 err = __map_request(osdc, req, force_resend);
8412 if (err < 0)
8413 continue; /* error */
8414 @@ -1334,17 +1316,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8415 req->r_flags |= CEPH_OSD_FLAG_RETRY;
8416 }
8417 }
8418 - if (req->r_linger && list_empty(&req->r_linger_item)) {
8419 - /*
8420 - * register as a linger so that we will
8421 - * re-submit below and get a new tid
8422 - */
8423 - dout("%p tid %llu restart on osd%d\n",
8424 - req, req->r_tid,
8425 - req->r_osd ? req->r_osd->o_osd : -1);
8426 - __register_linger_request(osdc, req);
8427 - __unregister_request(osdc, req);
8428 - }
8429 }
8430
8431 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
8432 @@ -1352,6 +1323,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8433 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
8434
8435 err = __map_request(osdc, req, force_resend);
8436 + dout("__map_request returned %d\n", err);
8437 if (err == 0)
8438 continue; /* no change and no osd was specified */
8439 if (err < 0)
8440 @@ -1364,8 +1336,8 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8441
8442 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
8443 req->r_osd ? req->r_osd->o_osd : -1);
8444 - __unregister_linger_request(osdc, req);
8445 __register_request(osdc, req);
8446 + __unregister_linger_request(osdc, req);
8447 }
8448 mutex_unlock(&osdc->request_mutex);
8449
8450 @@ -1373,6 +1345,7 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
8451 dout("%d requests for down osds, need new map\n", needmap);
8452 ceph_monc_request_next_osdmap(&osdc->client->monc);
8453 }
8454 + reset_changed_osds(osdc);
8455 }
8456
8457
8458 @@ -1429,7 +1402,6 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
8459 osdc->osdmap = newmap;
8460 }
8461 kick_requests(osdc, 0);
8462 - reset_changed_osds(osdc);
8463 } else {
8464 dout("ignoring incremental map %u len %d\n",
8465 epoch, maplen);
8466 @@ -1599,6 +1571,7 @@ int ceph_osdc_create_event(struct ceph_osd_client *osdc,
8467 event->data = data;
8468 event->osdc = osdc;
8469 INIT_LIST_HEAD(&event->osd_node);
8470 + RB_CLEAR_NODE(&event->node);
8471 kref_init(&event->kref); /* one ref for us */
8472 kref_get(&event->kref); /* one ref for the caller */
8473 init_completion(&event->completion);
8474 diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
8475 index 5433fb0..f552aa4 100644
8476 --- a/net/ceph/osdmap.c
8477 +++ b/net/ceph/osdmap.c
8478 @@ -645,10 +645,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8479 ceph_decode_32_safe(p, end, max, bad);
8480 while (max--) {
8481 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
8482 + err = -ENOMEM;
8483 pi = kzalloc(sizeof(*pi), GFP_NOFS);
8484 if (!pi)
8485 goto bad;
8486 pi->id = ceph_decode_32(p);
8487 + err = -EINVAL;
8488 ev = ceph_decode_8(p); /* encoding version */
8489 if (ev > CEPH_PG_POOL_VERSION) {
8490 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
8491 @@ -664,8 +666,13 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8492 __insert_pg_pool(&map->pg_pools, pi);
8493 }
8494
8495 - if (version >= 5 && __decode_pool_names(p, end, map) < 0)
8496 - goto bad;
8497 + if (version >= 5) {
8498 + err = __decode_pool_names(p, end, map);
8499 + if (err < 0) {
8500 + dout("fail to decode pool names");
8501 + goto bad;
8502 + }
8503 + }
8504
8505 ceph_decode_32_safe(p, end, map->pool_max, bad);
8506
8507 @@ -745,7 +752,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
8508 return map;
8509
8510 bad:
8511 - dout("osdmap_decode fail\n");
8512 + dout("osdmap_decode fail err %d\n", err);
8513 ceph_osdmap_destroy(map);
8514 return ERR_PTR(err);
8515 }
8516 @@ -839,6 +846,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8517 if (ev > CEPH_PG_POOL_VERSION) {
8518 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
8519 ev, CEPH_PG_POOL_VERSION);
8520 + err = -EINVAL;
8521 goto bad;
8522 }
8523 pi = __lookup_pg_pool(&map->pg_pools, pool);
8524 @@ -855,8 +863,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8525 if (err < 0)
8526 goto bad;
8527 }
8528 - if (version >= 5 && __decode_pool_names(p, end, map) < 0)
8529 - goto bad;
8530 + if (version >= 5) {
8531 + err = __decode_pool_names(p, end, map);
8532 + if (err < 0)
8533 + goto bad;
8534 + }
8535
8536 /* old_pool */
8537 ceph_decode_32_safe(p, end, len, bad);
8538 @@ -932,15 +943,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
8539 (void) __remove_pg_mapping(&map->pg_temp, pgid);
8540
8541 /* insert */
8542 - if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
8543 - err = -EINVAL;
8544 + err = -EINVAL;
8545 + if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
8546 goto bad;
8547 - }
8548 + err = -ENOMEM;
8549 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
8550 - if (!pg) {
8551 - err = -ENOMEM;
8552 + if (!pg)
8553 goto bad;
8554 - }
8555 pg->pgid = pgid;
8556 pg->len = pglen;
8557 for (j = 0; j < pglen; j++)
8558 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
8559 index c21e33d..d9df6b8 100644
8560 --- a/net/mac80211/ibss.c
8561 +++ b/net/mac80211/ibss.c
8562 @@ -678,8 +678,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
8563 sdata_info(sdata,
8564 "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
8565
8566 - ieee80211_request_internal_scan(sdata,
8567 - ifibss->ssid, ifibss->ssid_len, NULL);
8568 + ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
8569 + NULL);
8570 }
8571
8572 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
8573 @@ -777,9 +777,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
8574 IEEE80211_SCAN_INTERVAL)) {
8575 sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
8576
8577 - ieee80211_request_internal_scan(sdata,
8578 - ifibss->ssid, ifibss->ssid_len,
8579 - ifibss->fixed_channel ? ifibss->channel : NULL);
8580 + ieee80211_request_ibss_scan(sdata, ifibss->ssid,
8581 + ifibss->ssid_len, chan);
8582 } else {
8583 int interval = IEEE80211_SCAN_INTERVAL;
8584
8585 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
8586 index 156e583..3da215c 100644
8587 --- a/net/mac80211/ieee80211_i.h
8588 +++ b/net/mac80211/ieee80211_i.h
8589 @@ -730,6 +730,10 @@ struct ieee80211_sub_if_data {
8590 u32 mntr_flags;
8591 } u;
8592
8593 + spinlock_t cleanup_stations_lock;
8594 + struct list_head cleanup_stations;
8595 + struct work_struct cleanup_stations_wk;
8596 +
8597 #ifdef CONFIG_MAC80211_DEBUGFS
8598 struct {
8599 struct dentry *dir;
8600 @@ -1247,9 +1251,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
8601
8602 /* scan/BSS handling */
8603 void ieee80211_scan_work(struct work_struct *work);
8604 -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8605 - const u8 *ssid, u8 ssid_len,
8606 - struct ieee80211_channel *chan);
8607 +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
8608 + const u8 *ssid, u8 ssid_len,
8609 + struct ieee80211_channel *chan);
8610 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
8611 struct cfg80211_scan_request *req);
8612 void ieee80211_scan_cancel(struct ieee80211_local *local);
8613 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
8614 index 7de7717..0f5af91 100644
8615 --- a/net/mac80211/iface.c
8616 +++ b/net/mac80211/iface.c
8617 @@ -793,20 +793,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
8618 flush_work(&sdata->work);
8619 /*
8620 * When we get here, the interface is marked down.
8621 - * Call rcu_barrier() to wait both for the RX path
8622 + * Call synchronize_rcu() to wait for the RX path
8623 * should it be using the interface and enqueuing
8624 - * frames at this very time on another CPU, and
8625 - * for the sta free call_rcu callbacks.
8626 + * frames at this very time on another CPU.
8627 */
8628 - rcu_barrier();
8629 -
8630 - /*
8631 - * free_sta_rcu() enqueues a work for the actual
8632 - * sta cleanup, so we need to flush it while
8633 - * sdata is still valid.
8634 - */
8635 - flush_workqueue(local->workqueue);
8636 -
8637 + synchronize_rcu();
8638 skb_queue_purge(&sdata->skb_queue);
8639
8640 /*
8641 @@ -1432,6 +1423,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
8642 mutex_unlock(&local->iflist_mtx);
8643 }
8644
8645 +static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
8646 +{
8647 + struct ieee80211_sub_if_data *sdata;
8648 +
8649 + sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
8650 +
8651 + ieee80211_cleanup_sdata_stas(sdata);
8652 +}
8653 +
8654 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
8655 struct wireless_dev **new_wdev, enum nl80211_iftype type,
8656 struct vif_params *params)
8657 @@ -1507,6 +1507,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
8658
8659 INIT_LIST_HEAD(&sdata->key_list);
8660
8661 + spin_lock_init(&sdata->cleanup_stations_lock);
8662 + INIT_LIST_HEAD(&sdata->cleanup_stations);
8663 + INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
8664 +
8665 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
8666 struct ieee80211_supported_band *sband;
8667 sband = local->hw.wiphy->bands[i];
8668 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
8669 index 43e60b5..fab706f 100644
8670 --- a/net/mac80211/scan.c
8671 +++ b/net/mac80211/scan.c
8672 @@ -819,9 +819,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
8673 return res;
8674 }
8675
8676 -int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8677 - const u8 *ssid, u8 ssid_len,
8678 - struct ieee80211_channel *chan)
8679 +int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
8680 + const u8 *ssid, u8 ssid_len,
8681 + struct ieee80211_channel *chan)
8682 {
8683 struct ieee80211_local *local = sdata->local;
8684 int ret = -EBUSY;
8685 @@ -835,22 +835,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
8686
8687 /* fill internal scan request */
8688 if (!chan) {
8689 - int i, nchan = 0;
8690 + int i, max_n;
8691 + int n_ch = 0;
8692
8693 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
8694 if (!local->hw.wiphy->bands[band])
8695 continue;
8696 - for (i = 0;
8697 - i < local->hw.wiphy->bands[band]->n_channels;
8698 - i++) {
8699 - local->int_scan_req->channels[nchan] =
8700 +
8701 + max_n = local->hw.wiphy->bands[band]->n_channels;
8702 + for (i = 0; i < max_n; i++) {
8703 + struct ieee80211_channel *tmp_ch =
8704 &local->hw.wiphy->bands[band]->channels[i];
8705 - nchan++;
8706 +
8707 + if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
8708 + IEEE80211_CHAN_DISABLED))
8709 + continue;
8710 +
8711 + local->int_scan_req->channels[n_ch] = tmp_ch;
8712 + n_ch++;
8713 }
8714 }
8715
8716 - local->int_scan_req->n_channels = nchan;
8717 + if (WARN_ON_ONCE(n_ch == 0))
8718 + goto unlock;
8719 +
8720 + local->int_scan_req->n_channels = n_ch;
8721 } else {
8722 + if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
8723 + IEEE80211_CHAN_DISABLED)))
8724 + goto unlock;
8725 +
8726 local->int_scan_req->channels[0] = chan;
8727 local->int_scan_req->n_channels = 1;
8728 }
8729 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
8730 index d2eb64e..8a9931b 100644
8731 --- a/net/mac80211/sta_info.c
8732 +++ b/net/mac80211/sta_info.c
8733 @@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
8734 return -ENOENT;
8735 }
8736
8737 -static void free_sta_work(struct work_struct *wk)
8738 +static void cleanup_single_sta(struct sta_info *sta)
8739 {
8740 - struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
8741 int ac, i;
8742 struct tid_ampdu_tx *tid_tx;
8743 struct ieee80211_sub_if_data *sdata = sta->sdata;
8744 @@ -148,11 +147,35 @@ static void free_sta_work(struct work_struct *wk)
8745 sta_info_free(local, sta);
8746 }
8747
8748 +void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
8749 +{
8750 + struct sta_info *sta;
8751 +
8752 + spin_lock_bh(&sdata->cleanup_stations_lock);
8753 + while (!list_empty(&sdata->cleanup_stations)) {
8754 + sta = list_first_entry(&sdata->cleanup_stations,
8755 + struct sta_info, list);
8756 + list_del(&sta->list);
8757 + spin_unlock_bh(&sdata->cleanup_stations_lock);
8758 +
8759 + cleanup_single_sta(sta);
8760 +
8761 + spin_lock_bh(&sdata->cleanup_stations_lock);
8762 + }
8763 +
8764 + spin_unlock_bh(&sdata->cleanup_stations_lock);
8765 +}
8766 +
8767 static void free_sta_rcu(struct rcu_head *h)
8768 {
8769 struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
8770 + struct ieee80211_sub_if_data *sdata = sta->sdata;
8771
8772 - ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
8773 + spin_lock(&sdata->cleanup_stations_lock);
8774 + list_add_tail(&sta->list, &sdata->cleanup_stations);
8775 + spin_unlock(&sdata->cleanup_stations_lock);
8776 +
8777 + ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
8778 }
8779
8780 /* protected by RCU */
8781 @@ -305,7 +328,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
8782
8783 spin_lock_init(&sta->lock);
8784 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
8785 - INIT_WORK(&sta->free_sta_wk, free_sta_work);
8786 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
8787 mutex_init(&sta->ampdu_mlme.mtx);
8788
8789 @@ -848,7 +870,7 @@ void sta_info_init(struct ieee80211_local *local)
8790
8791 void sta_info_stop(struct ieee80211_local *local)
8792 {
8793 - del_timer(&local->sta_cleanup);
8794 + del_timer_sync(&local->sta_cleanup);
8795 sta_info_flush(local, NULL);
8796 }
8797
8798 @@ -877,6 +899,20 @@ int sta_info_flush(struct ieee80211_local *local,
8799 }
8800 mutex_unlock(&local->sta_mtx);
8801
8802 + rcu_barrier();
8803 +
8804 + if (sdata) {
8805 + ieee80211_cleanup_sdata_stas(sdata);
8806 + cancel_work_sync(&sdata->cleanup_stations_wk);
8807 + } else {
8808 + mutex_lock(&local->iflist_mtx);
8809 + list_for_each_entry(sdata, &local->interfaces, list) {
8810 + ieee80211_cleanup_sdata_stas(sdata);
8811 + cancel_work_sync(&sdata->cleanup_stations_wk);
8812 + }
8813 + mutex_unlock(&local->iflist_mtx);
8814 + }
8815 +
8816 return ret;
8817 }
8818
8819 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
8820 index c88f161f..3c4c0f6 100644
8821 --- a/net/mac80211/sta_info.h
8822 +++ b/net/mac80211/sta_info.h
8823 @@ -298,7 +298,6 @@ struct sta_info {
8824 spinlock_t lock;
8825
8826 struct work_struct drv_unblock_wk;
8827 - struct work_struct free_sta_wk;
8828
8829 u16 listen_interval;
8830
8831 @@ -558,4 +557,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
8832 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
8833 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
8834
8835 +void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
8836 +
8837 #endif /* STA_INFO_H */
8838 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
8839 index 101eb88..c511e9c 100644
8840 --- a/net/mac80211/status.c
8841 +++ b/net/mac80211/status.c
8842 @@ -432,7 +432,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
8843 IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
8844 IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
8845
8846 - ieee80211_set_bar_pending(sta, tid, ssn);
8847 + if (local->hw.flags &
8848 + IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
8849 + ieee80211_stop_tx_ba_session(&sta->sta, tid);
8850 + else
8851 + ieee80211_set_bar_pending(sta, tid, ssn);
8852 }
8853 }
8854
8855 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
8856 index cdc7564..10b18b2 100644
8857 --- a/net/sunrpc/clnt.c
8858 +++ b/net/sunrpc/clnt.c
8859 @@ -234,7 +234,7 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
8860 spin_lock(&sn->rpc_client_lock);
8861 list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
8862 if (clnt->cl_program->pipe_dir_name == NULL)
8863 - break;
8864 + continue;
8865 if (rpc_clnt_skip_event(clnt, event))
8866 continue;
8867 if (atomic_inc_not_zero(&clnt->cl_count) == 0)
8868 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
8869 index 80f5dd2..e659def 100644
8870 --- a/net/sunrpc/rpc_pipe.c
8871 +++ b/net/sunrpc/rpc_pipe.c
8872 @@ -1152,14 +1152,19 @@ static void rpc_kill_sb(struct super_block *sb)
8873 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
8874
8875 mutex_lock(&sn->pipefs_sb_lock);
8876 + if (sn->pipefs_sb != sb) {
8877 + mutex_unlock(&sn->pipefs_sb_lock);
8878 + goto out;
8879 + }
8880 sn->pipefs_sb = NULL;
8881 mutex_unlock(&sn->pipefs_sb_lock);
8882 - put_net(net);
8883 dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
8884 net, NET_NAME(net));
8885 blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
8886 RPC_PIPEFS_UMOUNT,
8887 sb);
8888 + put_net(net);
8889 +out:
8890 kill_litter_super(sb);
8891 }
8892
8893 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
8894 index 6357fcb..7865b44 100644
8895 --- a/net/sunrpc/sched.c
8896 +++ b/net/sunrpc/sched.c
8897 @@ -919,16 +919,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
8898 return task;
8899 }
8900
8901 +/*
8902 + * rpc_free_task - release rpc task and perform cleanups
8903 + *
8904 + * Note that we free up the rpc_task _after_ rpc_release_calldata()
8905 + * in order to work around a workqueue dependency issue.
8906 + *
8907 + * Tejun Heo states:
8908 + * "Workqueue currently considers two work items to be the same if they're
8909 + * on the same address and won't execute them concurrently - ie. it
8910 + * makes a work item which is queued again while being executed wait
8911 + * for the previous execution to complete.
8912 + *
8913 + * If a work function frees the work item, and then waits for an event
8914 + * which should be performed by another work item and *that* work item
8915 + * recycles the freed work item, it can create a false dependency loop.
8916 + * There really is no reliable way to detect this short of verifying
8917 + * every memory free."
8918 + *
8919 + */
8920 static void rpc_free_task(struct rpc_task *task)
8921 {
8922 - const struct rpc_call_ops *tk_ops = task->tk_ops;
8923 - void *calldata = task->tk_calldata;
8924 + unsigned short tk_flags = task->tk_flags;
8925 +
8926 + rpc_release_calldata(task->tk_ops, task->tk_calldata);
8927
8928 - if (task->tk_flags & RPC_TASK_DYNAMIC) {
8929 + if (tk_flags & RPC_TASK_DYNAMIC) {
8930 dprintk("RPC: %5u freeing task\n", task->tk_pid);
8931 mempool_free(task, rpc_task_mempool);
8932 }
8933 - rpc_release_calldata(tk_ops, calldata);
8934 }
8935
8936 static void rpc_async_release(struct work_struct *work)
8937 @@ -938,8 +957,7 @@ static void rpc_async_release(struct work_struct *work)
8938
8939 static void rpc_release_resources_task(struct rpc_task *task)
8940 {
8941 - if (task->tk_rqstp)
8942 - xprt_release(task);
8943 + xprt_release(task);
8944 if (task->tk_msg.rpc_cred) {
8945 put_rpccred(task->tk_msg.rpc_cred);
8946 task->tk_msg.rpc_cred = NULL;
8947 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
8948 index bd462a5..33811db 100644
8949 --- a/net/sunrpc/xprt.c
8950 +++ b/net/sunrpc/xprt.c
8951 @@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
8952 void xprt_release(struct rpc_task *task)
8953 {
8954 struct rpc_xprt *xprt;
8955 - struct rpc_rqst *req;
8956 + struct rpc_rqst *req = task->tk_rqstp;
8957
8958 - if (!(req = task->tk_rqstp))
8959 + if (req == NULL) {
8960 + if (task->tk_client) {
8961 + rcu_read_lock();
8962 + xprt = rcu_dereference(task->tk_client->cl_xprt);
8963 + if (xprt->snd_task == task)
8964 + xprt_release_write(xprt, task);
8965 + rcu_read_unlock();
8966 + }
8967 return;
8968 + }
8969
8970 xprt = req->rq_xprt;
8971 if (task->tk_ops->rpc_count_stats != NULL)
8972 diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
8973 index 48d7c0a..bd3ba88 100644
8974 --- a/sound/arm/pxa2xx-ac97-lib.c
8975 +++ b/sound/arm/pxa2xx-ac97-lib.c
8976 @@ -18,6 +18,7 @@
8977 #include <linux/delay.h>
8978 #include <linux/module.h>
8979 #include <linux/io.h>
8980 +#include <linux/gpio.h>
8981
8982 #include <sound/ac97_codec.h>
8983 #include <sound/pxa2xx-lib.h>
8984 @@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
8985
8986 static inline void pxa_ac97_cold_pxa27x(void)
8987 {
8988 + unsigned int timeout;
8989 +
8990 GCR &= GCR_COLD_RST; /* clear everything but nCRST */
8991 GCR &= ~GCR_COLD_RST; /* then assert nCRST */
8992
8993 @@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
8994 clk_enable(ac97conf_clk);
8995 udelay(5);
8996 clk_disable(ac97conf_clk);
8997 - GCR = GCR_COLD_RST;
8998 - udelay(50);
8999 + GCR = GCR_COLD_RST | GCR_WARM_RST;
9000 + timeout = 100; /* wait for the codec-ready bit to be set */
9001 + while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
9002 + mdelay(1);
9003 }
9004 #endif
9005
9006 @@ -340,8 +345,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
9007 }
9008
9009 if (cpu_is_pxa27x()) {
9010 - /* Use GPIO 113 as AC97 Reset on Bulverde */
9011 + /*
9012 + * This gpio is needed for a work-around to a bug in the ac97
9013 + * controller during warm reset. The direction and level is set
9014 + * here so that it is an output driven high when switching from
9015 + * AC97_nRESET alt function to generic gpio.
9016 + */
9017 + ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
9018 + "pxa27x ac97 reset");
9019 + if (ret < 0) {
9020 + pr_err("%s: gpio_request_one() failed: %d\n",
9021 + __func__, ret);
9022 + goto err_conf;
9023 + }
9024 pxa27x_assert_ac97reset(reset_gpio, 0);
9025 +
9026 ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
9027 if (IS_ERR(ac97conf_clk)) {
9028 ret = PTR_ERR(ac97conf_clk);
9029 @@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
9030
9031 void pxa2xx_ac97_hw_remove(struct platform_device *dev)
9032 {
9033 + if (cpu_is_pxa27x())
9034 + gpio_free(reset_gpio);
9035 GCR |= GCR_ACLINK_OFF;
9036 free_irq(IRQ_AC97, NULL);
9037 if (ac97conf_clk) {
9038 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
9039 index a9652d6..f419f0a9 100644
9040 --- a/sound/pci/hda/hda_intel.c
9041 +++ b/sound/pci/hda/hda_intel.c
9042 @@ -559,9 +559,12 @@ enum {
9043 #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
9044
9045 /* quirks for Intel PCH */
9046 -#define AZX_DCAPS_INTEL_PCH \
9047 +#define AZX_DCAPS_INTEL_PCH_NOPM \
9048 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
9049 - AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
9050 + AZX_DCAPS_COUNT_LPIB_DELAY)
9051 +
9052 +#define AZX_DCAPS_INTEL_PCH \
9053 + (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
9054
9055 /* quirks for ATI SB / AMD Hudson */
9056 #define AZX_DCAPS_PRESET_ATI_SB \
9057 @@ -3448,13 +3451,13 @@ static void __devexit azx_remove(struct pci_dev *pci)
9058 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
9059 /* CPT */
9060 { PCI_DEVICE(0x8086, 0x1c20),
9061 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9062 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9063 /* PBG */
9064 { PCI_DEVICE(0x8086, 0x1d20),
9065 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9066 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9067 /* Panther Point */
9068 { PCI_DEVICE(0x8086, 0x1e20),
9069 - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9070 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
9071 /* Lynx Point */
9072 { PCI_DEVICE(0x8086, 0x8c20),
9073 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
9074 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
9075 index a7b522a..8799cf1 100644
9076 --- a/sound/pci/hda/patch_conexant.c
9077 +++ b/sound/pci/hda/patch_conexant.c
9078 @@ -553,24 +553,12 @@ static int conexant_build_controls(struct hda_codec *codec)
9079 return 0;
9080 }
9081
9082 -#ifdef CONFIG_PM
9083 -static int conexant_suspend(struct hda_codec *codec)
9084 -{
9085 - snd_hda_shutup_pins(codec);
9086 - return 0;
9087 -}
9088 -#endif
9089 -
9090 static const struct hda_codec_ops conexant_patch_ops = {
9091 .build_controls = conexant_build_controls,
9092 .build_pcms = conexant_build_pcms,
9093 .init = conexant_init,
9094 .free = conexant_free,
9095 .set_power_state = conexant_set_power,
9096 -#ifdef CONFIG_PM
9097 - .suspend = conexant_suspend,
9098 -#endif
9099 - .reboot_notify = snd_hda_shutup_pins,
9100 };
9101
9102 #ifdef CONFIG_SND_HDA_INPUT_BEEP
9103 @@ -4393,10 +4381,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
9104 .init = cx_auto_init,
9105 .free = conexant_free,
9106 .unsol_event = snd_hda_jack_unsol_event,
9107 -#ifdef CONFIG_PM
9108 - .suspend = conexant_suspend,
9109 -#endif
9110 - .reboot_notify = snd_hda_shutup_pins,
9111 };
9112
9113 /*
9114 diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
9115 index 054967d..08ae3cb 100644
9116 --- a/sound/soc/codecs/arizona.c
9117 +++ b/sound/soc/codecs/arizona.c
9118 @@ -409,15 +409,9 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9119 case SND_SOC_DAIFMT_DSP_A:
9120 mode = 0;
9121 break;
9122 - case SND_SOC_DAIFMT_DSP_B:
9123 - mode = 1;
9124 - break;
9125 case SND_SOC_DAIFMT_I2S:
9126 mode = 2;
9127 break;
9128 - case SND_SOC_DAIFMT_LEFT_J:
9129 - mode = 3;
9130 - break;
9131 default:
9132 arizona_aif_err(dai, "Unsupported DAI format %d\n",
9133 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9134 @@ -677,7 +671,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
9135 snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
9136 ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
9137 snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
9138 - ARIZONA_AIF1_RATE_MASK, 8);
9139 + ARIZONA_AIF1_RATE_MASK,
9140 + 8 << ARIZONA_AIF1_RATE_SHIFT);
9141 break;
9142 default:
9143 arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
9144 diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
9145 index 36ec649..cff8dfb 100644
9146 --- a/sound/soc/codecs/arizona.h
9147 +++ b/sound/soc/codecs/arizona.h
9148 @@ -32,15 +32,15 @@
9149
9150 #define ARIZONA_FLL_SRC_MCLK1 0
9151 #define ARIZONA_FLL_SRC_MCLK2 1
9152 -#define ARIZONA_FLL_SRC_SLIMCLK 2
9153 -#define ARIZONA_FLL_SRC_FLL1 3
9154 -#define ARIZONA_FLL_SRC_FLL2 4
9155 -#define ARIZONA_FLL_SRC_AIF1BCLK 5
9156 -#define ARIZONA_FLL_SRC_AIF2BCLK 6
9157 -#define ARIZONA_FLL_SRC_AIF3BCLK 7
9158 -#define ARIZONA_FLL_SRC_AIF1LRCLK 8
9159 -#define ARIZONA_FLL_SRC_AIF2LRCLK 9
9160 -#define ARIZONA_FLL_SRC_AIF3LRCLK 10
9161 +#define ARIZONA_FLL_SRC_SLIMCLK 3
9162 +#define ARIZONA_FLL_SRC_FLL1 4
9163 +#define ARIZONA_FLL_SRC_FLL2 5
9164 +#define ARIZONA_FLL_SRC_AIF1BCLK 8
9165 +#define ARIZONA_FLL_SRC_AIF2BCLK 9
9166 +#define ARIZONA_FLL_SRC_AIF3BCLK 10
9167 +#define ARIZONA_FLL_SRC_AIF1LRCLK 12
9168 +#define ARIZONA_FLL_SRC_AIF2LRCLK 13
9169 +#define ARIZONA_FLL_SRC_AIF3LRCLK 14
9170
9171 #define ARIZONA_MIXER_VOL_MASK 0x00FE
9172 #define ARIZONA_MIXER_VOL_SHIFT 1
9173 diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
9174 index 5be42bf..4068f24 100644
9175 --- a/sound/soc/codecs/sigmadsp.c
9176 +++ b/sound/soc/codecs/sigmadsp.c
9177 @@ -225,7 +225,7 @@ EXPORT_SYMBOL(process_sigma_firmware);
9178 static int sigma_action_write_regmap(void *control_data,
9179 const struct sigma_action *sa, size_t len)
9180 {
9181 - return regmap_raw_write(control_data, le16_to_cpu(sa->addr),
9182 + return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
9183 sa->payload, len - 2);
9184 }
9185
9186 diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
9187 index 9e31448..18171ad 100644
9188 --- a/sound/soc/codecs/sta529.c
9189 +++ b/sound/soc/codecs/sta529.c
9190 @@ -74,9 +74,10 @@
9191 SNDRV_PCM_FMTBIT_S32_LE)
9192 #define S2PC_VALUE 0x98
9193 #define CLOCK_OUT 0x60
9194 -#define LEFT_J_DATA_FORMAT 0x10
9195 -#define I2S_DATA_FORMAT 0x12
9196 -#define RIGHT_J_DATA_FORMAT 0x14
9197 +#define DATA_FORMAT_MSK 0x0E
9198 +#define LEFT_J_DATA_FORMAT 0x00
9199 +#define I2S_DATA_FORMAT 0x02
9200 +#define RIGHT_J_DATA_FORMAT 0x04
9201 #define CODEC_MUTE_VAL 0x80
9202
9203 #define POWER_CNTLMSAK 0x40
9204 @@ -289,7 +290,7 @@ static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
9205 return -EINVAL;
9206 }
9207
9208 - snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
9209 + snd_soc_update_bits(codec, STA529_S2PCFG0, DATA_FORMAT_MSK, mode);
9210
9211 return 0;
9212 }
9213 diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
9214 index 683dc43..cdab549 100644
9215 --- a/sound/soc/codecs/wm2000.c
9216 +++ b/sound/soc/codecs/wm2000.c
9217 @@ -209,9 +209,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
9218
9219 ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
9220 if (wm2000->speech_clarity)
9221 - ret &= ~WM2000_SPEECH_CLARITY;
9222 - else
9223 ret |= WM2000_SPEECH_CLARITY;
9224 + else
9225 + ret &= ~WM2000_SPEECH_CLARITY;
9226 wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
9227
9228 wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
9229 diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
9230 index eab64a1..7ef4e96 100644
9231 --- a/sound/soc/codecs/wm2200.c
9232 +++ b/sound/soc/codecs/wm2200.c
9233 @@ -1380,15 +1380,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9234 case SND_SOC_DAIFMT_DSP_A:
9235 fmt_val = 0;
9236 break;
9237 - case SND_SOC_DAIFMT_DSP_B:
9238 - fmt_val = 1;
9239 - break;
9240 case SND_SOC_DAIFMT_I2S:
9241 fmt_val = 2;
9242 break;
9243 - case SND_SOC_DAIFMT_LEFT_J:
9244 - fmt_val = 3;
9245 - break;
9246 default:
9247 dev_err(codec->dev, "Unsupported DAI format %d\n",
9248 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9249 @@ -1440,7 +1434,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9250 WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
9251 lrclk);
9252 snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
9253 - WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
9254 + WM2200_AIF1_FMT_MASK, fmt_val);
9255
9256 return 0;
9257 }
9258 diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
9259 index 7f56758..a351ca0 100644
9260 --- a/sound/soc/codecs/wm5100.c
9261 +++ b/sound/soc/codecs/wm5100.c
9262 @@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
9263 case SND_SOC_DAIFMT_DSP_A:
9264 mask = 0;
9265 break;
9266 - case SND_SOC_DAIFMT_DSP_B:
9267 - mask = 1;
9268 - break;
9269 case SND_SOC_DAIFMT_I2S:
9270 mask = 2;
9271 break;
9272 - case SND_SOC_DAIFMT_LEFT_J:
9273 - mask = 3;
9274 - break;
9275 default:
9276 dev_err(codec->dev, "Unsupported DAI format %d\n",
9277 fmt & SND_SOC_DAIFMT_FORMAT_MASK);
9278 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
9279 index b2b2b37..1c2b337 100644
9280 --- a/sound/soc/codecs/wm8994.c
9281 +++ b/sound/soc/codecs/wm8994.c
9282 @@ -3839,20 +3839,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
9283 wm8994->hubs.no_cache_dac_hp_direct = true;
9284 wm8994->fll_byp = true;
9285
9286 - switch (control->cust_id) {
9287 - case 0:
9288 - case 2:
9289 - wm8994->hubs.dcs_codes_l = -9;
9290 - wm8994->hubs.dcs_codes_r = -7;
9291 - break;
9292 - case 1:
9293 - case 3:
9294 - wm8994->hubs.dcs_codes_l = -8;
9295 - wm8994->hubs.dcs_codes_r = -7;
9296 - break;
9297 - default:
9298 - break;
9299 - }
9300 + wm8994->hubs.dcs_codes_l = -9;
9301 + wm8994->hubs.dcs_codes_r = -7;
9302
9303 snd_soc_update_bits(codec, WM8994_ANALOGUE_HP_1,
9304 WM1811_HPOUT1_ATTN, WM1811_HPOUT1_ATTN);
9305 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
9306 index ef22d0b..d340644 100644
9307 --- a/sound/soc/soc-pcm.c
9308 +++ b/sound/soc/soc-pcm.c
9309 @@ -1240,6 +1240,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
9310 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
9311 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
9312 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
9313 + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
9314 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
9315 continue;
9316
9317 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9318 index be70035..6e8fa7e 100644
9319 --- a/virt/kvm/kvm_main.c
9320 +++ b/virt/kvm/kvm_main.c
9321 @@ -709,8 +709,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
9322 int r;
9323 gfn_t base_gfn;
9324 unsigned long npages;
9325 - unsigned long i;
9326 - struct kvm_memory_slot *memslot;
9327 + struct kvm_memory_slot *memslot, *slot;
9328 struct kvm_memory_slot old, new;
9329 struct kvm_memslots *slots, *old_memslots;
9330
9331 @@ -761,13 +760,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
9332
9333 /* Check for overlaps */
9334 r = -EEXIST;
9335 - for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
9336 - struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
9337 -
9338 - if (s == memslot || !s->npages)
9339 + kvm_for_each_memslot(slot, kvm->memslots) {
9340 + if (slot->id >= KVM_MEMORY_SLOTS || slot == memslot)
9341 continue;
9342 - if (!((base_gfn + npages <= s->base_gfn) ||
9343 - (base_gfn >= s->base_gfn + s->npages)))
9344 + if (!((base_gfn + npages <= slot->base_gfn) ||
9345 + (base_gfn >= slot->base_gfn + slot->npages)))
9346 goto out_free;
9347 }
9348