Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.10/0104-3.10.5-all-fixes.patch



Revision 2253
Tue Aug 13 14:26:09 2013 UTC by niro
File size: 164202 bytes
3.10.6-magellan-r1
1 diff --git a/Makefile b/Makefile
2 index b4df9b2..f8349d0 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 4
9 +SUBLEVEL = 5
10 EXTRAVERSION =
11 NAME = Unicycling Gorilla
12
13 diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
14 index aabc02a..d1153c8 100644
15 --- a/arch/arm/boot/compressed/atags_to_fdt.c
16 +++ b/arch/arm/boot/compressed/atags_to_fdt.c
17 @@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path,
18 return fdt_getprop(fdt, offset, property, len);
19 }
20
21 +static uint32_t get_cell_size(const void *fdt)
22 +{
23 + int len;
24 + uint32_t cell_size = 1;
25 + const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len);
26 +
27 + if (size_len)
28 + cell_size = fdt32_to_cpu(*size_len);
29 + return cell_size;
30 +}
31 +
32 static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
33 {
34 char cmdline[COMMAND_LINE_SIZE];
35 @@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
36 int atags_to_fdt(void *atag_list, void *fdt, int total_space)
37 {
38 struct tag *atag = atag_list;
39 - uint32_t mem_reg_property[2 * NR_BANKS];
40 + /* In the case of 64 bits memory size, need to reserve 2 cells for
41 + * address and size for each bank */
42 + uint32_t mem_reg_property[2 * 2 * NR_BANKS];
43 int memcount = 0;
44 - int ret;
45 + int ret, memsize;
46
47 /* make sure we've got an aligned pointer */
48 if ((u32)atag_list & 0x3)
49 @@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
50 continue;
51 if (!atag->u.mem.size)
52 continue;
53 - mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
54 - mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
55 + memsize = get_cell_size(fdt);
56 +
57 + if (memsize == 2) {
58 + /* if memsize is 2, that means that
59 + * each data needs 2 cells of 32 bits,
60 + * so the data are 64 bits */
61 + uint64_t *mem_reg_prop64 =
62 + (uint64_t *)mem_reg_property;
63 + mem_reg_prop64[memcount++] =
64 + cpu_to_fdt64(atag->u.mem.start);
65 + mem_reg_prop64[memcount++] =
66 + cpu_to_fdt64(atag->u.mem.size);
67 + } else {
68 + mem_reg_property[memcount++] =
69 + cpu_to_fdt32(atag->u.mem.start);
70 + mem_reg_property[memcount++] =
71 + cpu_to_fdt32(atag->u.mem.size);
72 + }
73 +
74 } else if (atag->hdr.tag == ATAG_INITRD2) {
75 uint32_t initrd_start, initrd_size;
76 initrd_start = atag->u.initrd.start;
77 @@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
78 }
79 }
80
81 - if (memcount)
82 - setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
83 + if (memcount) {
84 + setprop(fdt, "/memory", "reg", mem_reg_property,
85 + 4 * memcount * memsize);
86 + }
87
88 return fdt_pack(fdt);
89 }
90 diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
91 index c1df590..49fa55b 100644
92 --- a/arch/powerpc/include/asm/module.h
93 +++ b/arch/powerpc/include/asm/module.h
94 @@ -82,10 +82,9 @@ struct exception_table_entry;
95 void sort_ex_table(struct exception_table_entry *start,
96 struct exception_table_entry *finish);
97
98 -#ifdef CONFIG_MODVERSIONS
99 +#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
100 #define ARCH_RELOCATES_KCRCTAB
101 -
102 -extern const unsigned long reloc_start[];
103 +#define reloc_start PHYSICAL_START
104 #endif
105 #endif /* __KERNEL__ */
106 #endif /* _ASM_POWERPC_MODULE_H */
107 diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
108 index 654e479..f096e72 100644
109 --- a/arch/powerpc/kernel/vmlinux.lds.S
110 +++ b/arch/powerpc/kernel/vmlinux.lds.S
111 @@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
112 #endif
113 SECTIONS
114 {
115 - . = 0;
116 - reloc_start = .;
117 -
118 . = KERNELBASE;
119
120 /*
121 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
122 index b44577b..ec94e11 100644
123 --- a/arch/x86/kernel/acpi/sleep.c
124 +++ b/arch/x86/kernel/acpi/sleep.c
125 @@ -48,9 +48,20 @@ int acpi_suspend_lowlevel(void)
126 #ifndef CONFIG_64BIT
127 native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
128
129 + /*
130 + * We have to check that we can write back the value, and not
131 + * just read it. At least on 90 nm Pentium M (Family 6, Model
132 + * 13), reading an invalid MSR is not guaranteed to trap, see
133 + * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
134 + * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
135 + * nm process with 512-KB L2 Cache Specification Update".
136 + */
137 if (!rdmsr_safe(MSR_EFER,
138 &header->pmode_efer_low,
139 - &header->pmode_efer_high))
140 + &header->pmode_efer_high) &&
141 + !wrmsr_safe(MSR_EFER,
142 + header->pmode_efer_low,
143 + header->pmode_efer_high))
144 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
145 #endif /* !CONFIG_64BIT */
146
147 @@ -61,7 +72,10 @@ int acpi_suspend_lowlevel(void)
148 }
149 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
150 &header->pmode_misc_en_low,
151 - &header->pmode_misc_en_high))
152 + &header->pmode_misc_en_high) &&
153 + !wrmsr_safe(MSR_IA32_MISC_ENABLE,
154 + header->pmode_misc_en_low,
155 + header->pmode_misc_en_high))
156 header->pmode_behavior |=
157 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
158 header->realmode_flags = acpi_realmode_flags;
159 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
160 index fa72a39..3982357 100644
161 --- a/arch/x86/kernel/cpu/mtrr/generic.c
162 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
163 @@ -510,8 +510,9 @@ generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
164 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
165 unsigned long *size, mtrr_type *type)
166 {
167 - unsigned int mask_lo, mask_hi, base_lo, base_hi;
168 - unsigned int tmp, hi;
169 + u32 mask_lo, mask_hi, base_lo, base_hi;
170 + unsigned int hi;
171 + u64 tmp, mask;
172
173 /*
174 * get_mtrr doesn't need to update mtrr_state, also it could be called
175 @@ -532,18 +533,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
176 rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
177
178 /* Work out the shifted address mask: */
179 - tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
180 - mask_lo = size_or_mask | tmp;
181 + tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
182 + mask = size_or_mask | tmp;
183
184 /* Expand tmp with high bits to all 1s: */
185 - hi = fls(tmp);
186 + hi = fls64(tmp);
187 if (hi > 0) {
188 - tmp |= ~((1<<(hi - 1)) - 1);
189 + tmp |= ~((1ULL<<(hi - 1)) - 1);
190
191 - if (tmp != mask_lo) {
192 + if (tmp != mask) {
193 printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
194 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
195 - mask_lo = tmp;
196 + mask = tmp;
197 }
198 }
199
200 @@ -551,8 +552,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
201 * This works correctly if size is a power of two, i.e. a
202 * contiguous range:
203 */
204 - *size = -mask_lo;
205 - *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
206 + *size = -mask;
207 + *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
208 *type = base_lo & 0xff;
209
210 out_put_cpu:
211 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
212 index 726bf96..ca22b73 100644
213 --- a/arch/x86/kernel/cpu/mtrr/main.c
214 +++ b/arch/x86/kernel/cpu/mtrr/main.c
215 @@ -305,7 +305,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
216 return -EINVAL;
217 }
218
219 - if (base & size_or_mask || size & size_or_mask) {
220 + if ((base | (base + size - 1)) >>
221 + (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
222 pr_warning("mtrr: base or size exceeds the MTRR width\n");
223 return -EINVAL;
224 }
225 @@ -583,6 +584,7 @@ static struct syscore_ops mtrr_syscore_ops = {
226
227 int __initdata changed_by_mtrr_cleanup;
228
229 +#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
230 /**
231 * mtrr_bp_init - initialize mtrrs on the boot CPU
232 *
233 @@ -600,7 +602,7 @@ void __init mtrr_bp_init(void)
234
235 if (cpu_has_mtrr) {
236 mtrr_if = &generic_mtrr_ops;
237 - size_or_mask = 0xff000000; /* 36 bits */
238 + size_or_mask = SIZE_OR_MASK_BITS(36);
239 size_and_mask = 0x00f00000;
240 phys_addr = 36;
241
242 @@ -619,7 +621,7 @@ void __init mtrr_bp_init(void)
243 boot_cpu_data.x86_mask == 0x4))
244 phys_addr = 36;
245
246 - size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
247 + size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
248 size_and_mask = ~size_or_mask & 0xfffff00000ULL;
249 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
250 boot_cpu_data.x86 == 6) {
251 @@ -627,7 +629,7 @@ void __init mtrr_bp_init(void)
252 * VIA C* family have Intel style MTRRs,
253 * but don't support PAE
254 */
255 - size_or_mask = 0xfff00000; /* 32 bits */
256 + size_or_mask = SIZE_OR_MASK_BITS(32);
257 size_and_mask = 0;
258 phys_addr = 32;
259 }
260 @@ -637,21 +639,21 @@ void __init mtrr_bp_init(void)
261 if (cpu_has_k6_mtrr) {
262 /* Pre-Athlon (K6) AMD CPU MTRRs */
263 mtrr_if = mtrr_ops[X86_VENDOR_AMD];
264 - size_or_mask = 0xfff00000; /* 32 bits */
265 + size_or_mask = SIZE_OR_MASK_BITS(32);
266 size_and_mask = 0;
267 }
268 break;
269 case X86_VENDOR_CENTAUR:
270 if (cpu_has_centaur_mcr) {
271 mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
272 - size_or_mask = 0xfff00000; /* 32 bits */
273 + size_or_mask = SIZE_OR_MASK_BITS(32);
274 size_and_mask = 0;
275 }
276 break;
277 case X86_VENDOR_CYRIX:
278 if (cpu_has_cyrix_arr) {
279 mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
280 - size_or_mask = 0xfff00000; /* 32 bits */
281 + size_or_mask = SIZE_OR_MASK_BITS(32);
282 size_and_mask = 0;
283 }
284 break;
285 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
286 index 321d65e..a836860 100644
287 --- a/arch/x86/kernel/head_64.S
288 +++ b/arch/x86/kernel/head_64.S
289 @@ -513,7 +513,7 @@ ENTRY(phys_base)
290 #include "../../x86/xen/xen-head.S"
291
292 .section .bss, "aw", @nobits
293 - .align L1_CACHE_BYTES
294 + .align PAGE_SIZE
295 ENTRY(idt_table)
296 .skip IDT_ENTRIES * 16
297
298 diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
299 index 5e6301e..2cf0244 100644
300 --- a/drivers/acpi/acpi_memhotplug.c
301 +++ b/drivers/acpi/acpi_memhotplug.c
302 @@ -283,6 +283,7 @@ static int acpi_memory_device_add(struct acpi_device *device,
303 /* Get the range from the _CRS */
304 result = acpi_memory_get_device_resources(mem_device);
305 if (result) {
306 + device->driver_data = NULL;
307 kfree(mem_device);
308 return result;
309 }
310 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
311 index 14807e5..af658b2 100644
312 --- a/drivers/acpi/scan.c
313 +++ b/drivers/acpi/scan.c
314 @@ -237,10 +237,12 @@ static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
315
316 mutex_lock(&acpi_scan_lock);
317
318 - acpi_bus_get_device(handle, &device);
319 - if (device) {
320 - dev_warn(&device->dev, "Attempt to re-insert\n");
321 - goto out;
322 + if (ost_source != ACPI_NOTIFY_BUS_CHECK) {
323 + acpi_bus_get_device(handle, &device);
324 + if (device) {
325 + dev_warn(&device->dev, "Attempt to re-insert\n");
326 + goto out;
327 + }
328 }
329 acpi_evaluate_hotplug_ost(handle, ost_source,
330 ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
331 @@ -1890,6 +1892,9 @@ static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
332 if (acpi_bus_get_device(handle, &device))
333 return AE_CTRL_DEPTH;
334
335 + if (device->handler)
336 + return AE_OK;
337 +
338 ret = acpi_scan_attach_handler(device);
339 if (ret)
340 return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
341 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
342 index 440eadf..0e4b96b 100644
343 --- a/drivers/acpi/video.c
344 +++ b/drivers/acpi/video.c
345 @@ -450,6 +450,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
346 },
347 {
348 .callback = video_ignore_initial_backlight,
349 + .ident = "Fujitsu E753",
350 + .matches = {
351 + DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"),
352 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"),
353 + },
354 + },
355 + {
356 + .callback = video_ignore_initial_backlight,
357 .ident = "HP Pavilion dm4",
358 .matches = {
359 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
360 diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
361 index a5a3ebc..78eabff 100644
362 --- a/drivers/ata/Kconfig
363 +++ b/drivers/ata/Kconfig
364 @@ -107,7 +107,7 @@ config SATA_FSL
365 If unsure, say N.
366
367 config SATA_INIC162X
368 - tristate "Initio 162x SATA support"
369 + tristate "Initio 162x SATA support (Very Experimental)"
370 depends on PCI
371 help
372 This option enables support for Initio 162x Serial ATA.
373 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
374 index 8eae659..b92913a 100644
375 --- a/drivers/ata/ata_piix.c
376 +++ b/drivers/ata/ata_piix.c
377 @@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
378 /* SATA Controller IDE (Wellsburg) */
379 { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
380 /* SATA Controller IDE (Wellsburg) */
381 - { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
382 + { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
383 /* SATA Controller IDE (Wellsburg) */
384 { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
385 /* SATA Controller IDE (Wellsburg) */
386 diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
387 index 1e6827c..74456fa 100644
388 --- a/drivers/ata/sata_inic162x.c
389 +++ b/drivers/ata/sata_inic162x.c
390 @@ -6,6 +6,18 @@
391 *
392 * This file is released under GPL v2.
393 *
394 + * **** WARNING ****
395 + *
396 + * This driver never worked properly and unfortunately data corruption is
397 + * relatively common. There isn't anyone working on the driver and there's
398 + * no support from the vendor. Do not use this driver in any production
399 + * environment.
400 + *
401 + * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
402 + * https://bugzilla.kernel.org/show_bug.cgi?id=60565
403 + *
404 + * *****************
405 + *
406 * This controller is eccentric and easily locks up if something isn't
407 * right. Documentation is available at initio's website but it only
408 * documents registers (not programming model).
409 @@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
410
411 ata_print_version_once(&pdev->dev, DRV_VERSION);
412
413 + dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
414 +
415 /* alloc host */
416 host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
417 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
418 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
419 index a941dcf..d0c81d1 100644
420 --- a/drivers/base/regmap/regmap.c
421 +++ b/drivers/base/regmap/regmap.c
422 @@ -1717,7 +1717,7 @@ int regmap_async_complete(struct regmap *map)
423 int ret;
424
425 /* Nothing to do with no async support */
426 - if (!map->bus->async_write)
427 + if (!map->bus || !map->bus->async_write)
428 return 0;
429
430 trace_regmap_async_complete_start(map->dev);
431 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
432 index dd5b2fe..d81dfca 100644
433 --- a/drivers/block/xen-blkback/blkback.c
434 +++ b/drivers/block/xen-blkback/blkback.c
435 @@ -647,7 +647,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
436 int status = BLKIF_RSP_OKAY;
437 struct block_device *bdev = blkif->vbd.bdev;
438 unsigned long secure;
439 + struct phys_req preq;
440 +
441 + preq.sector_number = req->u.discard.sector_number;
442 + preq.nr_sects = req->u.discard.nr_sectors;
443
444 + err = xen_vbd_translate(&preq, blkif, WRITE);
445 + if (err) {
446 + pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
447 + preq.sector_number,
448 + preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
449 + goto fail_response;
450 + }
451 blkif->st_ds_req++;
452
453 xen_blkif_get(blkif);
454 @@ -658,7 +669,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
455 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
456 req->u.discard.nr_sectors,
457 GFP_KERNEL, secure);
458 -
459 +fail_response:
460 if (err == -EOPNOTSUPP) {
461 pr_debug(DRV_PFX "discard op failed, not supported\n");
462 status = BLKIF_RSP_EOPNOTSUPP;
463 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
464 index 07f2840..6d6a0b4 100644
465 --- a/drivers/cpufreq/intel_pstate.c
466 +++ b/drivers/cpufreq/intel_pstate.c
467 @@ -103,10 +103,10 @@ struct pstate_adjust_policy {
468 static struct pstate_adjust_policy default_policy = {
469 .sample_rate_ms = 10,
470 .deadband = 0,
471 - .setpoint = 109,
472 - .p_gain_pct = 17,
473 + .setpoint = 97,
474 + .p_gain_pct = 20,
475 .d_gain_pct = 0,
476 - .i_gain_pct = 4,
477 + .i_gain_pct = 0,
478 };
479
480 struct perf_limits {
481 @@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
482 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
483 {
484 int32_t busy_scaled;
485 - int32_t core_busy, turbo_pstate, current_pstate;
486 + int32_t core_busy, max_pstate, current_pstate;
487
488 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
489 - turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
490 + max_pstate = int_tofp(cpu->pstate.max_pstate);
491 current_pstate = int_tofp(cpu->pstate.current_pstate);
492 - busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
493 + busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
494
495 return fp_toint(busy_scaled);
496 }
497 diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
498 index 5996521..84573b4 100644
499 --- a/drivers/crypto/caam/caamhash.c
500 +++ b/drivers/crypto/caam/caamhash.c
501 @@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
502 dma_addr_t src_dma, dst_dma;
503 int ret = 0;
504
505 - desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
506 + desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
507 if (!desc) {
508 dev_err(jrdev, "unable to allocate key input memory\n");
509 return -ENOMEM;
510 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
511 index 7ef316f..ac1b43a 100644
512 --- a/drivers/firewire/core-cdev.c
513 +++ b/drivers/firewire/core-cdev.c
514 @@ -54,6 +54,7 @@
515 #define FW_CDEV_KERNEL_VERSION 5
516 #define FW_CDEV_VERSION_EVENT_REQUEST2 4
517 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
518 +#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
519
520 struct client {
521 u32 version;
522 @@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
523 a->channel, a->speed, a->header_size, cb, client);
524 if (IS_ERR(context))
525 return PTR_ERR(context);
526 + if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
527 + context->drop_overflow_headers = true;
528
529 /* We only support one context at this time. */
530 spin_lock_irq(&client->lock);
531 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
532 index 9e1db64..afb701e 100644
533 --- a/drivers/firewire/ohci.c
534 +++ b/drivers/firewire/ohci.c
535 @@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
536 {
537 u32 *ctx_hdr;
538
539 - if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
540 + if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
541 + if (ctx->base.drop_overflow_headers)
542 + return;
543 flush_iso_completions(ctx);
544 + }
545
546 ctx_hdr = ctx->header + ctx->header_length;
547 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
548 @@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
549
550 sync_it_packet_for_cpu(context, d);
551
552 - if (ctx->header_length + 4 > PAGE_SIZE)
553 + if (ctx->header_length + 4 > PAGE_SIZE) {
554 + if (ctx->base.drop_overflow_headers)
555 + return 1;
556 flush_iso_completions(ctx);
557 + }
558
559 ctx_hdr = ctx->header + ctx->header_length;
560 ctx->last_timestamp = le16_to_cpu(last->res_count);
561 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
562 index 3b315ba..f968590 100644
563 --- a/drivers/gpu/drm/i915/i915_dma.c
564 +++ b/drivers/gpu/drm/i915/i915_dma.c
565 @@ -1511,6 +1511,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
566 dev_priv->dev = dev;
567 dev_priv->info = info;
568
569 + spin_lock_init(&dev_priv->irq_lock);
570 + spin_lock_init(&dev_priv->gpu_error.lock);
571 + spin_lock_init(&dev_priv->rps.lock);
572 + mutex_init(&dev_priv->dpio_lock);
573 + mutex_init(&dev_priv->rps.hw_lock);
574 + mutex_init(&dev_priv->modeset_restore_lock);
575 +
576 i915_dump_device_info(dev_priv);
577
578 if (i915_get_bridge_dev(dev)) {
579 @@ -1601,6 +1608,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
580 intel_detect_pch(dev);
581
582 intel_irq_init(dev);
583 + intel_pm_init(dev);
584 + intel_gt_sanitize(dev);
585 intel_gt_init(dev);
586
587 /* Try to make sure MCHBAR is enabled before poking at it */
588 @@ -1626,14 +1635,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
589 if (!IS_I945G(dev) && !IS_I945GM(dev))
590 pci_enable_msi(dev->pdev);
591
592 - spin_lock_init(&dev_priv->irq_lock);
593 - spin_lock_init(&dev_priv->gpu_error.lock);
594 - spin_lock_init(&dev_priv->rps.lock);
595 - mutex_init(&dev_priv->dpio_lock);
596 -
597 - mutex_init(&dev_priv->rps.hw_lock);
598 - mutex_init(&dev_priv->modeset_restore_lock);
599 -
600 dev_priv->num_plane = 1;
601 if (IS_VALLEYVIEW(dev))
602 dev_priv->num_plane = 2;
603 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
604 index a2e4953..bc6cd31 100644
605 --- a/drivers/gpu/drm/i915/i915_drv.c
606 +++ b/drivers/gpu/drm/i915/i915_drv.c
607 @@ -685,7 +685,7 @@ static int i915_drm_thaw(struct drm_device *dev)
608 {
609 int error = 0;
610
611 - intel_gt_reset(dev);
612 + intel_gt_sanitize(dev);
613
614 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
615 mutex_lock(&dev->struct_mutex);
616 @@ -711,7 +711,7 @@ int i915_resume(struct drm_device *dev)
617
618 pci_set_master(dev->pdev);
619
620 - intel_gt_reset(dev);
621 + intel_gt_sanitize(dev);
622
623 /*
624 * Platforms with opregion should have sane BIOS, older ones (gen3 and
625 @@ -1247,21 +1247,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
626
627 #define __i915_read(x, y) \
628 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
629 + unsigned long irqflags; \
630 u##x val = 0; \
631 + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
632 if (IS_GEN5(dev_priv->dev)) \
633 ilk_dummy_write(dev_priv); \
634 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
635 - unsigned long irqflags; \
636 - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
637 if (dev_priv->forcewake_count == 0) \
638 dev_priv->gt.force_wake_get(dev_priv); \
639 val = read##y(dev_priv->regs + reg); \
640 if (dev_priv->forcewake_count == 0) \
641 dev_priv->gt.force_wake_put(dev_priv); \
642 - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
643 } else { \
644 val = read##y(dev_priv->regs + reg); \
645 } \
646 + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
647 trace_i915_reg_rw(false, reg, val, sizeof(val)); \
648 return val; \
649 }
650 @@ -1274,8 +1274,10 @@ __i915_read(64, q)
651
652 #define __i915_write(x, y) \
653 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
654 + unsigned long irqflags; \
655 u32 __fifo_ret = 0; \
656 trace_i915_reg_rw(true, reg, val, sizeof(val)); \
657 + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
658 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
659 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
660 } \
661 @@ -1287,6 +1289,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
662 gen6_gt_check_fifodbg(dev_priv); \
663 } \
664 hsw_unclaimed_reg_check(dev_priv, reg); \
665 + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
666 }
667 __i915_write(8, b)
668 __i915_write(16, w)
669 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
670 index 9669a0b..47d8b68 100644
671 --- a/drivers/gpu/drm/i915/i915_drv.h
672 +++ b/drivers/gpu/drm/i915/i915_drv.h
673 @@ -491,6 +491,7 @@ enum intel_sbi_destination {
674 #define QUIRK_PIPEA_FORCE (1<<0)
675 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
676 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
677 +#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
678
679 struct intel_fbdev;
680 struct intel_fbc_work;
681 @@ -1474,9 +1475,10 @@ void i915_hangcheck_elapsed(unsigned long data);
682 void i915_handle_error(struct drm_device *dev, bool wedged);
683
684 extern void intel_irq_init(struct drm_device *dev);
685 +extern void intel_pm_init(struct drm_device *dev);
686 extern void intel_hpd_init(struct drm_device *dev);
687 extern void intel_gt_init(struct drm_device *dev);
688 -extern void intel_gt_reset(struct drm_device *dev);
689 +extern void intel_gt_sanitize(struct drm_device *dev);
690
691 void i915_error_state_free(struct kref *error_ref);
692
693 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
694 index 34118b0..0a30088 100644
695 --- a/drivers/gpu/drm/i915/i915_gem.c
696 +++ b/drivers/gpu/drm/i915/i915_gem.c
697 @@ -1881,6 +1881,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
698 u32 seqno = intel_ring_get_seqno(ring);
699
700 BUG_ON(ring == NULL);
701 + if (obj->ring != ring && obj->last_write_seqno) {
702 + /* Keep the seqno relative to the current ring */
703 + obj->last_write_seqno = seqno;
704 + }
705 obj->ring = ring;
706
707 /* Add a reference if we're newly entering the active list. */
708 @@ -2134,7 +2138,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
709
710 for (i = 0; i < dev_priv->num_fence_regs; i++) {
711 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
712 - i915_gem_write_fence(dev, i, reg->obj);
713 +
714 + /*
715 + * Commit delayed tiling changes if we have an object still
716 + * attached to the fence, otherwise just clear the fence.
717 + */
718 + if (reg->obj) {
719 + i915_gem_object_update_fence(reg->obj, reg,
720 + reg->obj->tiling_mode);
721 + } else {
722 + i915_gem_write_fence(dev, i, NULL);
723 + }
724 }
725 }
726
727 @@ -2534,7 +2548,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
728 drm_i915_private_t *dev_priv = dev->dev_private;
729 int fence_reg;
730 int fence_pitch_shift;
731 - uint64_t val;
732
733 if (INTEL_INFO(dev)->gen >= 6) {
734 fence_reg = FENCE_REG_SANDYBRIDGE_0;
735 @@ -2544,8 +2557,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
736 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
737 }
738
739 + fence_reg += reg * 8;
740 +
741 + /* To w/a incoherency with non-atomic 64-bit register updates,
742 + * we split the 64-bit update into two 32-bit writes. In order
743 + * for a partial fence not to be evaluated between writes, we
744 + * precede the update with write to turn off the fence register,
745 + * and only enable the fence as the last step.
746 + *
747 + * For extra levels of paranoia, we make sure each step lands
748 + * before applying the next step.
749 + */
750 + I915_WRITE(fence_reg, 0);
751 + POSTING_READ(fence_reg);
752 +
753 if (obj) {
754 u32 size = obj->gtt_space->size;
755 + uint64_t val;
756
757 val = (uint64_t)((obj->gtt_offset + size - 4096) &
758 0xfffff000) << 32;
759 @@ -2554,12 +2582,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
760 if (obj->tiling_mode == I915_TILING_Y)
761 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
762 val |= I965_FENCE_REG_VALID;
763 - } else
764 - val = 0;
765
766 - fence_reg += reg * 8;
767 - I915_WRITE64(fence_reg, val);
768 - POSTING_READ(fence_reg);
769 + I915_WRITE(fence_reg + 4, val >> 32);
770 + POSTING_READ(fence_reg + 4);
771 +
772 + I915_WRITE(fence_reg + 0, val);
773 + POSTING_READ(fence_reg);
774 + } else {
775 + I915_WRITE(fence_reg + 4, 0);
776 + POSTING_READ(fence_reg + 4);
777 + }
778 }
779
780 static void i915_write_fence_reg(struct drm_device *dev, int reg,
781 @@ -2654,6 +2686,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
782 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
783 mb();
784
785 + WARN(obj && (!obj->stride || !obj->tiling_mode),
786 + "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
787 + obj->stride, obj->tiling_mode);
788 +
789 switch (INTEL_INFO(dev)->gen) {
790 case 7:
791 case 6:
792 @@ -2713,6 +2749,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
793 fence->obj = NULL;
794 list_del_init(&fence->lru_list);
795 }
796 + obj->fence_dirty = false;
797 }
798
799 static int
800 @@ -2842,7 +2879,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
801 return 0;
802
803 i915_gem_object_update_fence(obj, reg, enable);
804 - obj->fence_dirty = false;
805
806 return 0;
807 }
808 @@ -4457,7 +4493,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
809 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
810 if (obj->pages_pin_count == 0)
811 cnt += obj->base.size >> PAGE_SHIFT;
812 - list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
813 + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
814 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
815 cnt += obj->base.size >> PAGE_SHIFT;
816
817 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
818 index 56746dc..e1f4e6e 100644
819 --- a/drivers/gpu/drm/i915/intel_display.c
820 +++ b/drivers/gpu/drm/i915/intel_display.c
821 @@ -8146,15 +8146,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
822 }
823
824 static bool
825 -is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
826 - int num_connectors)
827 +is_crtc_connector_off(struct drm_mode_set *set)
828 {
829 int i;
830
831 - for (i = 0; i < num_connectors; i++)
832 - if (connectors[i].encoder &&
833 - connectors[i].encoder->crtc == crtc &&
834 - connectors[i].dpms != DRM_MODE_DPMS_ON)
835 + if (set->num_connectors == 0)
836 + return false;
837 +
838 + if (WARN_ON(set->connectors == NULL))
839 + return false;
840 +
841 + for (i = 0; i < set->num_connectors; i++)
842 + if (set->connectors[i]->encoder &&
843 + set->connectors[i]->encoder->crtc == set->crtc &&
844 + set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
845 return true;
846
847 return false;
848 @@ -8167,10 +8172,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
849
850 /* We should be able to check here if the fb has the same properties
851 * and then just flip_or_move it */
852 - if (set->connectors != NULL &&
853 - is_crtc_connector_off(set->crtc, *set->connectors,
854 - set->num_connectors)) {
855 - config->mode_changed = true;
856 + if (is_crtc_connector_off(set)) {
857 + config->mode_changed = true;
858 } else if (set->crtc->fb != set->fb) {
859 /* If we have no fb then treat it as a full mode set */
860 if (set->crtc->fb == NULL) {
861 @@ -8914,6 +8917,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
862 DRM_INFO("applying inverted panel brightness quirk\n");
863 }
864
865 +/*
866 + * Some machines (Dell XPS13) suffer broken backlight controls if
867 + * BLM_PCH_PWM_ENABLE is set.
868 + */
869 +static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
870 +{
871 + struct drm_i915_private *dev_priv = dev->dev_private;
872 + dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
873 + DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
874 +}
875 +
876 struct intel_quirk {
877 int device;
878 int subsystem_vendor;
879 @@ -8983,6 +8997,11 @@ static struct intel_quirk intel_quirks[] = {
880
881 /* Acer Aspire 4736Z */
882 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
883 +
884 + /* Dell XPS13 HD Sandy Bridge */
885 + { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
886 + /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
887 + { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
888 };
889
890 static void intel_init_quirks(struct drm_device *dev)
891 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
892 index eb5e6e9..33cb87f 100644
893 --- a/drivers/gpu/drm/i915/intel_panel.c
894 +++ b/drivers/gpu/drm/i915/intel_panel.c
895 @@ -354,7 +354,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
896 POSTING_READ(reg);
897 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
898
899 - if (HAS_PCH_SPLIT(dev)) {
900 + if (HAS_PCH_SPLIT(dev) &&
901 + !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
902 tmp = I915_READ(BLC_PWM_PCH_CTL1);
903 tmp |= BLM_PCH_PWM_ENABLE;
904 tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
905 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
906 index aa01128..2cfe9f6 100644
907 --- a/drivers/gpu/drm/i915/intel_pm.c
908 +++ b/drivers/gpu/drm/i915/intel_pm.c
909 @@ -4486,7 +4486,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
910 gen6_gt_check_fifodbg(dev_priv);
911 }
912
913 -void intel_gt_reset(struct drm_device *dev)
914 +void intel_gt_sanitize(struct drm_device *dev)
915 {
916 struct drm_i915_private *dev_priv = dev->dev_private;
917
918 @@ -4497,6 +4497,10 @@ void intel_gt_reset(struct drm_device *dev)
919 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
920 __gen6_gt_force_wake_mt_reset(dev_priv);
921 }
922 +
923 + /* BIOS often leaves RC6 enabled, but disable it for hw init */
924 + if (INTEL_INFO(dev)->gen >= 6)
925 + intel_disable_gt_powersave(dev);
926 }
927
928 void intel_gt_init(struct drm_device *dev)
929 @@ -4505,18 +4509,51 @@ void intel_gt_init(struct drm_device *dev)
930
931 spin_lock_init(&dev_priv->gt_lock);
932
933 - intel_gt_reset(dev);
934 -
935 if (IS_VALLEYVIEW(dev)) {
936 dev_priv->gt.force_wake_get = vlv_force_wake_get;
937 dev_priv->gt.force_wake_put = vlv_force_wake_put;
938 - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
939 + } else if (IS_HASWELL(dev)) {
940 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
941 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
942 + } else if (IS_IVYBRIDGE(dev)) {
943 + u32 ecobus;
944 +
945 + /* IVB configs may use multi-threaded forcewake */
946 +
947 + /* A small trick here - if the bios hasn't configured
948 + * MT forcewake, and if the device is in RC6, then
949 + * force_wake_mt_get will not wake the device and the
950 + * ECOBUS read will return zero. Which will be
951 + * (correctly) interpreted by the test below as MT
952 + * forcewake being disabled.
953 + */
954 + mutex_lock(&dev->struct_mutex);
955 + __gen6_gt_force_wake_mt_get(dev_priv);
956 + ecobus = I915_READ_NOTRACE(ECOBUS);
957 + __gen6_gt_force_wake_mt_put(dev_priv);
958 + mutex_unlock(&dev->struct_mutex);
959 +
960 + if (ecobus & FORCEWAKE_MT_ENABLE) {
961 + dev_priv->gt.force_wake_get =
962 + __gen6_gt_force_wake_mt_get;
963 + dev_priv->gt.force_wake_put =
964 + __gen6_gt_force_wake_mt_put;
965 + } else {
966 + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
967 + DRM_INFO("when using vblank-synced partial screen updates.\n");
968 + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
969 + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
970 + }
971 } else if (IS_GEN6(dev)) {
972 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
973 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
974 }
975 +}
976 +
977 +void intel_pm_init(struct drm_device *dev)
978 +{
979 + struct drm_i915_private *dev_priv = dev->dev_private;
980 +
981 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
982 intel_gen6_powersave_work);
983 }
984 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
985 index 1d5d613..1424f20 100644
986 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
987 +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
988 @@ -490,9 +490,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
989 struct pipe_control *pc = ring->private;
990 struct drm_i915_gem_object *obj;
991
992 - if (!ring->private)
993 - return;
994 -
995 obj = pc->obj;
996
997 kunmap(sg_page(obj->pages->sgl));
998 @@ -500,7 +497,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
999 drm_gem_object_unreference(&obj->base);
1000
1001 kfree(pc);
1002 - ring->private = NULL;
1003 }
1004
1005 static int init_render_ring(struct intel_ring_buffer *ring)
1006 @@ -571,7 +567,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
1007 if (HAS_BROKEN_CS_TLB(dev))
1008 drm_gem_object_unreference(to_gem_object(ring->private));
1009
1010 - cleanup_pipe_control(ring);
1011 + if (INTEL_INFO(dev)->gen >= 5)
1012 + cleanup_pipe_control(ring);
1013 +
1014 + ring->private = NULL;
1015 }
1016
1017 static void
1018 diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
1019 index 8e47a9b..22aa996 100644
1020 --- a/drivers/gpu/drm/nouveau/nv17_fence.c
1021 +++ b/drivers/gpu/drm/nouveau/nv17_fence.c
1022 @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
1023 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
1024 struct nouveau_object *object;
1025 u32 start = mem->start * PAGE_SIZE;
1026 - u32 limit = mem->start + mem->size - 1;
1027 + u32 limit = start + mem->size - 1;
1028 int ret = 0;
1029
1030 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
1031 diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
1032 index f9701e5..0ee3638 100644
1033 --- a/drivers/gpu/drm/nouveau/nv50_fence.c
1034 +++ b/drivers/gpu/drm/nouveau/nv50_fence.c
1035 @@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
1036 struct nv10_fence_chan *fctx;
1037 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
1038 struct nouveau_object *object;
1039 + u32 start = mem->start * PAGE_SIZE;
1040 + u32 limit = start + mem->size - 1;
1041 int ret, i;
1042
1043 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
1044 @@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
1045 fctx->base.sync = nv17_fence_sync;
1046
1047 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
1048 - NvSema, 0x0002,
1049 + NvSema, 0x003d,
1050 &(struct nv_dma_class) {
1051 .flags = NV_DMA_TARGET_VRAM |
1052 NV_DMA_ACCESS_RDWR,
1053 - .start = mem->start * PAGE_SIZE,
1054 - .limit = mem->size - 1,
1055 + .start = start,
1056 + .limit = limit,
1057 }, sizeof(struct nv_dma_class),
1058 &object);
1059
1060 /* dma objects for display sync channel semaphore blocks */
1061 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
1062 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
1063 + u32 start = bo->bo.mem.start * PAGE_SIZE;
1064 + u32 limit = start + bo->bo.mem.size - 1;
1065
1066 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
1067 NvEvoSema0 + i, 0x003d,
1068 &(struct nv_dma_class) {
1069 .flags = NV_DMA_TARGET_VRAM |
1070 NV_DMA_ACCESS_RDWR,
1071 - .start = bo->bo.offset,
1072 - .limit = bo->bo.offset + 0xfff,
1073 + .start = start,
1074 + .limit = limit,
1075 }, sizeof(struct nv_dma_class),
1076 &object);
1077 }
1078 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
1079 index fb441a7..15da7ef 100644
1080 --- a/drivers/gpu/drm/radeon/atom.c
1081 +++ b/drivers/gpu/drm/radeon/atom.c
1082 @@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1083 int r;
1084
1085 mutex_lock(&ctx->mutex);
1086 + /* reset data block */
1087 + ctx->data_block = 0;
1088 /* reset reg block */
1089 ctx->reg_block = 0;
1090 /* reset fb window */
1091 ctx->fb_base = 0;
1092 /* reset io mode */
1093 ctx->io_mode = ATOM_IO_MM;
1094 + /* reset divmul */
1095 + ctx->divmul[0] = 0;
1096 + ctx->divmul[1] = 0;
1097 r = atom_execute_table_locked(ctx, index, params);
1098 mutex_unlock(&ctx->mutex);
1099 return r;
1100 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1101 index 064023b..32501f6 100644
1102 --- a/drivers/gpu/drm/radeon/atombios_dp.c
1103 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1104 @@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
1105 };
1106
1107 /***** radeon AUX functions *****/
1108 +
1109 +/* Atom needs data in little endian format
1110 + * so swap as appropriate when copying data to
1111 + * or from atom. Note that atom operates on
1112 + * dw units.
1113 + */
1114 +static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
1115 +{
1116 +#ifdef __BIG_ENDIAN
1117 + u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
1118 + u32 *dst32, *src32;
1119 + int i;
1120 +
1121 + memcpy(src_tmp, src, num_bytes);
1122 + src32 = (u32 *)src_tmp;
1123 + dst32 = (u32 *)dst_tmp;
1124 + if (to_le) {
1125 + for (i = 0; i < ((num_bytes + 3) / 4); i++)
1126 + dst32[i] = cpu_to_le32(src32[i]);
1127 + memcpy(dst, dst_tmp, num_bytes);
1128 + } else {
1129 + u8 dws = num_bytes & ~3;
1130 + for (i = 0; i < ((num_bytes + 3) / 4); i++)
1131 + dst32[i] = le32_to_cpu(src32[i]);
1132 + memcpy(dst, dst_tmp, dws);
1133 + if (num_bytes % 4) {
1134 + for (i = 0; i < (num_bytes % 4); i++)
1135 + dst[dws+i] = dst_tmp[dws+i];
1136 + }
1137 + }
1138 +#else
1139 + memcpy(dst, src, num_bytes);
1140 +#endif
1141 +}
1142 +
1143 union aux_channel_transaction {
1144 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
1145 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
1146 @@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
1147
1148 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
1149
1150 - memcpy(base, send, send_bytes);
1151 + radeon_copy_swap(base, send, send_bytes, true);
1152
1153 - args.v1.lpAuxRequest = 0 + 4;
1154 - args.v1.lpDataOut = 16 + 4;
1155 + args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
1156 + args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
1157 args.v1.ucDataOutLen = 0;
1158 args.v1.ucChannelID = chan->rec.i2c_id;
1159 args.v1.ucDelay = delay / 10;
1160 @@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
1161 recv_bytes = recv_size;
1162
1163 if (recv && recv_size)
1164 - memcpy(recv, base + 16, recv_bytes);
1165 + radeon_copy_swap(recv, base + 16, recv_bytes, false);
1166
1167 return recv_bytes;
1168 }
1169 diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1170 index b9c6f76..bb9ea36 100644
1171 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
1172 +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1173 @@ -157,9 +157,9 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1174 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
1175 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1176 */
1177 + WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
1178 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
1179 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
1180 - WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
1181 }
1182
1183
1184 @@ -177,6 +177,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
1185 uint32_t offset;
1186 ssize_t err;
1187
1188 + if (!dig || !dig->afmt)
1189 + return;
1190 +
1191 /* Silent, r600_hdmi_enable will raise WARN for us */
1192 if (!dig->afmt->enabled)
1193 return;
1194 @@ -280,6 +283,9 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
1195 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1196 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1197
1198 + if (!dig || !dig->afmt)
1199 + return;
1200 +
1201 /* Silent, r600_hdmi_enable will raise WARN for us */
1202 if (enable && dig->afmt->enabled)
1203 return;
1204 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
1205 index 6948eb8..b60004e 100644
1206 --- a/drivers/gpu/drm/radeon/r600.c
1207 +++ b/drivers/gpu/drm/radeon/r600.c
1208 @@ -2986,7 +2986,7 @@ void r600_uvd_fence_emit(struct radeon_device *rdev,
1209 struct radeon_fence *fence)
1210 {
1211 struct radeon_ring *ring = &rdev->ring[fence->ring];
1212 - uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
1213 + uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
1214
1215 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
1216 radeon_ring_write(ring, fence->seq);
1217 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1218 index e73b2a7..f48240b 100644
1219 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1220 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1221 @@ -266,6 +266,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
1222 uint32_t offset;
1223 ssize_t err;
1224
1225 + if (!dig || !dig->afmt)
1226 + return;
1227 +
1228 /* Silent, r600_hdmi_enable will raise WARN for us */
1229 if (!dig->afmt->enabled)
1230 return;
1231 @@ -448,6 +451,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
1232 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1233 u32 hdmi = HDMI0_ERROR_ACK;
1234
1235 + if (!dig || !dig->afmt)
1236 + return;
1237 +
1238 /* Silent, r600_hdmi_enable will raise WARN for us */
1239 if (enable && dig->afmt->enabled)
1240 return;
1241 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1242 index 142ce6c..f4dcfdd 100644
1243 --- a/drivers/gpu/drm/radeon/radeon.h
1244 +++ b/drivers/gpu/drm/radeon/radeon.h
1245 @@ -408,6 +408,7 @@ struct radeon_sa_manager {
1246 uint64_t gpu_addr;
1247 void *cpu_ptr;
1248 uint32_t domain;
1249 + uint32_t align;
1250 };
1251
1252 struct radeon_sa_bo;
1253 diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
1254 index 78edadc..68ce360 100644
1255 --- a/drivers/gpu/drm/radeon/radeon_combios.c
1256 +++ b/drivers/gpu/drm/radeon/radeon_combios.c
1257 @@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
1258 enum radeon_combios_table_offset table)
1259 {
1260 struct radeon_device *rdev = dev->dev_private;
1261 - int rev;
1262 + int rev, size;
1263 uint16_t offset = 0, check_offset;
1264
1265 if (!rdev->bios)
1266 @@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
1267 switch (table) {
1268 /* absolute offset tables */
1269 case COMBIOS_ASIC_INIT_1_TABLE:
1270 - check_offset = RBIOS16(rdev->bios_header_start + 0xc);
1271 - if (check_offset)
1272 - offset = check_offset;
1273 + check_offset = 0xc;
1274 break;
1275 case COMBIOS_BIOS_SUPPORT_TABLE:
1276 - check_offset = RBIOS16(rdev->bios_header_start + 0x14);
1277 - if (check_offset)
1278 - offset = check_offset;
1279 + check_offset = 0x14;
1280 break;
1281 case COMBIOS_DAC_PROGRAMMING_TABLE:
1282 - check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
1283 - if (check_offset)
1284 - offset = check_offset;
1285 + check_offset = 0x2a;
1286 break;
1287 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
1288 - check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
1289 - if (check_offset)
1290 - offset = check_offset;
1291 + check_offset = 0x2c;
1292 break;
1293 case COMBIOS_CRTC_INFO_TABLE:
1294 - check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
1295 - if (check_offset)
1296 - offset = check_offset;
1297 + check_offset = 0x2e;
1298 break;
1299 case COMBIOS_PLL_INFO_TABLE:
1300 - check_offset = RBIOS16(rdev->bios_header_start + 0x30);
1301 - if (check_offset)
1302 - offset = check_offset;
1303 + check_offset = 0x30;
1304 break;
1305 case COMBIOS_TV_INFO_TABLE:
1306 - check_offset = RBIOS16(rdev->bios_header_start + 0x32);
1307 - if (check_offset)
1308 - offset = check_offset;
1309 + check_offset = 0x32;
1310 break;
1311 case COMBIOS_DFP_INFO_TABLE:
1312 - check_offset = RBIOS16(rdev->bios_header_start + 0x34);
1313 - if (check_offset)
1314 - offset = check_offset;
1315 + check_offset = 0x34;
1316 break;
1317 case COMBIOS_HW_CONFIG_INFO_TABLE:
1318 - check_offset = RBIOS16(rdev->bios_header_start + 0x36);
1319 - if (check_offset)
1320 - offset = check_offset;
1321 + check_offset = 0x36;
1322 break;
1323 case COMBIOS_MULTIMEDIA_INFO_TABLE:
1324 - check_offset = RBIOS16(rdev->bios_header_start + 0x38);
1325 - if (check_offset)
1326 - offset = check_offset;
1327 + check_offset = 0x38;
1328 break;
1329 case COMBIOS_TV_STD_PATCH_TABLE:
1330 - check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
1331 - if (check_offset)
1332 - offset = check_offset;
1333 + check_offset = 0x3e;
1334 break;
1335 case COMBIOS_LCD_INFO_TABLE:
1336 - check_offset = RBIOS16(rdev->bios_header_start + 0x40);
1337 - if (check_offset)
1338 - offset = check_offset;
1339 + check_offset = 0x40;
1340 break;
1341 case COMBIOS_MOBILE_INFO_TABLE:
1342 - check_offset = RBIOS16(rdev->bios_header_start + 0x42);
1343 - if (check_offset)
1344 - offset = check_offset;
1345 + check_offset = 0x42;
1346 break;
1347 case COMBIOS_PLL_INIT_TABLE:
1348 - check_offset = RBIOS16(rdev->bios_header_start + 0x46);
1349 - if (check_offset)
1350 - offset = check_offset;
1351 + check_offset = 0x46;
1352 break;
1353 case COMBIOS_MEM_CONFIG_TABLE:
1354 - check_offset = RBIOS16(rdev->bios_header_start + 0x48);
1355 - if (check_offset)
1356 - offset = check_offset;
1357 + check_offset = 0x48;
1358 break;
1359 case COMBIOS_SAVE_MASK_TABLE:
1360 - check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
1361 - if (check_offset)
1362 - offset = check_offset;
1363 + check_offset = 0x4a;
1364 break;
1365 case COMBIOS_HARDCODED_EDID_TABLE:
1366 - check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
1367 - if (check_offset)
1368 - offset = check_offset;
1369 + check_offset = 0x4c;
1370 break;
1371 case COMBIOS_ASIC_INIT_2_TABLE:
1372 - check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
1373 - if (check_offset)
1374 - offset = check_offset;
1375 + check_offset = 0x4e;
1376 break;
1377 case COMBIOS_CONNECTOR_INFO_TABLE:
1378 - check_offset = RBIOS16(rdev->bios_header_start + 0x50);
1379 - if (check_offset)
1380 - offset = check_offset;
1381 + check_offset = 0x50;
1382 break;
1383 case COMBIOS_DYN_CLK_1_TABLE:
1384 - check_offset = RBIOS16(rdev->bios_header_start + 0x52);
1385 - if (check_offset)
1386 - offset = check_offset;
1387 + check_offset = 0x52;
1388 break;
1389 case COMBIOS_RESERVED_MEM_TABLE:
1390 - check_offset = RBIOS16(rdev->bios_header_start + 0x54);
1391 - if (check_offset)
1392 - offset = check_offset;
1393 + check_offset = 0x54;
1394 break;
1395 case COMBIOS_EXT_TMDS_INFO_TABLE:
1396 - check_offset = RBIOS16(rdev->bios_header_start + 0x58);
1397 - if (check_offset)
1398 - offset = check_offset;
1399 + check_offset = 0x58;
1400 break;
1401 case COMBIOS_MEM_CLK_INFO_TABLE:
1402 - check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
1403 - if (check_offset)
1404 - offset = check_offset;
1405 + check_offset = 0x5a;
1406 break;
1407 case COMBIOS_EXT_DAC_INFO_TABLE:
1408 - check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
1409 - if (check_offset)
1410 - offset = check_offset;
1411 + check_offset = 0x5c;
1412 break;
1413 case COMBIOS_MISC_INFO_TABLE:
1414 - check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
1415 - if (check_offset)
1416 - offset = check_offset;
1417 + check_offset = 0x5e;
1418 break;
1419 case COMBIOS_CRT_INFO_TABLE:
1420 - check_offset = RBIOS16(rdev->bios_header_start + 0x60);
1421 - if (check_offset)
1422 - offset = check_offset;
1423 + check_offset = 0x60;
1424 break;
1425 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
1426 - check_offset = RBIOS16(rdev->bios_header_start + 0x62);
1427 - if (check_offset)
1428 - offset = check_offset;
1429 + check_offset = 0x62;
1430 break;
1431 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
1432 - check_offset = RBIOS16(rdev->bios_header_start + 0x64);
1433 - if (check_offset)
1434 - offset = check_offset;
1435 + check_offset = 0x64;
1436 break;
1437 case COMBIOS_FAN_SPEED_INFO_TABLE:
1438 - check_offset = RBIOS16(rdev->bios_header_start + 0x66);
1439 - if (check_offset)
1440 - offset = check_offset;
1441 + check_offset = 0x66;
1442 break;
1443 case COMBIOS_OVERDRIVE_INFO_TABLE:
1444 - check_offset = RBIOS16(rdev->bios_header_start + 0x68);
1445 - if (check_offset)
1446 - offset = check_offset;
1447 + check_offset = 0x68;
1448 break;
1449 case COMBIOS_OEM_INFO_TABLE:
1450 - check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
1451 - if (check_offset)
1452 - offset = check_offset;
1453 + check_offset = 0x6a;
1454 break;
1455 case COMBIOS_DYN_CLK_2_TABLE:
1456 - check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
1457 - if (check_offset)
1458 - offset = check_offset;
1459 + check_offset = 0x6c;
1460 break;
1461 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
1462 - check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
1463 - if (check_offset)
1464 - offset = check_offset;
1465 + check_offset = 0x6e;
1466 break;
1467 case COMBIOS_I2C_INFO_TABLE:
1468 - check_offset = RBIOS16(rdev->bios_header_start + 0x70);
1469 - if (check_offset)
1470 - offset = check_offset;
1471 + check_offset = 0x70;
1472 break;
1473 /* relative offset tables */
1474 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
1475 @@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
1476 }
1477 break;
1478 default:
1479 + check_offset = 0;
1480 break;
1481 }
1482
1483 - return offset;
1484 + size = RBIOS8(rdev->bios_header_start + 0x6);
1485 + /* check absolute offset tables */
1486 + if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
1487 + offset = RBIOS16(rdev->bios_header_start + check_offset);
1488
1489 + return offset;
1490 }
1491
1492 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
1493 @@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
1494 dac = RBIOS8(dac_info + 0x3) & 0xf;
1495 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
1496 }
1497 - /* if the values are all zeros, use the table */
1498 - if (p_dac->ps2_pdac_adj)
1499 + /* if the values are zeros, use the table */
1500 + if ((dac == 0) || (bg == 0))
1501 + found = 0;
1502 + else
1503 found = 1;
1504 }
1505
1506 /* quirks */
1507 + /* Radeon 7000 (RV100) */
1508 + if (((dev->pdev->device == 0x5159) &&
1509 + (dev->pdev->subsystem_vendor == 0x174B) &&
1510 + (dev->pdev->subsystem_device == 0x7c28)) ||
1511 /* Radeon 9100 (R200) */
1512 - if ((dev->pdev->device == 0x514D) &&
1513 + ((dev->pdev->device == 0x514D) &&
1514 (dev->pdev->subsystem_vendor == 0x174B) &&
1515 - (dev->pdev->subsystem_device == 0x7149)) {
1516 + (dev->pdev->subsystem_device == 0x7149))) {
1517 /* vbios value is bad, use the default */
1518 found = 0;
1519 }
1520 diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
1521 index 43ec4a4..5ce190b 100644
1522 --- a/drivers/gpu/drm/radeon/radeon_gart.c
1523 +++ b/drivers/gpu/drm/radeon/radeon_gart.c
1524 @@ -467,6 +467,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
1525 size *= 2;
1526 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
1527 RADEON_GPU_PAGE_ALIGN(size),
1528 + RADEON_GPU_PAGE_SIZE,
1529 RADEON_GEM_DOMAIN_VRAM);
1530 if (r) {
1531 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
1532 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1533 index 5a99d43..1fe12ab 100644
1534 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
1535 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
1536 @@ -241,9 +241,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
1537 {
1538 int r = 0;
1539
1540 - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
1541 - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
1542 -
1543 spin_lock_init(&rdev->irq.lock);
1544 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
1545 if (r) {
1546 @@ -265,6 +262,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
1547 rdev->irq.installed = false;
1548 return r;
1549 }
1550 +
1551 + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
1552 + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
1553 +
1554 DRM_INFO("radeon: irq initialized.\n");
1555 return 0;
1556 }
1557 @@ -284,8 +285,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
1558 rdev->irq.installed = false;
1559 if (rdev->msi_enabled)
1560 pci_disable_msi(rdev->pdev);
1561 + flush_work(&rdev->hotplug_work);
1562 }
1563 - flush_work(&rdev->hotplug_work);
1564 }
1565
1566 /**
1567 diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
1568 index e2cb80a..2943823 100644
1569 --- a/drivers/gpu/drm/radeon/radeon_object.h
1570 +++ b/drivers/gpu/drm/radeon/radeon_object.h
1571 @@ -158,7 +158,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
1572
1573 extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
1574 struct radeon_sa_manager *sa_manager,
1575 - unsigned size, u32 domain);
1576 + unsigned size, u32 align, u32 domain);
1577 extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
1578 struct radeon_sa_manager *sa_manager);
1579 extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
1580 diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
1581 index 82434018..83f6295 100644
1582 --- a/drivers/gpu/drm/radeon/radeon_ring.c
1583 +++ b/drivers/gpu/drm/radeon/radeon_ring.c
1584 @@ -224,6 +224,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
1585 }
1586 r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
1587 RADEON_IB_POOL_SIZE*64*1024,
1588 + RADEON_GPU_PAGE_SIZE,
1589 RADEON_GEM_DOMAIN_GTT);
1590 if (r) {
1591 return r;
1592 diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
1593 index 0abe5a9..f0bac68 100644
1594 --- a/drivers/gpu/drm/radeon/radeon_sa.c
1595 +++ b/drivers/gpu/drm/radeon/radeon_sa.c
1596 @@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
1597
1598 int radeon_sa_bo_manager_init(struct radeon_device *rdev,
1599 struct radeon_sa_manager *sa_manager,
1600 - unsigned size, u32 domain)
1601 + unsigned size, u32 align, u32 domain)
1602 {
1603 int i, r;
1604
1605 @@ -57,13 +57,14 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
1606 sa_manager->bo = NULL;
1607 sa_manager->size = size;
1608 sa_manager->domain = domain;
1609 + sa_manager->align = align;
1610 sa_manager->hole = &sa_manager->olist;
1611 INIT_LIST_HEAD(&sa_manager->olist);
1612 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1613 INIT_LIST_HEAD(&sa_manager->flist[i]);
1614 }
1615
1616 - r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
1617 + r = radeon_bo_create(rdev, size, align, true,
1618 domain, NULL, &sa_manager->bo);
1619 if (r) {
1620 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
1621 @@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
1622 unsigned tries[RADEON_NUM_RINGS];
1623 int i, r;
1624
1625 - BUG_ON(align > RADEON_GPU_PAGE_SIZE);
1626 + BUG_ON(align > sa_manager->align);
1627 BUG_ON(size > sa_manager->size);
1628
1629 *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
1630 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
1631 index 4c605c7..deb5c25 100644
1632 --- a/drivers/hv/hv_balloon.c
1633 +++ b/drivers/hv/hv_balloon.c
1634 @@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
1635 struct hv_hotadd_state *has)
1636 {
1637 int ret = 0;
1638 - int i, nid, t;
1639 + int i, nid;
1640 unsigned long start_pfn;
1641 unsigned long processed_pfn;
1642 unsigned long total_pfn = pfn_count;
1643 @@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
1644
1645 /*
1646 * Wait for the memory block to be onlined.
1647 + * Since the hot add has succeeded, it is ok to
1648 + * proceed even if the pages in the hot added region
1649 + * have not been "onlined" within the allowed time.
1650 */
1651 - t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
1652 - if (t == 0) {
1653 - pr_info("hot_add memory timedout\n");
1654 - has->ha_end_pfn -= HA_CHUNK;
1655 - has->covered_end_pfn -= processed_pfn;
1656 - break;
1657 - }
1658 + wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
1659
1660 }
1661
1662 @@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
1663 dm->num_pages_ballooned +
1664 compute_balloon_floor();
1665
1666 + /*
1667 + * If our transaction ID is no longer current, just don't
1668 + * send the status. This can happen if we were interrupted
1669 + * after we picked our transaction ID.
1670 + */
1671 + if (status.hdr.trans_id != atomic_read(&trans_id))
1672 + return;
1673 +
1674 vmbus_sendpacket(dm->dev->channel, &status,
1675 sizeof(struct dm_status),
1676 (unsigned long)NULL,
1677 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1678 index 41712f0..5849dc0 100644
1679 --- a/drivers/infiniband/ulp/isert/ib_isert.c
1680 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1681 @@ -388,6 +388,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1682 init_waitqueue_head(&isert_conn->conn_wait_comp_err);
1683 kref_init(&isert_conn->conn_kref);
1684 kref_get(&isert_conn->conn_kref);
1685 + mutex_init(&isert_conn->conn_mutex);
1686
1687 cma_id->context = isert_conn;
1688 isert_conn->conn_cm_id = cma_id;
1689 @@ -540,15 +541,32 @@ isert_disconnect_work(struct work_struct *work)
1690 struct isert_conn, conn_logout_work);
1691
1692 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1693 -
1694 + mutex_lock(&isert_conn->conn_mutex);
1695 isert_conn->state = ISER_CONN_DOWN;
1696
1697 if (isert_conn->post_recv_buf_count == 0 &&
1698 atomic_read(&isert_conn->post_send_buf_count) == 0) {
1699 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
1700 - wake_up(&isert_conn->conn_wait);
1701 + mutex_unlock(&isert_conn->conn_mutex);
1702 + goto wake_up;
1703 + }
1704 + if (!isert_conn->conn_cm_id) {
1705 + mutex_unlock(&isert_conn->conn_mutex);
1706 + isert_put_conn(isert_conn);
1707 + return;
1708 + }
1709 + if (!isert_conn->logout_posted) {
1710 + pr_debug("Calling rdma_disconnect for !logout_posted from"
1711 + " isert_disconnect_work\n");
1712 + rdma_disconnect(isert_conn->conn_cm_id);
1713 + mutex_unlock(&isert_conn->conn_mutex);
1714 + iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1715 + goto wake_up;
1716 }
1717 + mutex_unlock(&isert_conn->conn_mutex);
1718
1719 +wake_up:
1720 + wake_up(&isert_conn->conn_wait);
1721 isert_put_conn(isert_conn);
1722 }
1723
1724 @@ -934,16 +952,11 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1725 }
1726
1727 sequence_cmd:
1728 - rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1729 + rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1730
1731 if (!rc && dump_payload == false && unsol_data)
1732 iscsit_set_unsoliticed_dataout(cmd);
1733
1734 - if (rc == CMDSN_ERROR_CANNOT_RECOVER)
1735 - return iscsit_add_reject_from_cmd(
1736 - ISCSI_REASON_PROTOCOL_ERROR,
1737 - 1, 0, (unsigned char *)hdr, cmd);
1738 -
1739 return 0;
1740 }
1741
1742 @@ -1184,14 +1197,12 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1743 {
1744 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1745 struct isert_conn *isert_conn = isert_cmd->conn;
1746 - struct iscsi_conn *conn;
1747 + struct iscsi_conn *conn = isert_conn->conn;
1748
1749 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1750
1751 switch (cmd->iscsi_opcode) {
1752 case ISCSI_OP_SCSI_CMD:
1753 - conn = isert_conn->conn;
1754 -
1755 spin_lock_bh(&conn->cmd_lock);
1756 if (!list_empty(&cmd->i_conn_node))
1757 list_del(&cmd->i_conn_node);
1758 @@ -1201,16 +1212,18 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1759 iscsit_stop_dataout_timer(cmd);
1760
1761 isert_unmap_cmd(isert_cmd, isert_conn);
1762 - /*
1763 - * Fall-through
1764 - */
1765 + transport_generic_free_cmd(&cmd->se_cmd, 0);
1766 + break;
1767 case ISCSI_OP_SCSI_TMFUNC:
1768 + spin_lock_bh(&conn->cmd_lock);
1769 + if (!list_empty(&cmd->i_conn_node))
1770 + list_del(&cmd->i_conn_node);
1771 + spin_unlock_bh(&conn->cmd_lock);
1772 +
1773 transport_generic_free_cmd(&cmd->se_cmd, 0);
1774 break;
1775 case ISCSI_OP_REJECT:
1776 case ISCSI_OP_NOOP_OUT:
1777 - conn = isert_conn->conn;
1778 -
1779 spin_lock_bh(&conn->cmd_lock);
1780 if (!list_empty(&cmd->i_conn_node))
1781 list_del(&cmd->i_conn_node);
1782 @@ -1222,6 +1235,9 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
1783 * associated cmd->se_cmd needs to be released.
1784 */
1785 if (cmd->se_cmd.se_tfo != NULL) {
1786 + pr_debug("Calling transport_generic_free_cmd from"
1787 + " isert_put_cmd for 0x%02x\n",
1788 + cmd->iscsi_opcode);
1789 transport_generic_free_cmd(&cmd->se_cmd, 0);
1790 break;
1791 }
1792 @@ -1318,8 +1334,8 @@ isert_do_control_comp(struct work_struct *work)
1793 atomic_dec(&isert_conn->post_send_buf_count);
1794
1795 cmd->i_state = ISTATE_SENT_STATUS;
1796 - complete(&cmd->reject_comp);
1797 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1798 + break;
1799 case ISTATE_SEND_LOGOUTRSP:
1800 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1801 /*
1802 @@ -1345,7 +1361,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1803 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1804
1805 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1806 - cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
1807 + cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1808 + cmd->i_state == ISTATE_SEND_REJECT) {
1809 isert_unmap_tx_desc(tx_desc, ib_dev);
1810
1811 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1812 @@ -1419,7 +1436,11 @@ isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1813 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1814 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1815
1816 - isert_conn->state = ISER_CONN_TERMINATING;
1817 + mutex_lock(&isert_conn->conn_mutex);
1818 + if (isert_conn->state != ISER_CONN_DOWN)
1819 + isert_conn->state = ISER_CONN_TERMINATING;
1820 + mutex_unlock(&isert_conn->conn_mutex);
1821 +
1822 wake_up(&isert_conn->conn_wait_comp_err);
1823 }
1824 }
1825 @@ -1637,11 +1658,25 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1826 struct isert_cmd, iscsi_cmd);
1827 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1828 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1829 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1830 + struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1831 + struct iscsi_reject *hdr =
1832 + (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1833
1834 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1835 - iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
1836 - &isert_cmd->tx_desc.iscsi_header);
1837 + iscsit_build_reject(cmd, conn, hdr);
1838 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1839 +
1840 + hton24(hdr->dlength, ISCSI_HDR_LEN);
1841 + isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
1842 + (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1843 + DMA_TO_DEVICE);
1844 + isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
1845 + tx_dsg->addr = isert_cmd->sense_buf_dma;
1846 + tx_dsg->length = ISCSI_HDR_LEN;
1847 + tx_dsg->lkey = isert_conn->conn_mr->lkey;
1848 + isert_cmd->tx_desc.num_sge = 2;
1849 +
1850 isert_init_send_wr(isert_cmd, send_wr);
1851
1852 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1853 @@ -2175,6 +2210,17 @@ isert_free_np(struct iscsi_np *np)
1854 kfree(isert_np);
1855 }
1856
1857 +static int isert_check_state(struct isert_conn *isert_conn, int state)
1858 +{
1859 + int ret;
1860 +
1861 + mutex_lock(&isert_conn->conn_mutex);
1862 + ret = (isert_conn->state == state);
1863 + mutex_unlock(&isert_conn->conn_mutex);
1864 +
1865 + return ret;
1866 +}
1867 +
1868 static void isert_free_conn(struct iscsi_conn *conn)
1869 {
1870 struct isert_conn *isert_conn = conn->context;
1871 @@ -2184,26 +2230,43 @@ static void isert_free_conn(struct iscsi_conn *conn)
1872 * Decrement post_send_buf_count for special case when called
1873 * from isert_do_control_comp() -> iscsit_logout_post_handler()
1874 */
1875 + mutex_lock(&isert_conn->conn_mutex);
1876 if (isert_conn->logout_posted)
1877 atomic_dec(&isert_conn->post_send_buf_count);
1878
1879 - if (isert_conn->conn_cm_id)
1880 + if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
1881 + pr_debug("Calling rdma_disconnect from isert_free_conn\n");
1882 rdma_disconnect(isert_conn->conn_cm_id);
1883 + }
1884 /*
1885 * Only wait for conn_wait_comp_err if the isert_conn made it
1886 * into full feature phase..
1887 */
1888 - if (isert_conn->state > ISER_CONN_INIT) {
1889 + if (isert_conn->state == ISER_CONN_UP) {
1890 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
1891 isert_conn->state);
1892 + mutex_unlock(&isert_conn->conn_mutex);
1893 +
1894 wait_event(isert_conn->conn_wait_comp_err,
1895 - isert_conn->state == ISER_CONN_TERMINATING);
1896 - pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
1897 + (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
1898 +
1899 + wait_event(isert_conn->conn_wait,
1900 + (isert_check_state(isert_conn, ISER_CONN_DOWN)));
1901 +
1902 + isert_put_conn(isert_conn);
1903 + return;
1904 + }
1905 + if (isert_conn->state == ISER_CONN_INIT) {
1906 + mutex_unlock(&isert_conn->conn_mutex);
1907 + isert_put_conn(isert_conn);
1908 + return;
1909 }
1910 + pr_debug("isert_free_conn: wait_event conn_wait %d\n",
1911 + isert_conn->state);
1912 + mutex_unlock(&isert_conn->conn_mutex);
1913
1914 - pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
1915 - wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
1916 - pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
1917 + wait_event(isert_conn->conn_wait,
1918 + (isert_check_state(isert_conn, ISER_CONN_DOWN)));
1919
1920 isert_put_conn(isert_conn);
1921 }
1922 diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1923 index b104f4c..5795c82 100644
1924 --- a/drivers/infiniband/ulp/isert/ib_isert.h
1925 +++ b/drivers/infiniband/ulp/isert/ib_isert.h
1926 @@ -102,6 +102,7 @@ struct isert_conn {
1927 struct ib_qp *conn_qp;
1928 struct isert_device *conn_device;
1929 struct work_struct conn_logout_work;
1930 + struct mutex conn_mutex;
1931 wait_queue_head_t conn_wait;
1932 wait_queue_head_t conn_wait_comp_err;
1933 struct kref conn_kref;
1934 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
1935 index aa04f02..81a79b7 100644
1936 --- a/drivers/md/dm-ioctl.c
1937 +++ b/drivers/md/dm-ioctl.c
1938 @@ -1644,7 +1644,10 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
1939 }
1940
1941 if (!dmi) {
1942 + unsigned noio_flag;
1943 + noio_flag = memalloc_noio_save();
1944 dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
1945 + memalloc_noio_restore(noio_flag);
1946 if (dmi)
1947 *param_flags |= DM_PARAMS_VMALLOC;
1948 }
1949 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1950 index bdf26f5..5adede1 100644
1951 --- a/drivers/md/dm-mpath.c
1952 +++ b/drivers/md/dm-mpath.c
1953 @@ -1561,7 +1561,6 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1954 unsigned long flags;
1955 int r;
1956
1957 -again:
1958 bdev = NULL;
1959 mode = 0;
1960 r = 0;
1961 @@ -1579,7 +1578,7 @@ again:
1962 }
1963
1964 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1965 - r = -EAGAIN;
1966 + r = -ENOTCONN;
1967 else if (!bdev)
1968 r = -EIO;
1969
1970 @@ -1591,11 +1590,8 @@ again:
1971 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1972 r = scsi_verify_blk_ioctl(NULL, cmd);
1973
1974 - if (r == -EAGAIN && !fatal_signal_pending(current)) {
1975 + if (r == -ENOTCONN && !fatal_signal_pending(current))
1976 queue_work(kmultipathd, &m->process_queued_ios);
1977 - msleep(10);
1978 - goto again;
1979 - }
1980
1981 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1982 }
1983 diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
1984 index b948fd8..0d2e812 100644
1985 --- a/drivers/md/dm-verity.c
1986 +++ b/drivers/md/dm-verity.c
1987 @@ -831,9 +831,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
1988 for (i = v->levels - 1; i >= 0; i--) {
1989 sector_t s;
1990 v->hash_level_block[i] = hash_position;
1991 - s = verity_position_at_level(v, v->data_blocks, i);
1992 - s = (s >> v->hash_per_block_bits) +
1993 - !!(s & ((1 << v->hash_per_block_bits) - 1));
1994 + s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
1995 + >> ((i + 1) * v->hash_per_block_bits);
1996 if (hash_position + s < hash_position) {
1997 ti->error = "Hash device offset overflow";
1998 r = -E2BIG;
1999 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2000 index d5370a9..33f2010 100644
2001 --- a/drivers/md/dm.c
2002 +++ b/drivers/md/dm.c
2003 @@ -386,10 +386,12 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
2004 unsigned int cmd, unsigned long arg)
2005 {
2006 struct mapped_device *md = bdev->bd_disk->private_data;
2007 - struct dm_table *map = dm_get_live_table(md);
2008 + struct dm_table *map;
2009 struct dm_target *tgt;
2010 int r = -ENOTTY;
2011
2012 +retry:
2013 + map = dm_get_live_table(md);
2014 if (!map || !dm_table_get_size(map))
2015 goto out;
2016
2017 @@ -410,6 +412,11 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
2018 out:
2019 dm_table_put(map);
2020
2021 + if (r == -ENOTCONN) {
2022 + msleep(10);
2023 + goto retry;
2024 + }
2025 +
2026 return r;
2027 }
2028
2029 diff --git a/drivers/md/md.c b/drivers/md/md.c
2030 index 9b82377..51f0345 100644
2031 --- a/drivers/md/md.c
2032 +++ b/drivers/md/md.c
2033 @@ -7697,20 +7697,6 @@ static int remove_and_add_spares(struct mddev *mddev,
2034 continue;
2035
2036 rdev->recovery_offset = 0;
2037 - if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
2038 - spin_lock_irq(&mddev->write_lock);
2039 - if (mddev->in_sync)
2040 - /* OK, this device, which is in_sync,
2041 - * will definitely be noticed before
2042 - * the next write, so recovery isn't
2043 - * needed.
2044 - */
2045 - rdev->recovery_offset = mddev->recovery_cp;
2046 - spin_unlock_irq(&mddev->write_lock);
2047 - }
2048 - if (mddev->ro && rdev->recovery_offset != MaxSector)
2049 - /* not safe to add this disk now */
2050 - continue;
2051 if (mddev->pers->
2052 hot_add_disk(mddev, rdev) == 0) {
2053 if (sysfs_link_rdev(mddev, rdev))
2054 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2055 index 6e17f81..6f48244 100644
2056 --- a/drivers/md/raid1.c
2057 +++ b/drivers/md/raid1.c
2058 @@ -1848,6 +1848,36 @@ static int process_checks(struct r1bio *r1_bio)
2059 int i;
2060 int vcnt;
2061
2062 + /* Fix variable parts of all bios */
2063 + vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2064 + for (i = 0; i < conf->raid_disks * 2; i++) {
2065 + int j;
2066 + int size;
2067 + struct bio *b = r1_bio->bios[i];
2068 + if (b->bi_end_io != end_sync_read)
2069 + continue;
2070 + /* fixup the bio for reuse */
2071 + bio_reset(b);
2072 + b->bi_vcnt = vcnt;
2073 + b->bi_size = r1_bio->sectors << 9;
2074 + b->bi_sector = r1_bio->sector +
2075 + conf->mirrors[i].rdev->data_offset;
2076 + b->bi_bdev = conf->mirrors[i].rdev->bdev;
2077 + b->bi_end_io = end_sync_read;
2078 + b->bi_private = r1_bio;
2079 +
2080 + size = b->bi_size;
2081 + for (j = 0; j < vcnt ; j++) {
2082 + struct bio_vec *bi;
2083 + bi = &b->bi_io_vec[j];
2084 + bi->bv_offset = 0;
2085 + if (size > PAGE_SIZE)
2086 + bi->bv_len = PAGE_SIZE;
2087 + else
2088 + bi->bv_len = size;
2089 + size -= PAGE_SIZE;
2090 + }
2091 + }
2092 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2093 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2094 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
2095 @@ -1856,12 +1886,10 @@ static int process_checks(struct r1bio *r1_bio)
2096 break;
2097 }
2098 r1_bio->read_disk = primary;
2099 - vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2100 for (i = 0; i < conf->raid_disks * 2; i++) {
2101 int j;
2102 struct bio *pbio = r1_bio->bios[primary];
2103 struct bio *sbio = r1_bio->bios[i];
2104 - int size;
2105
2106 if (sbio->bi_end_io != end_sync_read)
2107 continue;
2108 @@ -1887,27 +1915,6 @@ static int process_checks(struct r1bio *r1_bio)
2109 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2110 continue;
2111 }
2112 - /* fixup the bio for reuse */
2113 - bio_reset(sbio);
2114 - sbio->bi_vcnt = vcnt;
2115 - sbio->bi_size = r1_bio->sectors << 9;
2116 - sbio->bi_sector = r1_bio->sector +
2117 - conf->mirrors[i].rdev->data_offset;
2118 - sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
2119 - sbio->bi_end_io = end_sync_read;
2120 - sbio->bi_private = r1_bio;
2121 -
2122 - size = sbio->bi_size;
2123 - for (j = 0; j < vcnt ; j++) {
2124 - struct bio_vec *bi;
2125 - bi = &sbio->bi_io_vec[j];
2126 - bi->bv_offset = 0;
2127 - if (size > PAGE_SIZE)
2128 - bi->bv_len = PAGE_SIZE;
2129 - else
2130 - bi->bv_len = size;
2131 - size -= PAGE_SIZE;
2132 - }
2133
2134 bio_copy_data(sbio, pbio);
2135 }
2136 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2137 index d61eb7e..081bb33 100644
2138 --- a/drivers/md/raid10.c
2139 +++ b/drivers/md/raid10.c
2140 @@ -2268,12 +2268,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2141 d = r10_bio->devs[1].devnum;
2142 wbio = r10_bio->devs[1].bio;
2143 wbio2 = r10_bio->devs[1].repl_bio;
2144 + /* Need to test wbio2->bi_end_io before we call
2145 + * generic_make_request as if the former is NULL,
2146 + * the latter is free to free wbio2.
2147 + */
2148 + if (wbio2 && !wbio2->bi_end_io)
2149 + wbio2 = NULL;
2150 if (wbio->bi_end_io) {
2151 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2152 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2153 generic_make_request(wbio);
2154 }
2155 - if (wbio2 && wbio2->bi_end_io) {
2156 + if (wbio2) {
2157 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2158 md_sync_acct(conf->mirrors[d].replacement->bdev,
2159 bio_sectors(wbio2));
2160 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2161 index 05e4a10..a35b846 100644
2162 --- a/drivers/md/raid5.c
2163 +++ b/drivers/md/raid5.c
2164 @@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
2165 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
2166 set_bit(STRIPE_SYNCING, &sh->state);
2167 clear_bit(STRIPE_INSYNC, &sh->state);
2168 + clear_bit(STRIPE_REPLACED, &sh->state);
2169 }
2170 spin_unlock(&sh->stripe_lock);
2171 }
2172 @@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
2173 handle_parity_checks5(conf, sh, &s, disks);
2174 }
2175
2176 - if (s.replacing && s.locked == 0
2177 - && !test_bit(STRIPE_INSYNC, &sh->state)) {
2178 + if ((s.replacing || s.syncing) && s.locked == 0
2179 + && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
2180 + && !test_bit(STRIPE_REPLACED, &sh->state)) {
2181 /* Write out to replacement devices where possible */
2182 for (i = 0; i < conf->raid_disks; i++)
2183 - if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
2184 - test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
2185 + if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
2186 + WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
2187 set_bit(R5_WantReplace, &sh->dev[i].flags);
2188 set_bit(R5_LOCKED, &sh->dev[i].flags);
2189 s.locked++;
2190 }
2191 - set_bit(STRIPE_INSYNC, &sh->state);
2192 + if (s.replacing)
2193 + set_bit(STRIPE_INSYNC, &sh->state);
2194 + set_bit(STRIPE_REPLACED, &sh->state);
2195 }
2196 if ((s.syncing || s.replacing) && s.locked == 0 &&
2197 + !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
2198 test_bit(STRIPE_INSYNC, &sh->state)) {
2199 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
2200 clear_bit(STRIPE_SYNCING, &sh->state);
2201 diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
2202 index b0b663b..70c4932 100644
2203 --- a/drivers/md/raid5.h
2204 +++ b/drivers/md/raid5.h
2205 @@ -306,6 +306,7 @@ enum {
2206 STRIPE_SYNC_REQUESTED,
2207 STRIPE_SYNCING,
2208 STRIPE_INSYNC,
2209 + STRIPE_REPLACED,
2210 STRIPE_PREREAD_ACTIVE,
2211 STRIPE_DELAYED,
2212 STRIPE_DEGRADED,
2213 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
2214 index c97e9d3..e70b4ff 100644
2215 --- a/drivers/net/wireless/rtlwifi/pci.c
2216 +++ b/drivers/net/wireless/rtlwifi/pci.c
2217 @@ -1008,19 +1008,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
2218 return;
2219 }
2220
2221 -static void rtl_lps_change_work_callback(struct work_struct *work)
2222 -{
2223 - struct rtl_works *rtlworks =
2224 - container_of(work, struct rtl_works, lps_change_work);
2225 - struct ieee80211_hw *hw = rtlworks->hw;
2226 - struct rtl_priv *rtlpriv = rtl_priv(hw);
2227 -
2228 - if (rtlpriv->enter_ps)
2229 - rtl_lps_enter(hw);
2230 - else
2231 - rtl_lps_leave(hw);
2232 -}
2233 -
2234 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
2235 {
2236 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2237 diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
2238 index 884bcea..71e917d 100644
2239 --- a/drivers/net/wireless/rtlwifi/ps.c
2240 +++ b/drivers/net/wireless/rtlwifi/ps.c
2241 @@ -611,6 +611,18 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
2242 MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
2243 }
2244
2245 +void rtl_lps_change_work_callback(struct work_struct *work)
2246 +{
2247 + struct rtl_works *rtlworks =
2248 + container_of(work, struct rtl_works, lps_change_work);
2249 + struct ieee80211_hw *hw = rtlworks->hw;
2250 + struct rtl_priv *rtlpriv = rtl_priv(hw);
2251 +
2252 + if (rtlpriv->enter_ps)
2253 + rtl_lps_enter(hw);
2254 + else
2255 + rtl_lps_leave(hw);
2256 +}
2257
2258 void rtl_swlps_wq_callback(void *data)
2259 {
2260 diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
2261 index 4d682b7..88bd76e 100644
2262 --- a/drivers/net/wireless/rtlwifi/ps.h
2263 +++ b/drivers/net/wireless/rtlwifi/ps.h
2264 @@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
2265 void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
2266 void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
2267 void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
2268 +void rtl_lps_change_work_callback(struct work_struct *work);
2269
2270 #endif
2271 diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
2272 index a3532e0..1feebdc 100644
2273 --- a/drivers/net/wireless/rtlwifi/usb.c
2274 +++ b/drivers/net/wireless/rtlwifi/usb.c
2275 @@ -1070,6 +1070,8 @@ int rtl_usb_probe(struct usb_interface *intf,
2276 spin_lock_init(&rtlpriv->locks.usb_lock);
2277 INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
2278 rtl_fill_h2c_cmd_work_callback);
2279 + INIT_WORK(&rtlpriv->works.lps_change_work,
2280 + rtl_lps_change_work_callback);
2281
2282 rtlpriv->usb_data_index = 0;
2283 init_completion(&rtlpriv->firmware_loading_complete);
2284 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2285 index 1db10141..0c01b8e 100644
2286 --- a/drivers/net/xen-netfront.c
2287 +++ b/drivers/net/xen-netfront.c
2288 @@ -276,8 +276,7 @@ no_skb:
2289 break;
2290 }
2291
2292 - __skb_fill_page_desc(skb, 0, page, 0, 0);
2293 - skb_shinfo(skb)->nr_frags = 1;
2294 + skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
2295 __skb_queue_tail(&np->rx_batch, skb);
2296 }
2297
2298 @@ -822,7 +821,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
2299 struct sk_buff_head *list)
2300 {
2301 struct skb_shared_info *shinfo = skb_shinfo(skb);
2302 - int nr_frags = shinfo->nr_frags;
2303 RING_IDX cons = np->rx.rsp_cons;
2304 struct sk_buff *nskb;
2305
2306 @@ -831,19 +829,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
2307 RING_GET_RESPONSE(&np->rx, ++cons);
2308 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
2309
2310 - __skb_fill_page_desc(skb, nr_frags,
2311 - skb_frag_page(nfrag),
2312 - rx->offset, rx->status);
2313 + if (shinfo->nr_frags == MAX_SKB_FRAGS) {
2314 + unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
2315
2316 - skb->data_len += rx->status;
2317 + BUG_ON(pull_to <= skb_headlen(skb));
2318 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
2319 + }
2320 + BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
2321 +
2322 + skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
2323 + rx->offset, rx->status, PAGE_SIZE);
2324
2325 skb_shinfo(nskb)->nr_frags = 0;
2326 kfree_skb(nskb);
2327 -
2328 - nr_frags++;
2329 }
2330
2331 - shinfo->nr_frags = nr_frags;
2332 return cons;
2333 }
2334
2335 @@ -929,7 +929,8 @@ static int handle_incoming_queue(struct net_device *dev,
2336 while ((skb = __skb_dequeue(rxq)) != NULL) {
2337 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
2338
2339 - __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
2340 + if (pull_to > skb_headlen(skb))
2341 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
2342
2343 /* Ethernet work: Delayed to here as it peeks the header. */
2344 skb->protocol = eth_type_trans(skb, dev);
2345 @@ -1015,16 +1016,10 @@ err:
2346 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
2347 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
2348 skb->data_len = rx->status;
2349 + skb->len += rx->status;
2350
2351 i = xennet_fill_frags(np, skb, &tmpq);
2352
2353 - /*
2354 - * Truesize is the actual allocation size, even if the
2355 - * allocation is only partially used.
2356 - */
2357 - skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
2358 - skb->len += skb->data_len;
2359 -
2360 if (rx->flags & XEN_NETRXF_csum_blank)
2361 skb->ip_summed = CHECKSUM_PARTIAL;
2362 else if (rx->flags & XEN_NETRXF_data_validated)
2363 diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
2364 index 9bb020a..0d30ca8 100644
2365 --- a/drivers/scsi/isci/task.c
2366 +++ b/drivers/scsi/isci/task.c
2367 @@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
2368 struct isci_tmf tmf;
2369 int ret = TMF_RESP_FUNC_FAILED;
2370 unsigned long flags;
2371 + int target_done_already = 0;
2372
2373 /* Get the isci_request reference from the task. Note that
2374 * this check does not depend on the pending request list
2375 @@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
2376 /* If task is already done, the request isn't valid */
2377 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
2378 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
2379 - old_request)
2380 + old_request) {
2381 idev = isci_get_device(task->dev->lldd_dev);
2382 -
2383 + target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
2384 + &old_request->flags);
2385 + }
2386 spin_unlock(&task->task_state_lock);
2387 spin_unlock_irqrestore(&ihost->scic_lock, flags);
2388
2389 @@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
2390
2391 if (task->task_proto == SAS_PROTOCOL_SMP ||
2392 sas_protocol_ata(task->task_proto) ||
2393 - test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
2394 + target_done_already ||
2395 test_bit(IDEV_GONE, &idev->flags)) {
2396
2397 spin_unlock_irqrestore(&ihost->scic_lock, flags);
2398 diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
2399 index 15e4080..51cd27a 100644
2400 --- a/drivers/scsi/qla2xxx/qla_iocb.c
2401 +++ b/drivers/scsi/qla2xxx/qla_iocb.c
2402 @@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
2403 __constant_cpu_to_le16(CF_SIMPLE_TAG);
2404 break;
2405 }
2406 + } else {
2407 + cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
2408 }
2409
2410 /* Load SCSI command packet. */
2411 @@ -1308,11 +1310,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
2412 fcp_cmnd->task_attribute = TSK_ORDERED;
2413 break;
2414 default:
2415 - fcp_cmnd->task_attribute = 0;
2416 + fcp_cmnd->task_attribute = TSK_SIMPLE;
2417 break;
2418 }
2419 } else {
2420 - fcp_cmnd->task_attribute = 0;
2421 + fcp_cmnd->task_attribute = TSK_SIMPLE;
2422 }
2423
2424 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
2425 @@ -1527,7 +1529,12 @@ qla24xx_start_scsi(srb_t *sp)
2426 case ORDERED_QUEUE_TAG:
2427 cmd_pkt->task = TSK_ORDERED;
2428 break;
2429 + default:
2430 + cmd_pkt->task = TSK_SIMPLE;
2431 + break;
2432 }
2433 + } else {
2434 + cmd_pkt->task = TSK_SIMPLE;
2435 }
2436
2437 /* Load SCSI command packet. */
2438 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2439 index 1b1125e..610417e 100644
2440 --- a/drivers/scsi/sd.c
2441 +++ b/drivers/scsi/sd.c
2442 @@ -828,10 +828,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
2443
2444 static void sd_unprep_fn(struct request_queue *q, struct request *rq)
2445 {
2446 + struct scsi_cmnd *SCpnt = rq->special;
2447 +
2448 if (rq->cmd_flags & REQ_DISCARD) {
2449 free_page((unsigned long)rq->buffer);
2450 rq->buffer = NULL;
2451 }
2452 + if (SCpnt->cmnd != rq->cmd) {
2453 + mempool_free(SCpnt->cmnd, sd_cdb_pool);
2454 + SCpnt->cmnd = NULL;
2455 + SCpnt->cmd_len = 0;
2456 + }
2457 }
2458
2459 /**
2460 @@ -1710,21 +1717,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
2461 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
2462 sd_dif_complete(SCpnt, good_bytes);
2463
2464 - if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
2465 - == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
2466 -
2467 - /* We have to print a failed command here as the
2468 - * extended CDB gets freed before scsi_io_completion()
2469 - * is called.
2470 - */
2471 - if (result)
2472 - scsi_print_command(SCpnt);
2473 -
2474 - mempool_free(SCpnt->cmnd, sd_cdb_pool);
2475 - SCpnt->cmnd = NULL;
2476 - SCpnt->cmd_len = 0;
2477 - }
2478 -
2479 return good_bytes;
2480 }
2481
2482 diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
2483 index 9bd8747..34519ea 100644
2484 --- a/drivers/staging/android/logger.c
2485 +++ b/drivers/staging/android/logger.c
2486 @@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
2487 unsigned long nr_segs, loff_t ppos)
2488 {
2489 struct logger_log *log = file_get_log(iocb->ki_filp);
2490 - size_t orig = log->w_off;
2491 + size_t orig;
2492 struct logger_entry header;
2493 struct timespec now;
2494 ssize_t ret = 0;
2495 @@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
2496
2497 mutex_lock(&log->mutex);
2498
2499 + orig = log->w_off;
2500 +
2501 /*
2502 * Fix up any readers, pulling them forward to the first readable
2503 * entry after (what will be) the new write offset. We do this now
2504 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
2505 index 924c54c..0ae406a 100644
2506 --- a/drivers/staging/comedi/comedi_fops.c
2507 +++ b/drivers/staging/comedi/comedi_fops.c
2508 @@ -1401,22 +1401,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
2509 DPRINTK("subdevice busy\n");
2510 return -EBUSY;
2511 }
2512 - s->busy = file;
2513
2514 /* make sure channel/gain list isn't too long */
2515 if (cmd.chanlist_len > s->len_chanlist) {
2516 DPRINTK("channel/gain list too long %u > %d\n",
2517 cmd.chanlist_len, s->len_chanlist);
2518 - ret = -EINVAL;
2519 - goto cleanup;
2520 + return -EINVAL;
2521 }
2522
2523 /* make sure channel/gain list isn't too short */
2524 if (cmd.chanlist_len < 1) {
2525 DPRINTK("channel/gain list too short %u < 1\n",
2526 cmd.chanlist_len);
2527 - ret = -EINVAL;
2528 - goto cleanup;
2529 + return -EINVAL;
2530 }
2531
2532 async->cmd = cmd;
2533 @@ -1426,8 +1423,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
2534 kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
2535 if (!async->cmd.chanlist) {
2536 DPRINTK("allocation failed\n");
2537 - ret = -ENOMEM;
2538 - goto cleanup;
2539 + return -ENOMEM;
2540 }
2541
2542 if (copy_from_user(async->cmd.chanlist, user_chanlist,
2543 @@ -1479,6 +1475,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
2544
2545 comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
2546
2547 + /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
2548 + * comedi_read() or comedi_write() */
2549 + s->busy = file;
2550 ret = s->do_cmd(dev, s);
2551 if (ret == 0)
2552 return 0;
2553 @@ -1693,6 +1692,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
2554 void *file)
2555 {
2556 struct comedi_subdevice *s;
2557 + int ret;
2558
2559 if (arg >= dev->n_subdevices)
2560 return -EINVAL;
2561 @@ -1709,7 +1709,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
2562 if (s->busy != file)
2563 return -EBUSY;
2564
2565 - return do_cancel(dev, s);
2566 + ret = do_cancel(dev, s);
2567 + if (comedi_get_subdevice_runflags(s) & SRF_USER)
2568 + wake_up_interruptible(&s->async->wait_head);
2569 +
2570 + return ret;
2571 }
2572
2573 /*
2574 @@ -2041,11 +2045,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2575
2576 if (!comedi_is_subdevice_running(s)) {
2577 if (count == 0) {
2578 + mutex_lock(&dev->mutex);
2579 if (comedi_is_subdevice_in_error(s))
2580 retval = -EPIPE;
2581 else
2582 retval = 0;
2583 do_become_nonbusy(dev, s);
2584 + mutex_unlock(&dev->mutex);
2585 }
2586 break;
2587 }
2588 @@ -2144,11 +2150,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2589
2590 if (n == 0) {
2591 if (!comedi_is_subdevice_running(s)) {
2592 + mutex_lock(&dev->mutex);
2593 do_become_nonbusy(dev, s);
2594 if (comedi_is_subdevice_in_error(s))
2595 retval = -EPIPE;
2596 else
2597 retval = 0;
2598 + mutex_unlock(&dev->mutex);
2599 break;
2600 }
2601 if (file->f_flags & O_NONBLOCK) {
2602 @@ -2186,9 +2194,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2603 buf += n;
2604 break; /* makes device work like a pipe */
2605 }
2606 - if (comedi_is_subdevice_idle(s) &&
2607 - async->buf_read_count - async->buf_write_count == 0) {
2608 - do_become_nonbusy(dev, s);
2609 + if (comedi_is_subdevice_idle(s)) {
2610 + mutex_lock(&dev->mutex);
2611 + if (async->buf_read_count - async->buf_write_count == 0)
2612 + do_become_nonbusy(dev, s);
2613 + mutex_unlock(&dev->mutex);
2614 }
2615 set_current_state(TASK_RUNNING);
2616 remove_wait_queue(&async->wait_head, &wait);
2617 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2618 index d7705e5..012ff8b 100644
2619 --- a/drivers/target/iscsi/iscsi_target.c
2620 +++ b/drivers/target/iscsi/iscsi_target.c
2621 @@ -628,25 +628,18 @@ static void __exit iscsi_target_cleanup_module(void)
2622 }
2623
2624 static int iscsit_add_reject(
2625 + struct iscsi_conn *conn,
2626 u8 reason,
2627 - int fail_conn,
2628 - unsigned char *buf,
2629 - struct iscsi_conn *conn)
2630 + unsigned char *buf)
2631 {
2632 struct iscsi_cmd *cmd;
2633 - struct iscsi_reject *hdr;
2634 - int ret;
2635
2636 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
2637 if (!cmd)
2638 return -1;
2639
2640 cmd->iscsi_opcode = ISCSI_OP_REJECT;
2641 - if (fail_conn)
2642 - cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
2643 -
2644 - hdr = (struct iscsi_reject *) cmd->pdu;
2645 - hdr->reason = reason;
2646 + cmd->reject_reason = reason;
2647
2648 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
2649 if (!cmd->buf_ptr) {
2650 @@ -662,23 +655,16 @@ static int iscsit_add_reject(
2651 cmd->i_state = ISTATE_SEND_REJECT;
2652 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2653
2654 - ret = wait_for_completion_interruptible(&cmd->reject_comp);
2655 - if (ret != 0)
2656 - return -1;
2657 -
2658 - return (!fail_conn) ? 0 : -1;
2659 + return -1;
2660 }
2661
2662 -int iscsit_add_reject_from_cmd(
2663 +static int iscsit_add_reject_from_cmd(
2664 + struct iscsi_cmd *cmd,
2665 u8 reason,
2666 - int fail_conn,
2667 - int add_to_conn,
2668 - unsigned char *buf,
2669 - struct iscsi_cmd *cmd)
2670 + bool add_to_conn,
2671 + unsigned char *buf)
2672 {
2673 struct iscsi_conn *conn;
2674 - struct iscsi_reject *hdr;
2675 - int ret;
2676
2677 if (!cmd->conn) {
2678 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
2679 @@ -688,11 +674,7 @@ int iscsit_add_reject_from_cmd(
2680 conn = cmd->conn;
2681
2682 cmd->iscsi_opcode = ISCSI_OP_REJECT;
2683 - if (fail_conn)
2684 - cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
2685 -
2686 - hdr = (struct iscsi_reject *) cmd->pdu;
2687 - hdr->reason = reason;
2688 + cmd->reject_reason = reason;
2689
2690 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
2691 if (!cmd->buf_ptr) {
2692 @@ -709,8 +691,6 @@ int iscsit_add_reject_from_cmd(
2693
2694 cmd->i_state = ISTATE_SEND_REJECT;
2695 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2696 -
2697 - ret = wait_for_completion_interruptible(&cmd->reject_comp);
2698 /*
2699 * Perform the kref_put now if se_cmd has already been setup by
2700 * scsit_setup_scsi_cmd()
2701 @@ -719,12 +699,19 @@ int iscsit_add_reject_from_cmd(
2702 pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
2703 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2704 }
2705 - if (ret != 0)
2706 - return -1;
2707 + return -1;
2708 +}
2709
2710 - return (!fail_conn) ? 0 : -1;
2711 +static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
2712 + unsigned char *buf)
2713 +{
2714 + return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
2715 +}
2716 +
2717 +int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
2718 +{
2719 + return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
2720 }
2721 -EXPORT_SYMBOL(iscsit_add_reject_from_cmd);
2722
2723 /*
2724 * Map some portion of the allocated scatterlist to an iovec, suitable for
2725 @@ -844,8 +831,8 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2726 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
2727 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
2728 " not set. Bad iSCSI Initiator.\n");
2729 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2730 - 1, 1, buf, cmd);
2731 + return iscsit_add_reject_cmd(cmd,
2732 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2733 }
2734
2735 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
2736 @@ -865,8 +852,8 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2737 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
2738 " set when Expected Data Transfer Length is 0 for"
2739 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
2740 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2741 - 1, 1, buf, cmd);
2742 + return iscsit_add_reject_cmd(cmd,
2743 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2744 }
2745 done:
2746
2747 @@ -875,62 +862,62 @@ done:
2748 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
2749 " MUST be set if Expected Data Transfer Length is not 0."
2750 " Bad iSCSI Initiator\n");
2751 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2752 - 1, 1, buf, cmd);
2753 + return iscsit_add_reject_cmd(cmd,
2754 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2755 }
2756
2757 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
2758 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
2759 pr_err("Bidirectional operations not supported!\n");
2760 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2761 - 1, 1, buf, cmd);
2762 + return iscsit_add_reject_cmd(cmd,
2763 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2764 }
2765
2766 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
2767 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
2768 " Scsi Command PDU.\n");
2769 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2770 - 1, 1, buf, cmd);
2771 + return iscsit_add_reject_cmd(cmd,
2772 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2773 }
2774
2775 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
2776 pr_err("ImmediateData=No but DataSegmentLength=%u,"
2777 " protocol error.\n", payload_length);
2778 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
2779 - 1, 1, buf, cmd);
2780 + return iscsit_add_reject_cmd(cmd,
2781 + ISCSI_REASON_PROTOCOL_ERROR, buf);
2782 }
2783
2784 - if ((be32_to_cpu(hdr->data_length )== payload_length) &&
2785 + if ((be32_to_cpu(hdr->data_length) == payload_length) &&
2786 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
2787 pr_err("Expected Data Transfer Length and Length of"
2788 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
2789 " bit is not set protocol error\n");
2790 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
2791 - 1, 1, buf, cmd);
2792 + return iscsit_add_reject_cmd(cmd,
2793 + ISCSI_REASON_PROTOCOL_ERROR, buf);
2794 }
2795
2796 if (payload_length > be32_to_cpu(hdr->data_length)) {
2797 pr_err("DataSegmentLength: %u is greater than"
2798 " EDTL: %u, protocol error.\n", payload_length,
2799 hdr->data_length);
2800 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
2801 - 1, 1, buf, cmd);
2802 + return iscsit_add_reject_cmd(cmd,
2803 + ISCSI_REASON_PROTOCOL_ERROR, buf);
2804 }
2805
2806 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2807 pr_err("DataSegmentLength: %u is greater than"
2808 " MaxXmitDataSegmentLength: %u, protocol error.\n",
2809 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2810 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
2811 - 1, 1, buf, cmd);
2812 + return iscsit_add_reject_cmd(cmd,
2813 + ISCSI_REASON_PROTOCOL_ERROR, buf);
2814 }
2815
2816 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
2817 pr_err("DataSegmentLength: %u is greater than"
2818 " FirstBurstLength: %u, protocol error.\n",
2819 payload_length, conn->sess->sess_ops->FirstBurstLength);
2820 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
2821 - 1, 1, buf, cmd);
2822 + return iscsit_add_reject_cmd(cmd,
2823 + ISCSI_REASON_BOOKMARK_INVALID, buf);
2824 }
2825
2826 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
2827 @@ -985,9 +972,8 @@ done:
2828
2829 dr = iscsit_allocate_datain_req();
2830 if (!dr)
2831 - return iscsit_add_reject_from_cmd(
2832 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2833 - 1, 1, buf, cmd);
2834 + return iscsit_add_reject_cmd(cmd,
2835 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2836
2837 iscsit_attach_datain_req(cmd, dr);
2838 }
2839 @@ -1015,18 +1001,16 @@ done:
2840 cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
2841 if (cmd->sense_reason) {
2842 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
2843 - return iscsit_add_reject_from_cmd(
2844 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2845 - 1, 1, buf, cmd);
2846 + return iscsit_add_reject_cmd(cmd,
2847 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2848 }
2849
2850 goto attach_cmd;
2851 }
2852
2853 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
2854 - return iscsit_add_reject_from_cmd(
2855 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2856 - 1, 1, buf, cmd);
2857 + return iscsit_add_reject_cmd(cmd,
2858 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2859 }
2860
2861 attach_cmd:
2862 @@ -1068,17 +1052,13 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2863 * be acknowledged. (See below)
2864 */
2865 if (!cmd->immediate_data) {
2866 - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
2867 - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2868 - if (!cmd->sense_reason)
2869 - return 0;
2870 -
2871 + cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2872 + (unsigned char *)hdr, hdr->cmdsn);
2873 + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2874 + return -1;
2875 + else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2876 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2877 return 0;
2878 - } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2879 - return iscsit_add_reject_from_cmd(
2880 - ISCSI_REASON_PROTOCOL_ERROR,
2881 - 1, 0, (unsigned char *)hdr, cmd);
2882 }
2883 }
2884
2885 @@ -1103,6 +1083,9 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2886 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
2887 */
2888 if (cmd->sense_reason) {
2889 + if (cmd->reject_reason)
2890 + return 0;
2891 +
2892 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2893 return 1;
2894 }
2895 @@ -1111,10 +1094,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2896 * the backend memory allocation.
2897 */
2898 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
2899 - if (cmd->sense_reason) {
2900 - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2901 + if (cmd->sense_reason)
2902 return 1;
2903 - }
2904
2905 return 0;
2906 }
2907 @@ -1124,6 +1105,7 @@ static int
2908 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
2909 bool dump_payload)
2910 {
2911 + struct iscsi_conn *conn = cmd->conn;
2912 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
2913 /*
2914 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
2915 @@ -1140,20 +1122,25 @@ after_immediate_data:
2916 * DataCRC, check against ExpCmdSN/MaxCmdSN if
2917 * Immediate Bit is not set.
2918 */
2919 - cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn);
2920 + cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
2921 + (unsigned char *)hdr, hdr->cmdsn);
2922 + if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2923 + return -1;
2924 + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2925 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2926 + return 0;
2927 + }
2928
2929 if (cmd->sense_reason) {
2930 - if (iscsit_dump_data_payload(cmd->conn,
2931 - cmd->first_burst_len, 1) < 0)
2932 - return -1;
2933 + int rc;
2934 +
2935 + rc = iscsit_dump_data_payload(cmd->conn,
2936 + cmd->first_burst_len, 1);
2937 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
2938 + return rc;
2939 } else if (cmd->unsolicited_data)
2940 iscsit_set_unsoliticed_dataout(cmd);
2941
2942 - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2943 - return iscsit_add_reject_from_cmd(
2944 - ISCSI_REASON_PROTOCOL_ERROR,
2945 - 1, 0, (unsigned char *)hdr, cmd);
2946 -
2947 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
2948 /*
2949 * Immediate Data failed DataCRC and ERL>=1,
2950 @@ -1184,15 +1171,14 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2951
2952 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
2953 if (rc < 0)
2954 - return rc;
2955 + return 0;
2956 /*
2957 * Allocation iovecs needed for struct socket operations for
2958 * traditional iSCSI block I/O.
2959 */
2960 if (iscsit_allocate_iovecs(cmd) < 0) {
2961 - return iscsit_add_reject_from_cmd(
2962 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2963 - 1, 0, buf, cmd);
2964 + return iscsit_add_reject_cmd(cmd,
2965 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2966 }
2967 immed_data = cmd->immediate_data;
2968
2969 @@ -1283,8 +1269,8 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
2970
2971 if (!payload_length) {
2972 pr_err("DataOUT payload is ZERO, protocol error.\n");
2973 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2974 - buf, conn);
2975 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2976 + buf);
2977 }
2978
2979 /* iSCSI write */
2980 @@ -1301,8 +1287,8 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
2981 pr_err("DataSegmentLength: %u is greater than"
2982 " MaxXmitDataSegmentLength: %u\n", payload_length,
2983 conn->conn_ops->MaxXmitDataSegmentLength);
2984 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
2985 - buf, conn);
2986 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2987 + buf);
2988 }
2989
2990 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
2991 @@ -1325,8 +1311,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
2992 if (cmd->data_direction != DMA_TO_DEVICE) {
2993 pr_err("Command ITT: 0x%08x received DataOUT for a"
2994 " NON-WRITE command.\n", cmd->init_task_tag);
2995 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
2996 - 1, 0, buf, cmd);
2997 + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2998 }
2999 se_cmd = &cmd->se_cmd;
3000 iscsit_mod_dataout_timer(cmd);
3001 @@ -1335,8 +1320,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
3002 pr_err("DataOut Offset: %u, Length %u greater than"
3003 " iSCSI Command EDTL %u, protocol error.\n",
3004 hdr->offset, payload_length, cmd->se_cmd.data_length);
3005 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
3006 - 1, 0, buf, cmd);
3007 + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
3008 }
3009
3010 if (cmd->unsolicited_data) {
3011 @@ -1528,7 +1512,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
3012
3013 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
3014 if (rc < 0)
3015 - return rc;
3016 + return 0;
3017 else if (!cmd)
3018 return 0;
3019
3020 @@ -1557,8 +1541,8 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3021 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
3022 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
3023 " not set, protocol error.\n");
3024 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3025 - buf, conn);
3026 + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
3027 + (unsigned char *)hdr);
3028 }
3029
3030 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
3031 @@ -1566,8 +1550,8 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3032 " greater than MaxXmitDataSegmentLength: %u, protocol"
3033 " error.\n", payload_length,
3034 conn->conn_ops->MaxXmitDataSegmentLength);
3035 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3036 - buf, conn);
3037 + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
3038 + (unsigned char *)hdr);
3039 }
3040
3041 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
3042 @@ -1584,9 +1568,9 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3043 */
3044 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3045 if (!cmd)
3046 - return iscsit_add_reject(
3047 + return iscsit_reject_cmd(cmd,
3048 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3049 - 1, buf, conn);
3050 + (unsigned char *)hdr);
3051
3052 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
3053 cmd->i_state = ISTATE_SEND_NOPIN;
3054 @@ -1700,15 +1684,14 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3055 return 0;
3056 }
3057
3058 - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
3059 + cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
3060 + (unsigned char *)hdr, hdr->cmdsn);
3061 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
3062 ret = 0;
3063 goto ping_out;
3064 }
3065 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
3066 - return iscsit_add_reject_from_cmd(
3067 - ISCSI_REASON_PROTOCOL_ERROR,
3068 - 1, 0, buf, cmd);
3069 + return -1;
3070
3071 return 0;
3072 }
3073 @@ -1757,8 +1740,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3074 struct se_tmr_req *se_tmr;
3075 struct iscsi_tmr_req *tmr_req;
3076 struct iscsi_tm *hdr;
3077 - int out_of_order_cmdsn = 0;
3078 - int ret;
3079 + int out_of_order_cmdsn = 0, ret;
3080 + bool sess_ref = false;
3081 u8 function;
3082
3083 hdr = (struct iscsi_tm *) buf;
3084 @@ -1782,8 +1765,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3085 pr_err("Task Management Request TASK_REASSIGN not"
3086 " issued as immediate command, bad iSCSI Initiator"
3087 "implementation\n");
3088 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
3089 - 1, 1, buf, cmd);
3090 + return iscsit_add_reject_cmd(cmd,
3091 + ISCSI_REASON_PROTOCOL_ERROR, buf);
3092 }
3093 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
3094 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
3095 @@ -1795,9 +1778,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3096 if (!cmd->tmr_req) {
3097 pr_err("Unable to allocate memory for"
3098 " Task Management command!\n");
3099 - return iscsit_add_reject_from_cmd(
3100 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3101 - 1, 1, buf, cmd);
3102 + return iscsit_add_reject_cmd(cmd,
3103 + ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3104 + buf);
3105 }
3106
3107 /*
3108 @@ -1814,6 +1797,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3109 conn->sess->se_sess, 0, DMA_NONE,
3110 MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
3111
3112 + target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
3113 + sess_ref = true;
3114 +
3115 switch (function) {
3116 case ISCSI_TM_FUNC_ABORT_TASK:
3117 tcm_function = TMR_ABORT_TASK;
3118 @@ -1839,17 +1825,15 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3119 default:
3120 pr_err("Unknown iSCSI TMR Function:"
3121 " 0x%02x\n", function);
3122 - return iscsit_add_reject_from_cmd(
3123 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3124 - 1, 1, buf, cmd);
3125 + return iscsit_add_reject_cmd(cmd,
3126 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
3127 }
3128
3129 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
3130 tcm_function, GFP_KERNEL);
3131 if (ret < 0)
3132 - return iscsit_add_reject_from_cmd(
3133 - ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3134 - 1, 1, buf, cmd);
3135 + return iscsit_add_reject_cmd(cmd,
3136 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
3137
3138 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
3139 }
3140 @@ -1908,9 +1892,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3141 break;
3142
3143 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
3144 - return iscsit_add_reject_from_cmd(
3145 - ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
3146 - buf, cmd);
3147 + return iscsit_add_reject_cmd(cmd,
3148 + ISCSI_REASON_BOOKMARK_INVALID, buf);
3149 break;
3150 default:
3151 pr_err("Unknown TMR function: 0x%02x, protocol"
3152 @@ -1928,15 +1911,13 @@ attach:
3153 spin_unlock_bh(&conn->cmd_lock);
3154
3155 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
3156 - int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
3157 + int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
3158 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
3159 out_of_order_cmdsn = 1;
3160 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
3161 return 0;
3162 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
3163 - return iscsit_add_reject_from_cmd(
3164 - ISCSI_REASON_PROTOCOL_ERROR,
3165 - 1, 0, buf, cmd);
3166 + return -1;
3167 }
3168 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
3169
3170 @@ -1956,6 +1937,11 @@ attach:
3171 * For connection recovery, this is also the default action for
3172 * TMR TASK_REASSIGN.
3173 */
3174 + if (sess_ref) {
3175 + pr_debug("Handle TMR, using sess_ref=true check\n");
3176 + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3177 + }
3178 +
3179 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
3180 return 0;
3181 }
3182 @@ -1981,8 +1967,7 @@ static int iscsit_handle_text_cmd(
3183 pr_err("Unable to accept text parameter length: %u"
3184 "greater than MaxXmitDataSegmentLength %u.\n",
3185 payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
3186 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3187 - buf, conn);
3188 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
3189 }
3190
3191 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
3192 @@ -2084,8 +2069,8 @@ static int iscsit_handle_text_cmd(
3193
3194 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3195 if (!cmd)
3196 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3197 - 1, buf, conn);
3198 + return iscsit_add_reject(conn,
3199 + ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
3200
3201 cmd->iscsi_opcode = ISCSI_OP_TEXT;
3202 cmd->i_state = ISTATE_SEND_TEXTRSP;
3203 @@ -2103,11 +2088,10 @@ static int iscsit_handle_text_cmd(
3204 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
3205
3206 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
3207 - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
3208 + cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
3209 + (unsigned char *)hdr, hdr->cmdsn);
3210 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
3211 - return iscsit_add_reject_from_cmd(
3212 - ISCSI_REASON_PROTOCOL_ERROR,
3213 - 1, 0, buf, cmd);
3214 + return -1;
3215
3216 return 0;
3217 }
3218 @@ -2292,14 +2276,11 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3219 if (ret < 0)
3220 return ret;
3221 } else {
3222 - cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
3223 - if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
3224 + cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
3225 + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
3226 logout_remove = 0;
3227 - } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
3228 - return iscsit_add_reject_from_cmd(
3229 - ISCSI_REASON_PROTOCOL_ERROR,
3230 - 1, 0, buf, cmd);
3231 - }
3232 + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
3233 + return -1;
3234 }
3235
3236 return logout_remove;
3237 @@ -2323,8 +2304,8 @@ static int iscsit_handle_snack(
3238 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3239 pr_err("Initiator sent SNACK request while in"
3240 " ErrorRecoveryLevel=0.\n");
3241 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3242 - buf, conn);
3243 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3244 + buf);
3245 }
3246 /*
3247 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
3248 @@ -2348,13 +2329,13 @@ static int iscsit_handle_snack(
3249 case ISCSI_FLAG_SNACK_TYPE_RDATA:
3250 /* FIXME: Support R-Data SNACK */
3251 pr_err("R-Data SNACK Not Supported.\n");
3252 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3253 - buf, conn);
3254 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3255 + buf);
3256 default:
3257 pr_err("Unknown SNACK type 0x%02x, protocol"
3258 " error.\n", hdr->flags & 0x0f);
3259 - return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3260 - buf, conn);
3261 + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3262 + buf);
3263 }
3264
3265 return 0;
3266 @@ -2426,14 +2407,14 @@ static int iscsit_handle_immediate_data(
3267 pr_err("Unable to recover from"
3268 " Immediate Data digest failure while"
3269 " in ERL=0.\n");
3270 - iscsit_add_reject_from_cmd(
3271 + iscsit_reject_cmd(cmd,
3272 ISCSI_REASON_DATA_DIGEST_ERROR,
3273 - 1, 0, (unsigned char *)hdr, cmd);
3274 + (unsigned char *)hdr);
3275 return IMMEDIATE_DATA_CANNOT_RECOVER;
3276 } else {
3277 - iscsit_add_reject_from_cmd(
3278 + iscsit_reject_cmd(cmd,
3279 ISCSI_REASON_DATA_DIGEST_ERROR,
3280 - 0, 0, (unsigned char *)hdr, cmd);
3281 + (unsigned char *)hdr);
3282 return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
3283 }
3284 } else {
3285 @@ -3533,6 +3514,7 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3286 struct iscsi_reject *hdr)
3287 {
3288 hdr->opcode = ISCSI_OP_REJECT;
3289 + hdr->reason = cmd->reject_reason;
3290 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3291 hton24(hdr->dlength, ISCSI_HDR_LEN);
3292 hdr->ffffffff = cpu_to_be32(0xffffffff);
3293 @@ -3806,18 +3788,11 @@ check_rsp_state:
3294 case ISTATE_SEND_STATUS_RECOVERY:
3295 case ISTATE_SEND_TEXTRSP:
3296 case ISTATE_SEND_TASKMGTRSP:
3297 + case ISTATE_SEND_REJECT:
3298 spin_lock_bh(&cmd->istate_lock);
3299 cmd->i_state = ISTATE_SENT_STATUS;
3300 spin_unlock_bh(&cmd->istate_lock);
3301 break;
3302 - case ISTATE_SEND_REJECT:
3303 - if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
3304 - cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
3305 - complete(&cmd->reject_comp);
3306 - goto err;
3307 - }
3308 - complete(&cmd->reject_comp);
3309 - break;
3310 default:
3311 pr_err("Unknown Opcode: 0x%02x ITT:"
3312 " 0x%08x, i_state: %d on CID: %hu\n",
3313 @@ -3922,8 +3897,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3314 case ISCSI_OP_SCSI_CMD:
3315 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3316 if (!cmd)
3317 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3318 - 1, buf, conn);
3319 + goto reject;
3320
3321 ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3322 break;
3323 @@ -3935,16 +3909,14 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3324 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3325 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3326 if (!cmd)
3327 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3328 - 1, buf, conn);
3329 + goto reject;
3330 }
3331 ret = iscsit_handle_nop_out(conn, cmd, buf);
3332 break;
3333 case ISCSI_OP_SCSI_TMFUNC:
3334 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3335 if (!cmd)
3336 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3337 - 1, buf, conn);
3338 + goto reject;
3339
3340 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
3341 break;
3342 @@ -3954,8 +3926,7 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3343 case ISCSI_OP_LOGOUT:
3344 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
3345 if (!cmd)
3346 - return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3347 - 1, buf, conn);
3348 + goto reject;
3349
3350 ret = iscsit_handle_logout_cmd(conn, cmd, buf);
3351 if (ret > 0)
3352 @@ -3987,6 +3958,8 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
3353 }
3354
3355 return ret;
3356 +reject:
3357 + return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
3358 }
3359
3360 int iscsi_target_rx_thread(void *arg)
3361 @@ -4086,8 +4059,8 @@ restart:
3362 (!(opcode & ISCSI_OP_LOGOUT)))) {
3363 pr_err("Received illegal iSCSI Opcode: 0x%02x"
3364 " while in Discovery Session, rejecting.\n", opcode);
3365 - iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3366 - buffer, conn);
3367 + iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3368 + buffer);
3369 goto transport_err;
3370 }
3371
3372 diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
3373 index a0050b2..2c437cb 100644
3374 --- a/drivers/target/iscsi/iscsi_target.h
3375 +++ b/drivers/target/iscsi/iscsi_target.h
3376 @@ -15,7 +15,7 @@ extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
3377 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
3378 struct iscsi_portal_group *);
3379 extern int iscsit_del_np(struct iscsi_np *);
3380 -extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
3381 +extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
3382 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
3383 extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
3384 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
3385 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
3386 index 8d8b3ff..421344d 100644
3387 --- a/drivers/target/iscsi/iscsi_target_configfs.c
3388 +++ b/drivers/target/iscsi/iscsi_target_configfs.c
3389 @@ -474,7 +474,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
3390 if (!capable(CAP_SYS_ADMIN)) \
3391 return -EPERM; \
3392 \
3393 - snprintf(auth->name, PAGE_SIZE, "%s", page); \
3394 + snprintf(auth->name, sizeof(auth->name), "%s", page); \
3395 if (!strncmp("NULL", auth->name, 4)) \
3396 auth->naf_flags &= ~flags; \
3397 else \
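The configfs hunk above bounds the snprintf() by sizeof(auth->name) instead of PAGE_SIZE, i.e. by the destination field rather than by the sysfs page that feeds it. A minimal userspace sketch of why that matters follows; struct auth_info and store_name() are illustrative stand-ins, not the target driver's types.

#include <stdio.h>
#include <string.h>

struct auth_info {                      /* stand-in for the real structure */
        char name[16];                  /* fixed-size field being written */
        char other[16];                 /* would be clobbered by an oversized bound */
};

static void store_name(struct auth_info *auth, const char *page)
{
        /*
         * Bound the copy by the destination field, not by the size of the
         * source buffer: snprintf() NUL-terminates and never writes more
         * than sizeof(auth->name) bytes into name[].
         */
        snprintf(auth->name, sizeof(auth->name), "%s", page);
}

int main(void)
{
        struct auth_info auth;

        memset(&auth, 0, sizeof(auth));
        store_name(&auth, "a-user-supplied-string-longer-than-sixteen-bytes");
        printf("stored \"%s\" (%zu chars kept)\n", auth.name, strlen(auth.name));
        return 0;
}

With the old PAGE_SIZE bound, a long enough input would have been copied past name[] into whatever follows it in the structure.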
3398 diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
3399 index 60ec4b9..8907dcd 100644
3400 --- a/drivers/target/iscsi/iscsi_target_core.h
3401 +++ b/drivers/target/iscsi/iscsi_target_core.h
3402 @@ -132,7 +132,6 @@ enum cmd_flags_table {
3403 ICF_CONTIG_MEMORY = 0x00000020,
3404 ICF_ATTACHED_TO_RQUEUE = 0x00000040,
3405 ICF_OOO_CMDSN = 0x00000080,
3406 - ICF_REJECT_FAIL_CONN = 0x00000100,
3407 };
3408
3409 /* struct iscsi_cmd->i_state */
3410 @@ -366,6 +365,8 @@ struct iscsi_cmd {
3411 u8 maxcmdsn_inc;
3412 /* Immediate Unsolicited Dataout */
3413 u8 unsolicited_data;
3414 + /* Reject reason code */
3415 + u8 reject_reason;
3416 /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
3417 u16 logout_cid;
3418 /* Command flags */
3419 @@ -446,7 +447,6 @@ struct iscsi_cmd {
3420 struct list_head datain_list;
3421 /* R2T List */
3422 struct list_head cmd_r2t_list;
3423 - struct completion reject_comp;
3424 /* Timer for DataOUT */
3425 struct timer_list dataout_timer;
3426 /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
3427 diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
3428 index dcb199d..08bd878 100644
3429 --- a/drivers/target/iscsi/iscsi_target_erl0.c
3430 +++ b/drivers/target/iscsi/iscsi_target_erl0.c
3431 @@ -746,13 +746,12 @@ int iscsit_check_post_dataout(
3432 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3433 pr_err("Unable to recover from DataOUT CRC"
3434 " failure while ERL=0, closing session.\n");
3435 - iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
3436 - 1, 0, buf, cmd);
3437 + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
3438 + buf);
3439 return DATAOUT_CANNOT_RECOVER;
3440 }
3441
3442 - iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
3443 - 0, 0, buf, cmd);
3444 + iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
3445 return iscsit_dataout_post_crc_failed(cmd, buf);
3446 }
3447 }
3448 @@ -909,6 +908,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
3449 wait_for_completion(&conn->conn_wait_comp);
3450 complete(&conn->conn_post_wait_comp);
3451 }
3452 +EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
3453
3454 void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
3455 {
3456 diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
3457 index 40d9dbc..586c268 100644
3458 --- a/drivers/target/iscsi/iscsi_target_erl1.c
3459 +++ b/drivers/target/iscsi/iscsi_target_erl1.c
3460 @@ -162,9 +162,8 @@ static int iscsit_handle_r2t_snack(
3461 " protocol error.\n", cmd->init_task_tag, begrun,
3462 (begrun + runlength), cmd->acked_data_sn);
3463
3464 - return iscsit_add_reject_from_cmd(
3465 - ISCSI_REASON_PROTOCOL_ERROR,
3466 - 1, 0, buf, cmd);
3467 + return iscsit_reject_cmd(cmd,
3468 + ISCSI_REASON_PROTOCOL_ERROR, buf);
3469 }
3470
3471 if (runlength) {
3472 @@ -173,8 +172,8 @@ static int iscsit_handle_r2t_snack(
3473 " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
3474 " current R2TSN: 0x%08x, protocol error.\n",
3475 cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
3476 - return iscsit_add_reject_from_cmd(
3477 - ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
3478 + return iscsit_reject_cmd(cmd,
3479 + ISCSI_REASON_BOOKMARK_INVALID, buf);
3480 }
3481 last_r2tsn = (begrun + runlength);
3482 } else
3483 @@ -433,8 +432,7 @@ static int iscsit_handle_recovery_datain(
3484 " protocol error.\n", cmd->init_task_tag, begrun,
3485 (begrun + runlength), cmd->acked_data_sn);
3486
3487 - return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
3488 - 1, 0, buf, cmd);
3489 + return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
3490 }
3491
3492 /*
3493 @@ -445,14 +443,14 @@ static int iscsit_handle_recovery_datain(
3494 pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
3495 ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
3496 begrun, runlength, (cmd->data_sn - 1));
3497 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
3498 - 1, 0, buf, cmd);
3499 + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID,
3500 + buf);
3501 }
3502
3503 dr = iscsit_allocate_datain_req();
3504 if (!dr)
3505 - return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3506 - 1, 0, buf, cmd);
3507 + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
3508 + buf);
3509
3510 dr->data_sn = dr->begrun = begrun;
3511 dr->runlength = runlength;
3512 @@ -1090,7 +1088,7 @@ int iscsit_handle_ooo_cmdsn(
3513
3514 ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
3515 if (!ooo_cmdsn)
3516 - return CMDSN_ERROR_CANNOT_RECOVER;
3517 + return -ENOMEM;
3518
3519 ooo_cmdsn->cmd = cmd;
3520 ooo_cmdsn->batch_count = (batch) ?
3521 @@ -1101,10 +1099,10 @@ int iscsit_handle_ooo_cmdsn(
3522
3523 if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
3524 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
3525 - return CMDSN_ERROR_CANNOT_RECOVER;
3526 + return -ENOMEM;
3527 }
3528
3529 - return CMDSN_HIGHER_THAN_EXP;
3530 + return 0;
3531 }
3532
3533 static int iscsit_set_dataout_timeout_values(
3534 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
3535 index 08a3bac..96e7fdb 100644
3536 --- a/drivers/target/iscsi/iscsi_target_util.c
3537 +++ b/drivers/target/iscsi/iscsi_target_util.c
3538 @@ -178,7 +178,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
3539 INIT_LIST_HEAD(&cmd->i_conn_node);
3540 INIT_LIST_HEAD(&cmd->datain_list);
3541 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
3542 - init_completion(&cmd->reject_comp);
3543 spin_lock_init(&cmd->datain_lock);
3544 spin_lock_init(&cmd->dataout_timeout_lock);
3545 spin_lock_init(&cmd->istate_lock);
3546 @@ -284,13 +283,12 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
3547 * Commands may be received out of order if MC/S is in use.
3548 * Ensure they are executed in CmdSN order.
3549 */
3550 -int iscsit_sequence_cmd(
3551 - struct iscsi_conn *conn,
3552 - struct iscsi_cmd *cmd,
3553 - __be32 cmdsn)
3554 +int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3555 + unsigned char *buf, __be32 cmdsn)
3556 {
3557 - int ret;
3558 - int cmdsn_ret;
3559 + int ret, cmdsn_ret;
3560 + bool reject = false;
3561 + u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
3562
3563 mutex_lock(&conn->sess->cmdsn_mutex);
3564
3565 @@ -300,9 +298,19 @@ int iscsit_sequence_cmd(
3566 ret = iscsit_execute_cmd(cmd, 0);
3567 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
3568 iscsit_execute_ooo_cmdsns(conn->sess);
3569 + else if (ret < 0) {
3570 + reject = true;
3571 + ret = CMDSN_ERROR_CANNOT_RECOVER;
3572 + }
3573 break;
3574 case CMDSN_HIGHER_THAN_EXP:
3575 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
3576 + if (ret < 0) {
3577 + reject = true;
3578 + ret = CMDSN_ERROR_CANNOT_RECOVER;
3579 + break;
3580 + }
3581 + ret = CMDSN_HIGHER_THAN_EXP;
3582 break;
3583 case CMDSN_LOWER_THAN_EXP:
3584 cmd->i_state = ISTATE_REMOVE;
3585 @@ -310,11 +318,16 @@ int iscsit_sequence_cmd(
3586 ret = cmdsn_ret;
3587 break;
3588 default:
3589 + reason = ISCSI_REASON_PROTOCOL_ERROR;
3590 + reject = true;
3591 ret = cmdsn_ret;
3592 break;
3593 }
3594 mutex_unlock(&conn->sess->cmdsn_mutex);
3595
3596 + if (reject)
3597 + iscsit_reject_cmd(cmd, reason, buf);
3598 +
3599 return ret;
3600 }
3601 EXPORT_SYMBOL(iscsit_sequence_cmd);
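The reworked iscsit_sequence_cmd() above records the reject decision while the session's cmdsn_mutex is held and only calls iscsit_reject_cmd() after the mutex is released. A small pthread sketch of that record-then-act-after-unlock ordering; the names, outcomes, and reason codes are made up for illustration. Build with -pthread.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

enum { SEQ_OK, SEQ_ERROR };             /* stand-ins for the real outcomes */

static void send_reject(int reason)
{
        /* In the driver this queues a reject PDU for the command. */
        printf("rejecting command, reason=%d\n", reason);
}

static int sequence_cmd(int outcome)
{
        bool reject = false;
        int reason = 0, ret;

        pthread_mutex_lock(&lock);
        switch (outcome) {
        case SEQ_OK:
                ret = 0;
                break;
        default:
                /* Record what to do, but defer doing it... */
                reject = true;
                reason = outcome;
                ret = -1;
                break;
        }
        pthread_mutex_unlock(&lock);

        /* ...until the lock is dropped, mirroring the ordering in the hunk. */
        if (reject)
                send_reject(reason);
        return ret;
}

int main(void)
{
        printf("ok path:    %d\n", sequence_cmd(SEQ_OK));
        printf("error path: %d\n", sequence_cmd(SEQ_ERROR));
        return 0;
}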
3602 diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
3603 index a442265..e4fc34a 100644
3604 --- a/drivers/target/iscsi/iscsi_target_util.h
3605 +++ b/drivers/target/iscsi/iscsi_target_util.h
3606 @@ -13,7 +13,8 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
3607 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
3608 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
3609 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
3610 -int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, __be32 cmdsn);
3611 +extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3612 + unsigned char * ,__be32 cmdsn);
3613 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
3614 extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
3615 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
3616 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
3617 index 121aeb9..f597e88 100644
3618 --- a/drivers/tty/tty_port.c
3619 +++ b/drivers/tty/tty_port.c
3620 @@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
3621 {
3622 struct tty_struct *tty = tty_port_tty_get(port);
3623
3624 - if (tty && (!check_clocal || !C_CLOCAL(tty))) {
3625 + if (tty && (!check_clocal || !C_CLOCAL(tty)))
3626 tty_hangup(tty);
3627 - tty_kref_put(tty);
3628 - }
3629 + tty_kref_put(tty);
3630 }
3631 EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
3632
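The tty_port_tty_hangup() change above moves tty_kref_put() out of the conditional so the reference taken by tty_port_tty_get() is dropped on every path, not only when a hangup is actually performed. A toy refcount sketch of the same shape; obj_get()/obj_put() are stand-ins, not the tty kref API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
};

static int actions;                     /* how many times we "acted" on the object */

static struct obj *obj_get(struct obj *o)
{
        if (o)
                o->refcount++;
        return o;
}

static void obj_put(struct obj *o)
{
        if (o && --o->refcount == 0)
                free(o);
}

/* Buggy shape: the put sits inside the condition, so the reference taken
 * by obj_get() leaks whenever should_act is false. */
static void leaky(struct obj *o, int should_act)
{
        struct obj *ref = obj_get(o);

        if (ref && should_act) {
                actions++;
                obj_put(ref);
        }
}

/* Fixed shape, matching the hunk above: act conditionally, but always
 * drop the reference that was taken. */
static void balanced(struct obj *o, int should_act)
{
        struct obj *ref = obj_get(o);

        if (ref && should_act)
                actions++;
        obj_put(ref);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;
        balanced(o, 0);
        printf("balanced(): refcount=%d\n", o->refcount);      /* still 1 */
        leaky(o, 0);
        printf("leaky():    refcount=%d (reference leaked)\n", o->refcount);
        obj_put(o);     /* drops the original reference; the leaked one remains */
        return 0;
}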
3633 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3634 index feef935..b93fc88 100644
3635 --- a/drivers/usb/core/hub.c
3636 +++ b/drivers/usb/core/hub.c
3637 @@ -668,6 +668,15 @@ resubmit:
3638 static inline int
3639 hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
3640 {
3641 + /* Need to clear both directions for control ep */
3642 + if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
3643 + USB_ENDPOINT_XFER_CONTROL) {
3644 + int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
3645 + HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
3646 + devinfo ^ 0x8000, tt, NULL, 0, 1000);
3647 + if (status)
3648 + return status;
3649 + }
3650 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
3651 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
3652 tt, NULL, 0, 1000);
3653 @@ -2846,6 +2855,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
3654 USB_CTRL_SET_TIMEOUT);
3655 }
3656
3657 +/* Count of wakeup-enabled devices at or below udev */
3658 +static unsigned wakeup_enabled_descendants(struct usb_device *udev)
3659 +{
3660 + struct usb_hub *hub = usb_hub_to_struct_hub(udev);
3661 +
3662 + return udev->do_remote_wakeup +
3663 + (hub ? hub->wakeup_enabled_descendants : 0);
3664 +}
3665 +
3666 /*
3667 * usb_port_suspend - suspend a usb device's upstream port
3668 * @udev: device that's no longer in active use, not a root hub
3669 @@ -2886,8 +2904,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
3670 * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
3671 * timer, no SRP, no requests through sysfs.
3672 *
3673 - * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get
3674 - * suspended only when their bus goes into global suspend (i.e., the root
3675 + * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
3676 + * suspended until their bus goes into global suspend (i.e., the root
3677 * hub is suspended). Nevertheless, we change @udev->state to
3678 * USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
3679 * upstream port setting is stored in @udev->port_is_suspended.
3680 @@ -2958,15 +2976,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3681 /* see 7.1.7.6 */
3682 if (hub_is_superspeed(hub->hdev))
3683 status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
3684 - else if (PMSG_IS_AUTO(msg))
3685 - status = set_port_feature(hub->hdev, port1,
3686 - USB_PORT_FEAT_SUSPEND);
3687 +
3688 /*
3689 * For system suspend, we do not need to enable the suspend feature
3690 * on individual USB-2 ports. The devices will automatically go
3691 * into suspend a few ms after the root hub stops sending packets.
3692 * The USB 2.0 spec calls this "global suspend".
3693 + *
3694 + * However, many USB hubs have a bug: They don't relay wakeup requests
3695 + * from a downstream port if the port's suspend feature isn't on.
3696 + * Therefore we will turn on the suspend feature if udev or any of its
3697 + * descendants is enabled for remote wakeup.
3698 */
3699 + else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
3700 + status = set_port_feature(hub->hdev, port1,
3701 + USB_PORT_FEAT_SUSPEND);
3702 else {
3703 really_suspend = false;
3704 status = 0;
3705 @@ -3001,15 +3025,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3706 if (!PMSG_IS_AUTO(msg))
3707 status = 0;
3708 } else {
3709 - /* device has up to 10 msec to fully suspend */
3710 dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
3711 (PMSG_IS_AUTO(msg) ? "auto-" : ""),
3712 udev->do_remote_wakeup);
3713 - usb_set_device_state(udev, USB_STATE_SUSPENDED);
3714 if (really_suspend) {
3715 udev->port_is_suspended = 1;
3716 +
3717 + /* device has up to 10 msec to fully suspend */
3718 msleep(10);
3719 }
3720 + usb_set_device_state(udev, USB_STATE_SUSPENDED);
3721 }
3722
3723 /*
3724 @@ -3291,7 +3316,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3725 unsigned port1;
3726 int status;
3727
3728 - /* Warn if children aren't already suspended */
3729 + /*
3730 + * Warn if children aren't already suspended.
3731 + * Also, add up the number of wakeup-enabled descendants.
3732 + */
3733 + hub->wakeup_enabled_descendants = 0;
3734 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
3735 struct usb_device *udev;
3736
3737 @@ -3301,6 +3330,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
3738 if (PMSG_IS_AUTO(msg))
3739 return -EBUSY;
3740 }
3741 + if (udev)
3742 + hub->wakeup_enabled_descendants +=
3743 + wakeup_enabled_descendants(udev);
3744 }
3745
3746 if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
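The hub.c changes above add wakeup_enabled_descendants(): a device contributes its own do_remote_wakeup flag plus the total its hub cached for the subtree, and hub_suspend() accumulates those totals so usb_port_suspend() can turn on the port suspend feature whenever anything below the port might need to signal a remote wakeup. A toy tree showing just the accumulation; struct device here is a stand-in, not the USB core's.

#include <stdio.h>

#define MAX_CHILDREN 4

struct device {
        int do_remote_wakeup;                   /* this device wants wakeup */
        int wakeup_enabled_descendants;         /* cached sum for a hub */
        struct device *child[MAX_CHILDREN];
};

/* Count of wakeup-enabled devices at or below dev: its own flag plus the
 * total its subtree reported while the children were being suspended. */
static int wakeup_enabled(struct device *dev)
{
        return dev->do_remote_wakeup + dev->wakeup_enabled_descendants;
}

/* What the hub does for each port at suspend time: add up the children's
 * totals so the parent can later decide about the port suspend feature. */
static void hub_suspend(struct device *hub)
{
        int i;

        hub->wakeup_enabled_descendants = 0;
        for (i = 0; i < MAX_CHILDREN; i++)
                if (hub->child[i])
                        hub->wakeup_enabled_descendants +=
                                wakeup_enabled(hub->child[i]);
}

int main(void)
{
        struct device mouse = { .do_remote_wakeup = 1 };
        struct device disk  = { .do_remote_wakeup = 0 };
        struct device hub   = { .child = { &mouse, &disk } };
        struct device root  = { .child = { &hub } };

        hub_suspend(&hub);      /* children suspend before their parent */
        hub_suspend(&root);

        printf("wakeup-enabled below hub:  %d\n", hub.wakeup_enabled_descendants);
        printf("wakeup-enabled below root: %d\n", root.wakeup_enabled_descendants);
        return 0;
}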
3747 diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
3748 index 80ab9ee..f608b39 100644
3749 --- a/drivers/usb/core/hub.h
3750 +++ b/drivers/usb/core/hub.h
3751 @@ -59,6 +59,9 @@ struct usb_hub {
3752 struct usb_tt tt; /* Transaction Translator */
3753
3754 unsigned mA_per_port; /* current for each child */
3755 +#ifdef CONFIG_PM
3756 + unsigned wakeup_enabled_descendants;
3757 +#endif
3758
3759 unsigned limited_power:1;
3760 unsigned quiescing:1;
3761 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3762 index c35d49d..358375e 100644
3763 --- a/drivers/usb/dwc3/core.c
3764 +++ b/drivers/usb/dwc3/core.c
3765 @@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev)
3766 }
3767
3768 if (IS_ERR(dwc->usb3_phy)) {
3769 - ret = PTR_ERR(dwc->usb2_phy);
3770 + ret = PTR_ERR(dwc->usb3_phy);
3771
3772 /*
3773 * if -ENXIO is returned, it means PHY layer wasn't
3774 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
3775 index b69d322..27dad99 100644
3776 --- a/drivers/usb/dwc3/core.h
3777 +++ b/drivers/usb/dwc3/core.h
3778 @@ -759,8 +759,8 @@ struct dwc3 {
3779
3780 struct dwc3_event_type {
3781 u32 is_devspec:1;
3782 - u32 type:6;
3783 - u32 reserved8_31:25;
3784 + u32 type:7;
3785 + u32 reserved8_31:24;
3786 } __packed;
3787
3788 #define DWC3_DEPEVT_XFERCOMPLETE 0x01
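The core.h hunk above widens the type bit-field from 6 to 7 bits and shrinks the reserved field so the event word stays 32 bits wide. A 6-bit field can only represent event codes 0..63, so any code that needs the seventh bit is silently reduced on assignment. A small standalone illustration; the struct names below are made up and do not describe the controller's actual layout.

#include <stdio.h>

struct evt_narrow {
        unsigned int is_devspec:1;
        unsigned int type:6;            /* can only represent 0..63 */
        unsigned int reserved:25;
};

struct evt_wide {
        unsigned int is_devspec:1;
        unsigned int type:7;            /* can represent 0..127 */
        unsigned int reserved:24;
};

int main(void)
{
        struct evt_narrow n = { 0 };
        struct evt_wide   w = { 0 };
        unsigned int code = 0x40;       /* any event code that needs bit 6 */

        n.type = code;                  /* reduced modulo 64: stored as 0x00 */
        w.type = code;                  /* stored intact as 0x40 */

        printf("6-bit field: wrote 0x%02x, read back 0x%02x\n",
               code, (unsigned int)n.type);
        printf("7-bit field: wrote 0x%02x, read back 0x%02x\n",
               code, (unsigned int)w.type);
        return 0;
}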
3789 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3790 index b5e5b35..f77083f 100644
3791 --- a/drivers/usb/dwc3/gadget.c
3792 +++ b/drivers/usb/dwc3/gadget.c
3793 @@ -1584,6 +1584,7 @@ err1:
3794 __dwc3_gadget_ep_disable(dwc->eps[0]);
3795
3796 err0:
3797 + dwc->gadget_driver = NULL;
3798 spin_unlock_irqrestore(&dwc->lock, flags);
3799
3800 return ret;
3801 diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
3802 index ffd8fa5..5514822 100644
3803 --- a/drivers/usb/gadget/udc-core.c
3804 +++ b/drivers/usb/gadget/udc-core.c
3805 @@ -105,7 +105,7 @@ void usb_gadget_set_state(struct usb_gadget *gadget,
3806 enum usb_device_state state)
3807 {
3808 gadget->state = state;
3809 - sysfs_notify(&gadget->dev.kobj, NULL, "status");
3810 + sysfs_notify(&gadget->dev.kobj, NULL, "state");
3811 }
3812 EXPORT_SYMBOL_GPL(usb_gadget_set_state);
3813
3814 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
3815 index 9ab4a4d..ca6289b 100644
3816 --- a/drivers/usb/host/ehci-hub.c
3817 +++ b/drivers/usb/host/ehci-hub.c
3818 @@ -858,6 +858,7 @@ static int ehci_hub_control (
3819 ehci->reset_done[wIndex] = jiffies
3820 + msecs_to_jiffies(20);
3821 usb_hcd_start_port_resume(&hcd->self, wIndex);
3822 + set_bit(wIndex, &ehci->resuming_ports);
3823 /* check the port again */
3824 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
3825 ehci->reset_done[wIndex]);
3826 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3827 index cc24e39..f00cb20 100644
3828 --- a/drivers/usb/host/xhci-pci.c
3829 +++ b/drivers/usb/host/xhci-pci.c
3830 @@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3831 }
3832 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3833 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
3834 - xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
3835 xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
3836 xhci->limit_active_eps = 64;
3837 xhci->quirks |= XHCI_SW_BW_CHECKING;
3838 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3839 index 1969c00..cc3bfc5 100644
3840 --- a/drivers/usb/host/xhci-ring.c
3841 +++ b/drivers/usb/host/xhci-ring.c
3842 @@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
3843
3844 /* A ring has pending URBs if its TD list is not empty */
3845 if (!(ep->ep_state & EP_HAS_STREAMS)) {
3846 - if (!(list_empty(&ep->ring->td_list)))
3847 + if (ep->ring && !(list_empty(&ep->ring->td_list)))
3848 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
3849 return;
3850 }
3851 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3852 index d8f640b..9a550b6 100644
3853 --- a/drivers/usb/host/xhci.c
3854 +++ b/drivers/usb/host/xhci.c
3855 @@ -1171,9 +1171,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
3856 }
3857
3858 xhci = hcd_to_xhci(hcd);
3859 - if (xhci->xhc_state & XHCI_STATE_HALTED)
3860 - return -ENODEV;
3861 -
3862 if (check_virt_dev) {
3863 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
3864 printk(KERN_DEBUG "xHCI %s called with unaddressed "
3865 @@ -1189,6 +1186,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
3866 }
3867 }
3868
3869 + if (xhci->xhc_state & XHCI_STATE_HALTED)
3870 + return -ENODEV;
3871 +
3872 return 1;
3873 }
3874
3875 @@ -4697,6 +4697,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
3876
3877 get_quirks(dev, xhci);
3878
3879 + /* In xhci controllers which follow xhci 1.0 spec gives a spurious
3880 + * success event after a short transfer. This quirk will ignore such
3881 + * spurious event.
3882 + */
3883 + if (xhci->hci_version > 0x96)
3884 + xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
3885 +
3886 /* Make sure the HC is halted. */
3887 retval = xhci_halt(xhci);
3888 if (retval)
3889 diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
3890 index c21386e..de98906 100644
3891 --- a/drivers/usb/misc/sisusbvga/sisusb.c
3892 +++ b/drivers/usb/misc/sisusbvga/sisusb.c
3893 @@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
3894 { USB_DEVICE(0x0711, 0x0903) },
3895 { USB_DEVICE(0x0711, 0x0918) },
3896 { USB_DEVICE(0x0711, 0x0920) },
3897 + { USB_DEVICE(0x0711, 0x0950) },
3898 { USB_DEVICE(0x182d, 0x021c) },
3899 { USB_DEVICE(0x182d, 0x0269) },
3900 { }
3901 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3902 index 7260ec6..b65e657 100644
3903 --- a/drivers/usb/serial/ftdi_sio.c
3904 +++ b/drivers/usb/serial/ftdi_sio.c
3905 @@ -735,9 +735,34 @@ static struct usb_device_id id_table_combined [] = {
3906 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
3907 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
3908 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
3909 - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
3910 - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
3911 - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
3912 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
3913 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
3914 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
3915 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
3916 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
3917 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
3918 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
3919 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
3920 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
3921 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
3922 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
3923 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
3924 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
3925 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
3926 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
3927 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
3928 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
3929 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
3930 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
3931 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
3932 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
3933 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
3934 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
3935 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
3936 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
3937 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
3938 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
3939 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
3940 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
3941 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
3942 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
3943 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3944 index 6dd7925..1b8af46 100644
3945 --- a/drivers/usb/serial/ftdi_sio_ids.h
3946 +++ b/drivers/usb/serial/ftdi_sio_ids.h
3947 @@ -815,11 +815,35 @@
3948 /*
3949 * RT Systems programming cables for various ham radios
3950 */
3951 -#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
3952 -#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
3953 -#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
3954 -#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
3955 -
3956 +#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
3957 +#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
3958 +#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
3959 +#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
3960 +#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
3961 +#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
3962 +#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
3963 +#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
3964 +#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
3965 +#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
3966 +#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
3967 +#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
3968 +#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
3969 +#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
3970 +#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
3971 +#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
3972 +#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
3973 +#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
3974 +#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
3975 +#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
3976 +#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
3977 +#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
3978 +#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
3979 +#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
3980 +#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
3981 +#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
3982 +#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
3983 +#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
3984 +#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
3985
3986 /*
3987 * Physik Instrumente
3988 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
3989 index 7e99808..62b86a6 100644
3990 --- a/drivers/usb/serial/mos7840.c
3991 +++ b/drivers/usb/serial/mos7840.c
3992 @@ -914,20 +914,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
3993 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
3994 if (status < 0) {
3995 dev_dbg(&port->dev, "Reading Spreg failed\n");
3996 - return -1;
3997 + goto err;
3998 }
3999 Data |= 0x80;
4000 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
4001 if (status < 0) {
4002 dev_dbg(&port->dev, "writing Spreg failed\n");
4003 - return -1;
4004 + goto err;
4005 }
4006
4007 Data &= ~0x80;
4008 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
4009 if (status < 0) {
4010 dev_dbg(&port->dev, "writing Spreg failed\n");
4011 - return -1;
4012 + goto err;
4013 }
4014 /* End of block to be checked */
4015
4016 @@ -936,7 +936,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
4017 &Data);
4018 if (status < 0) {
4019 dev_dbg(&port->dev, "Reading Controlreg failed\n");
4020 - return -1;
4021 + goto err;
4022 }
4023 Data |= 0x08; /* Driver done bit */
4024 Data |= 0x20; /* rx_disable */
4025 @@ -944,7 +944,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
4026 mos7840_port->ControlRegOffset, Data);
4027 if (status < 0) {
4028 dev_dbg(&port->dev, "writing Controlreg failed\n");
4029 - return -1;
4030 + goto err;
4031 }
4032 /* do register settings here */
4033 /* Set all regs to the device default values. */
4034 @@ -955,21 +955,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
4035 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
4036 if (status < 0) {
4037 dev_dbg(&port->dev, "disabling interrupts failed\n");
4038 - return -1;
4039 + goto err;
4040 }
4041 /* Set FIFO_CONTROL_REGISTER to the default value */
4042 Data = 0x00;
4043 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
4044 if (status < 0) {
4045 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
4046 - return -1;
4047 + goto err;
4048 }
4049
4050 Data = 0xcf;
4051 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
4052 if (status < 0) {
4053 dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
4054 - return -1;
4055 + goto err;
4056 }
4057
4058 Data = 0x03;
4059 @@ -1114,6 +1114,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
4060 /* mos7840_change_port_settings(mos7840_port,old_termios); */
4061
4062 return 0;
4063 +err:
4064 + for (j = 0; j < NUM_URBS; ++j) {
4065 + urb = mos7840_port->write_urb_pool[j];
4066 + if (!urb)
4067 + continue;
4068 + kfree(urb->transfer_buffer);
4069 + usb_free_urb(urb);
4070 + }
4071 + return status;
4072 }
4073
4074 /*****************************************************************************
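The mos7840_open() hunk above replaces the bare "return -1" exits with "goto err", so the write-URB pool allocated earlier in open() is freed on every register-setup failure instead of being leaked. A generic sketch of that goto-unwind idiom, using toy buffers and a fake register call rather than the driver's URBs.

#include <stdio.h>
#include <stdlib.h>

#define NUM_BUFS 4

/* Stand-in for a device-register access that can fail. */
static int setup_register(int step)
{
        return (step == 2) ? -1 : 0;    /* pretend step 2 fails */
}

static int open_device(void)
{
        char *pool[NUM_BUFS] = { NULL };
        int status = 0;
        int i, j;

        /* Allocate resources up front, as the driver does with its URB pool. */
        for (i = 0; i < NUM_BUFS; i++) {
                pool[i] = malloc(64);
                if (!pool[i]) {
                        status = -1;
                        goto err;
                }
        }

        /* Any later failure must not just 'return -1': that would leak
         * everything allocated above.  Jump to the unwind label instead. */
        for (i = 0; i < 3; i++) {
                status = setup_register(i);
                if (status < 0)
                        goto err;
        }

        /* The success path would hand the pool over to the device here. */
        return 0;

err:
        for (j = 0; j < NUM_BUFS; j++)
                free(pool[j]);          /* free(NULL) is a no-op */
        return status;
}

int main(void)
{
        printf("open_device() returned %d\n", open_device());
        return 0;
}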
4075 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
4076 index e581c25..01f79f1 100644
4077 --- a/drivers/usb/serial/ti_usb_3410_5052.c
4078 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
4079 @@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial)
4080 usb_set_serial_data(serial, tdev);
4081
4082 /* determine device type */
4083 - if (usb_match_id(serial->interface, ti_id_table_3410))
4084 + if (serial->type == &ti_1port_device)
4085 tdev->td_is_3410 = 1;
4086 dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
4087 tdev->td_is_3410 ? "3410" : "5052");
4088 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4089 index 179933528..c015f2c 100644
4090 --- a/drivers/usb/storage/unusual_devs.h
4091 +++ b/drivers/usb/storage/unusual_devs.h
4092 @@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
4093 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4094 US_FL_FIX_INQUIRY ),
4095
4096 +/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
4097 +UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
4098 + "Sony Corp.",
4099 + "MicroVault Flash Drive",
4100 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4101 + US_FL_NO_READ_CAPACITY_16 ),
4102 +
4103 /* floppy reports multiple luns */
4104 UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
4105 "SAMSUNG",
4106 diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
4107 index 45c8efa..34924fb 100644
4108 --- a/drivers/xen/evtchn.c
4109 +++ b/drivers/xen/evtchn.c
4110 @@ -377,18 +377,12 @@ static long evtchn_ioctl(struct file *file,
4111 if (unbind.port >= NR_EVENT_CHANNELS)
4112 break;
4113
4114 - spin_lock_irq(&port_user_lock);
4115 -
4116 rc = -ENOTCONN;
4117 - if (get_port_user(unbind.port) != u) {
4118 - spin_unlock_irq(&port_user_lock);
4119 + if (get_port_user(unbind.port) != u)
4120 break;
4121 - }
4122
4123 disable_irq(irq_from_evtchn(unbind.port));
4124
4125 - spin_unlock_irq(&port_user_lock);
4126 -
4127 evtchn_unbind_from_user(u, unbind.port);
4128
4129 rc = 0;
4130 @@ -488,26 +482,15 @@ static int evtchn_release(struct inode *inode, struct file *filp)
4131 int i;
4132 struct per_user_data *u = filp->private_data;
4133
4134 - spin_lock_irq(&port_user_lock);
4135 -
4136 - free_page((unsigned long)u->ring);
4137 -
4138 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
4139 if (get_port_user(i) != u)
4140 continue;
4141
4142 disable_irq(irq_from_evtchn(i));
4143 - }
4144 -
4145 - spin_unlock_irq(&port_user_lock);
4146 -
4147 - for (i = 0; i < NR_EVENT_CHANNELS; i++) {
4148 - if (get_port_user(i) != u)
4149 - continue;
4150 -
4151 evtchn_unbind_from_user(get_port_user(i), i);
4152 }
4153
4154 + free_page((unsigned long)u->ring);
4155 kfree(u->name);
4156 kfree(u);
4157
4158 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4159 index df472ab..0b272d0 100644
4160 --- a/fs/btrfs/extent-tree.c
4161 +++ b/fs/btrfs/extent-tree.c
4162 @@ -7298,6 +7298,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
4163 int err = 0;
4164 int ret;
4165 int level;
4166 + bool root_dropped = false;
4167
4168 path = btrfs_alloc_path();
4169 if (!path) {
4170 @@ -7355,6 +7356,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
4171 while (1) {
4172 btrfs_tree_lock(path->nodes[level]);
4173 btrfs_set_lock_blocking(path->nodes[level]);
4174 + path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
4175
4176 ret = btrfs_lookup_extent_info(trans, root,
4177 path->nodes[level]->start,
4178 @@ -7370,6 +7372,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
4179 break;
4180
4181 btrfs_tree_unlock(path->nodes[level]);
4182 + path->locks[level] = 0;
4183 WARN_ON(wc->refs[level] != 1);
4184 level--;
4185 }
4186 @@ -7471,12 +7474,22 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
4187 free_extent_buffer(root->commit_root);
4188 kfree(root);
4189 }
4190 + root_dropped = true;
4191 out_end_trans:
4192 btrfs_end_transaction_throttle(trans, tree_root);
4193 out_free:
4194 kfree(wc);
4195 btrfs_free_path(path);
4196 out:
4197 + /*
4198 + * So if we need to stop dropping the snapshot for whatever reason we
4199 + * need to make sure to add it back to the dead root list so that we
4200 + * keep trying to do the work later. This also cleans up roots if we
4201 + * don't have it in the radix (like when we recover after a power fail
4202 + * or unmount) so we don't leak memory.
4203 + */
4204 + if (root_dropped == false)
4205 + btrfs_add_dead_root(root);
4206 if (err)
4207 btrfs_std_error(root->fs_info, err);
4208 return err;
4209 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
4210 index 79bd479..eb84c2d 100644
4211 --- a/fs/btrfs/scrub.c
4212 +++ b/fs/btrfs/scrub.c
4213 @@ -2501,7 +2501,7 @@ again:
4214 ret = scrub_extent(sctx, extent_logical, extent_len,
4215 extent_physical, extent_dev, flags,
4216 generation, extent_mirror_num,
4217 - extent_physical);
4218 + extent_logical - logical + physical);
4219 if (ret)
4220 goto out;
4221
4222 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
4223 index 84ce601..baf149a 100644
4224 --- a/fs/nfsd/vfs.c
4225 +++ b/fs/nfsd/vfs.c
4226 @@ -802,9 +802,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
4227 flags = O_WRONLY|O_LARGEFILE;
4228 }
4229 *filp = dentry_open(&path, flags, current_cred());
4230 - if (IS_ERR(*filp))
4231 + if (IS_ERR(*filp)) {
4232 host_err = PTR_ERR(*filp);
4233 - else {
4234 + *filp = NULL;
4235 + } else {
4236 host_err = ima_file_check(*filp, may_flags);
4237
4238 if (may_flags & NFSD_MAY_64BIT_COOKIE)
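The nfsd_open() change above NULLs *filp when dentry_open() returns an error pointer, so callers that only look at the returned error code can never dereference an ERR_PTR value left behind in the out-parameter. A userspace sketch of the idiom, with local re-implementations of ERR_PTR/IS_ERR/PTR_ERR and a made-up do_open() helper, just for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Minimal userspace stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct file { int fd; };

/* Pretend open helper that can fail with an error-encoded pointer. */
static struct file *do_open(int should_fail)
{
        if (should_fail)
                return ERR_PTR(-EACCES);
        struct file *f = malloc(sizeof(*f));
        if (!f)
                return ERR_PTR(-ENOMEM);
        f->fd = 3;
        return f;
}

/* Mirrors the fixed nfsd_open() shape: on error, report the code AND
 * clear the out-parameter, so a caller that only checks the return
 * value cannot dereference an error-encoded pointer. */
static int open_file(struct file **filp, int should_fail)
{
        int host_err = 0;

        *filp = do_open(should_fail);
        if (IS_ERR(*filp)) {
                host_err = (int)PTR_ERR(*filp);
                *filp = NULL;           /* the line the hunk above adds */
        }
        return host_err;
}

int main(void)
{
        struct file *f;
        int err = open_file(&f, 1);

        printf("err=%d, filp=%p\n", err, (void *)f);
        if (f)
                free(f);
        return 0;
}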
4239 diff --git a/fs/super.c b/fs/super.c
4240 index 7465d43..68307c0 100644
4241 --- a/fs/super.c
4242 +++ b/fs/super.c
4243 @@ -336,19 +336,19 @@ EXPORT_SYMBOL(deactivate_super);
4244 * and want to turn it into a full-blown active reference. grab_super()
4245 * is called with sb_lock held and drops it. Returns 1 in case of
4246 * success, 0 if we had failed (superblock contents was already dead or
4247 - * dying when grab_super() had been called).
4248 + * dying when grab_super() had been called). Note that this is only
4249 + * called for superblocks not in rundown mode (== ones still on ->fs_supers
4250 + * of their type), so increment of ->s_count is OK here.
4251 */
4252 static int grab_super(struct super_block *s) __releases(sb_lock)
4253 {
4254 - if (atomic_inc_not_zero(&s->s_active)) {
4255 - spin_unlock(&sb_lock);
4256 - return 1;
4257 - }
4258 - /* it's going away */
4259 s->s_count++;
4260 spin_unlock(&sb_lock);
4261 - /* wait for it to die */
4262 down_write(&s->s_umount);
4263 + if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
4264 + put_super(s);
4265 + return 1;
4266 + }
4267 up_write(&s->s_umount);
4268 put_super(s);
4269 return 0;
4270 @@ -463,11 +463,6 @@ retry:
4271 destroy_super(s);
4272 s = NULL;
4273 }
4274 - down_write(&old->s_umount);
4275 - if (unlikely(!(old->s_flags & MS_BORN))) {
4276 - deactivate_locked_super(old);
4277 - goto retry;
4278 - }
4279 return old;
4280 }
4281 }
4282 @@ -660,10 +655,10 @@ restart:
4283 if (hlist_unhashed(&sb->s_instances))
4284 continue;
4285 if (sb->s_bdev == bdev) {
4286 - if (grab_super(sb)) /* drops sb_lock */
4287 - return sb;
4288 - else
4289 + if (!grab_super(sb))
4290 goto restart;
4291 + up_write(&sb->s_umount);
4292 + return sb;
4293 }
4294 }
4295 spin_unlock(&sb_lock);
4296 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
4297 index 191501a..217e4b4 100644
4298 --- a/include/linux/firewire.h
4299 +++ b/include/linux/firewire.h
4300 @@ -434,6 +434,7 @@ struct fw_iso_context {
4301 int type;
4302 int channel;
4303 int speed;
4304 + bool drop_overflow_headers;
4305 size_t header_size;
4306 union {
4307 fw_iso_callback_t sc;
4308 diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
4309 index 23a87d0..c5aade5 100644
4310 --- a/include/target/iscsi/iscsi_transport.h
4311 +++ b/include/target/iscsi/iscsi_transport.h
4312 @@ -34,8 +34,6 @@ extern void iscsit_put_transport(struct iscsit_transport *);
4313 /*
4314 * From iscsi_target.c
4315 */
4316 -extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
4317 - struct iscsi_cmd *);
4318 extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
4319 unsigned char *);
4320 extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
4321 @@ -67,6 +65,10 @@ extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
4322 */
4323 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
4324 /*
4325 + * From iscsi_target_erl0.c
4326 + */
4327 +extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
4328 +/*
4329 * From iscsi_target_erl1.c
4330 */
4331 extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
4332 @@ -80,4 +82,5 @@ extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
4333 * From iscsi_target_util.c
4334 */
4335 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
4336 -extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
4337 +extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
4338 + unsigned char *, __be32);
4339 diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
4340 index d500369..1db453e 100644
4341 --- a/include/uapi/linux/firewire-cdev.h
4342 +++ b/include/uapi/linux/firewire-cdev.h
4343 @@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
4344 * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
4345 * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
4346 * without the interrupt bit set that the kernel's internal buffer for @header
4347 - * is about to overflow. (In the last case, kernels with ABI version < 5 drop
4348 - * header data up to the next interrupt packet.)
4349 + * is about to overflow. (In the last case, ABI versions < 5 drop header data
4350 + * up to the next interrupt packet.)
4351 *
4352 * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
4353 *
4354 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4355 index 0b936d8..f7bc3ce 100644
4356 --- a/kernel/trace/trace.c
4357 +++ b/kernel/trace/trace.c
4358 @@ -1163,18 +1163,17 @@ void tracing_reset_current(int cpu)
4359 tracing_reset(&global_trace.trace_buffer, cpu);
4360 }
4361
4362 +/* Must have trace_types_lock held */
4363 void tracing_reset_all_online_cpus(void)
4364 {
4365 struct trace_array *tr;
4366
4367 - mutex_lock(&trace_types_lock);
4368 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4369 tracing_reset_online_cpus(&tr->trace_buffer);
4370 #ifdef CONFIG_TRACER_MAX_TRACE
4371 tracing_reset_online_cpus(&tr->max_buffer);
4372 #endif
4373 }
4374 - mutex_unlock(&trace_types_lock);
4375 }
4376
4377 #define SAVED_CMDLINES 128
4378 @@ -2956,7 +2955,6 @@ static int tracing_release(struct inode *inode, struct file *file)
4379
4380 iter = m->private;
4381 tr = iter->tr;
4382 - trace_array_put(tr);
4383
4384 mutex_lock(&trace_types_lock);
4385
4386 @@ -2971,6 +2969,9 @@ static int tracing_release(struct inode *inode, struct file *file)
4387 if (!iter->snapshot)
4388 /* reenable tracing if it was previously enabled */
4389 tracing_start_tr(tr);
4390 +
4391 + __trace_array_put(tr);
4392 +
4393 mutex_unlock(&trace_types_lock);
4394
4395 mutex_destroy(&iter->mutex);
4396 @@ -3395,6 +3396,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4397 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4398 {
4399 struct trace_array *tr = inode->i_private;
4400 + int ret;
4401
4402 if (tracing_disabled)
4403 return -ENODEV;
4404 @@ -3402,7 +3404,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
4405 if (trace_array_get(tr) < 0)
4406 return -ENODEV;
4407
4408 - return single_open(file, tracing_trace_options_show, inode->i_private);
4409 + ret = single_open(file, tracing_trace_options_show, inode->i_private);
4410 + if (ret < 0)
4411 + trace_array_put(tr);
4412 +
4413 + return ret;
4414 }
4415
4416 static const struct file_operations tracing_iter_fops = {
4417 @@ -3906,6 +3912,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
4418 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4419 if (!iter) {
4420 ret = -ENOMEM;
4421 + __trace_array_put(tr);
4422 goto out;
4423 }
4424
4425 @@ -4652,21 +4659,24 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
4426 ret = PTR_ERR(iter);
4427 } else {
4428 /* Writes still need the seq_file to hold the private data */
4429 + ret = -ENOMEM;
4430 m = kzalloc(sizeof(*m), GFP_KERNEL);
4431 if (!m)
4432 - return -ENOMEM;
4433 + goto out;
4434 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4435 if (!iter) {
4436 kfree(m);
4437 - return -ENOMEM;
4438 + goto out;
4439 }
4440 + ret = 0;
4441 +
4442 iter->tr = tr;
4443 iter->trace_buffer = &tc->tr->max_buffer;
4444 iter->cpu_file = tc->cpu;
4445 m->private = iter;
4446 file->private_data = m;
4447 }
4448 -
4449 +out:
4450 if (ret < 0)
4451 trace_array_put(tr);
4452
4453 @@ -4896,8 +4906,6 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
4454
4455 mutex_lock(&trace_types_lock);
4456
4457 - tr->ref++;
4458 -
4459 info->iter.tr = tr;
4460 info->iter.cpu_file = tc->cpu;
4461 info->iter.trace = tr->current_trace;
4462 @@ -5276,9 +5284,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
4463 }
4464
4465 static const struct file_operations tracing_stats_fops = {
4466 - .open = tracing_open_generic,
4467 + .open = tracing_open_generic_tc,
4468 .read = tracing_stats_read,
4469 .llseek = generic_file_llseek,
4470 + .release = tracing_release_generic_tc,
4471 };
4472
4473 #ifdef CONFIG_DYNAMIC_FTRACE
4474 @@ -5926,8 +5935,10 @@ static int new_instance_create(const char *name)
4475 goto out_free_tr;
4476
4477 ret = event_trace_add_tracer(tr->dir, tr);
4478 - if (ret)
4479 + if (ret) {
4480 + debugfs_remove_recursive(tr->dir);
4481 goto out_free_tr;
4482 + }
4483
4484 init_tracer_debugfs(tr, tr->dir);
4485
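The trace.c hunks above pair every reference taken at open time with a put either in the corresponding release() or on the open path's own failure exits, instead of leaving stray references behind. A toy sketch of that open/release reference pairing; the structure, helpers, and return codes below are stand-ins, not the tracing core's API.

#include <stdio.h>

struct trace_array {
        int ref;
};

static int trace_array_get(struct trace_array *tr)
{
        tr->ref++;
        return 0;
}

static void trace_array_put(struct trace_array *tr)
{
        tr->ref--;
}

/* Stand-in for single_open()/seq_file setup that may fail. */
static int setup_seq_file(int fail)
{
        return fail ? -12 /* -ENOMEM */ : 0;
}

/* Mirrors the fixed open path: the reference taken up front is dropped
 * again if the rest of open() fails, so only a successful open leaves
 * a reference for release() to drop. */
static int options_open(struct trace_array *tr, int fail)
{
        int ret;

        if (trace_array_get(tr) < 0)
                return -19 /* -ENODEV */;

        ret = setup_seq_file(fail);
        if (ret < 0)
                trace_array_put(tr);    /* undo the get on failure */
        return ret;
}

static void options_release(struct trace_array *tr)
{
        trace_array_put(tr);            /* pairs with the get in open() */
}

int main(void)
{
        struct trace_array tr = { .ref = 1 };

        options_open(&tr, 1);                   /* failed open */
        printf("after failed open: ref=%d\n", tr.ref);
        if (options_open(&tr, 0) == 0) {        /* successful open */
                printf("after open:        ref=%d\n", tr.ref);
                options_release(&tr);
                printf("after release:     ref=%d\n", tr.ref);
        }
        return 0;
}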
4486 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4487 index 6dfd48b..6953263 100644
4488 --- a/kernel/trace/trace_events.c
4489 +++ b/kernel/trace/trace_events.c
4490 @@ -1221,6 +1221,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
4491
4492 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
4493 static int ftrace_event_set_open(struct inode *inode, struct file *file);
4494 +static int ftrace_event_release(struct inode *inode, struct file *file);
4495
4496 static const struct seq_operations show_event_seq_ops = {
4497 .start = t_start,
4498 @@ -1248,7 +1249,7 @@ static const struct file_operations ftrace_set_event_fops = {
4499 .read = seq_read,
4500 .write = ftrace_event_write,
4501 .llseek = seq_lseek,
4502 - .release = seq_release,
4503 + .release = ftrace_event_release,
4504 };
4505
4506 static const struct file_operations ftrace_enable_fops = {
4507 @@ -1326,6 +1327,15 @@ ftrace_event_open(struct inode *inode, struct file *file,
4508 return ret;
4509 }
4510
4511 +static int ftrace_event_release(struct inode *inode, struct file *file)
4512 +{
4513 + struct trace_array *tr = inode->i_private;
4514 +
4515 + trace_array_put(tr);
4516 +
4517 + return seq_release(inode, file);
4518 +}
4519 +
4520 static int
4521 ftrace_event_avail_open(struct inode *inode, struct file *file)
4522 {
4523 @@ -1339,12 +1349,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
4524 {
4525 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
4526 struct trace_array *tr = inode->i_private;
4527 + int ret;
4528 +
4529 + if (trace_array_get(tr) < 0)
4530 + return -ENODEV;
4531
4532 if ((file->f_mode & FMODE_WRITE) &&
4533 (file->f_flags & O_TRUNC))
4534 ftrace_clear_events(tr);
4535
4536 - return ftrace_event_open(inode, file, seq_ops);
4537 + ret = ftrace_event_open(inode, file, seq_ops);
4538 + if (ret < 0)
4539 + trace_array_put(tr);
4540 + return ret;
4541 }
4542
4543 static struct event_subsystem *
4544 diff --git a/mm/memory.c b/mm/memory.c
4545 index 61a262b..5e50800 100644
4546 --- a/mm/memory.c
4547 +++ b/mm/memory.c
4548 @@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
4549 spinlock_t *ptl;
4550 pte_t *start_pte;
4551 pte_t *pte;
4552 + unsigned long range_start = addr;
4553
4554 again:
4555 init_rss_vec(rss);
4556 @@ -1206,12 +1207,14 @@ again:
4557 force_flush = 0;
4558
4559 #ifdef HAVE_GENERIC_MMU_GATHER
4560 - tlb->start = addr;
4561 - tlb->end = end;
4562 + tlb->start = range_start;
4563 + tlb->end = addr;
4564 #endif
4565 tlb_flush_mmu(tlb);
4566 - if (addr != end)
4567 + if (addr != end) {
4568 + range_start = addr;
4569 goto again;
4570 + }
4571 }
4572
4573 return addr;
4574 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4575 index 7431001..4baf12e 100644
4576 --- a/mm/mempolicy.c
4577 +++ b/mm/mempolicy.c
4578 @@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
4579 if (prev) {
4580 vma = prev;
4581 next = vma->vm_next;
4582 - continue;
4583 + if (mpol_equal(vma_policy(vma), new_pol))
4584 + continue;
4585 + /* vma_merge() joined vma && vma->next, case 8 */
4586 + goto replace;
4587 }
4588 if (vma->vm_start != vmstart) {
4589 err = split_vma(vma->vm_mm, vma, vmstart, 1);
4590 @@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
4591 if (err)
4592 goto out;
4593 }
4594 + replace:
4595 err = vma_replace_policy(vma, new_pol);
4596 if (err)
4597 goto out;
4598 diff --git a/mm/mmap.c b/mm/mmap.c
4599 index f681e18..7dbe397 100644
4600 --- a/mm/mmap.c
4601 +++ b/mm/mmap.c
4602 @@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
4603 if (next->anon_vma)
4604 anon_vma_merge(vma, next);
4605 mm->map_count--;
4606 - vma_set_policy(vma, vma_policy(next));
4607 + mpol_put(vma_policy(next));
4608 kmem_cache_free(vm_area_cachep, next);
4609 /*
4610 * In mprotect's case 6 (see comments on vma_merge),
4611 diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
4612 index 8d2eddd..65b1462 100644
4613 --- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
4614 +++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
4615 @@ -98,6 +98,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
4616 */
4617 static u32 *decode_write_list(u32 *va, u32 *vaend)
4618 {
4619 + unsigned long start, end;
4620 int nchunks;
4621
4622 struct rpcrdma_write_array *ary =
4623 @@ -113,9 +114,12 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
4624 return NULL;
4625 }
4626 nchunks = ntohl(ary->wc_nchunks);
4627 - if (((unsigned long)&ary->wc_array[0] +
4628 - (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
4629 - (unsigned long)vaend) {
4630 +
4631 + start = (unsigned long)&ary->wc_array[0];
4632 + end = (unsigned long)vaend;
4633 + if (nchunks < 0 ||
4634 + nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
4635 + (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
4636 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
4637 ary, nchunks, vaend);
4638 return NULL;
4639 @@ -129,6 +133,7 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
4640
4641 static u32 *decode_reply_array(u32 *va, u32 *vaend)
4642 {
4643 + unsigned long start, end;
4644 int nchunks;
4645 struct rpcrdma_write_array *ary =
4646 (struct rpcrdma_write_array *)va;
4647 @@ -143,9 +148,12 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
4648 return NULL;
4649 }
4650 nchunks = ntohl(ary->wc_nchunks);
4651 - if (((unsigned long)&ary->wc_array[0] +
4652 - (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
4653 - (unsigned long)vaend) {
4654 +
4655 + start = (unsigned long)&ary->wc_array[0];
4656 + end = (unsigned long)vaend;
4657 + if (nchunks < 0 ||
4658 + nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
4659 + (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
4660 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
4661 ary, nchunks, vaend);
4662 return NULL;
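
Both svc_rdma_marshal.c hunks above replace a length check that could wrap: with a large wc_nchunks, start + nchunks * chunk_size overflows and the comparison against vaend passes even though the array runs off the buffer. The rewritten test rejects negative counts and counts whose byte size cannot be represented before doing the range comparison. A standalone version of that check:

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe bounds check in the style of the decode_write_list()
     * fix: reject a negative count, reject a count whose byte size would
     * wrap, and only then compare against the end of the buffer. */
    static int chunks_fit(unsigned long start, unsigned long end,
                          int nchunks, size_t chunk_size)
    {
        if (nchunks < 0)
            return 0;
        if ((unsigned long)nchunks > (SIZE_MAX - start) / chunk_size)
            return 0;   /* start + nchunks * chunk_size would overflow */
        return start + (unsigned long)nchunks * chunk_size <= end;
    }

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x2000;

        printf("%d\n", chunks_fit(start, end, 16, 16));      /* 1: fits              */
        printf("%d\n", chunks_fit(start, end, -1, 16));      /* 0: bogus count       */
        printf("%d\n", chunks_fit(start, end, 1 << 30, 16)); /* 0: exceeds the buffer */
        return 0;
    }
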
4663 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4664 index 403010c..051c03d 100644
4665 --- a/sound/pci/hda/patch_realtek.c
4666 +++ b/sound/pci/hda/patch_realtek.c
4667 @@ -3495,9 +3495,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4668 SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4669 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4670 SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4671 + SND_PCI_QUIRK(0x1028, 0x05f9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4672 + SND_PCI_QUIRK(0x1028, 0x05fb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4673 SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4674 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4675 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4676 + SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4677 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
4678 SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
4679 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4680 diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
4681 index 3eeada5..566a367 100644
4682 --- a/sound/soc/codecs/max98088.c
4683 +++ b/sound/soc/codecs/max98088.c
4684 @@ -1612,7 +1612,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
4685
4686 static void max98088_sync_cache(struct snd_soc_codec *codec)
4687 {
4688 - u16 *reg_cache = codec->reg_cache;
4689 + u8 *reg_cache = codec->reg_cache;
4690 int i;
4691
4692 if (!codec->cache_sync)
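
The max98088 fix above corrects the element type used to walk a byte-wide register cache: indexing it through a u16 pointer lands on the wrong offsets and, over a full walk, reads past the end of the cache twice as fast. A tiny illustration of that class of bug (the cache values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* A cache of 8-bit registers viewed two ways. */
    union cache {
        uint8_t  as_u8[4];
        uint16_t as_u16[2];
    };

    int main(void)
    {
        union cache c = { .as_u8 = { 0x10, 0x20, 0x30, 0x40 } };

        /* Register 1 really lives at byte offset 1 ... */
        printf("u8  view, reg 1: 0x%02x\n", (unsigned)c.as_u8[1]);   /* 0x20 */
        /* ... but the u16 view reads byte offsets 2-3 instead. */
        printf("u16 view, idx 1: 0x%04x\n", (unsigned)c.as_u16[1]);  /* byte-order dependent */
        return 0;
    }
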
4693 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
4694 index 92bbfec..ea47938 100644
4695 --- a/sound/soc/codecs/sgtl5000.c
4696 +++ b/sound/soc/codecs/sgtl5000.c
4697 @@ -37,7 +37,7 @@
4698 static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
4699 [SGTL5000_CHIP_CLK_CTRL] = 0x0008,
4700 [SGTL5000_CHIP_I2S_CTRL] = 0x0010,
4701 - [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
4702 + [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
4703 [SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
4704 [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
4705 [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
4706 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
4707 index e971028..730dd0c 100644
4708 --- a/sound/soc/codecs/wm8962.c
4709 +++ b/sound/soc/codecs/wm8962.c
4710 @@ -1600,7 +1600,6 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
4711 struct snd_ctl_elem_value *ucontrol)
4712 {
4713 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
4714 - u16 *reg_cache = codec->reg_cache;
4715 int ret;
4716
4717 /* Apply the update (if any) */
4718 @@ -1609,16 +1608,19 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
4719 return 0;
4720
4721 /* If the left PGA is enabled hit that VU bit... */
4722 - if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
4723 - return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
4724 - reg_cache[WM8962_HPOUTL_VOLUME]);
4725 + ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
4726 + if (ret & WM8962_HPOUTL_PGA_ENA) {
4727 + snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
4728 + snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
4729 + return 1;
4730 + }
4731
4732 /* ...otherwise the right. The VU is stereo. */
4733 - if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
4734 - return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
4735 - reg_cache[WM8962_HPOUTR_VOLUME]);
4736 + if (ret & WM8962_HPOUTR_PGA_ENA)
4737 + snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
4738 + snd_soc_read(codec, WM8962_HPOUTR_VOLUME));
4739
4740 - return 0;
4741 + return 1;
4742 }
4743
4744 /* The VU bits for the speakers are in a different register to the mute
4745 @@ -3374,7 +3376,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
4746 int ret;
4747 struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
4748 struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
4749 - u16 *reg_cache = codec->reg_cache;
4750 int i, trigger, irq_pol;
4751 bool dmicclk, dmicdat;
4752
4753 @@ -3432,8 +3433,9 @@ static int wm8962_probe(struct snd_soc_codec *codec)
4754
4755 /* Put the speakers into mono mode? */
4756 if (pdata->spk_mono)
4757 - reg_cache[WM8962_CLASS_D_CONTROL_2]
4758 - |= WM8962_SPK_MONO;
4759 + snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
4760 + WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
4761 +
4762
4763 /* Micbias setup, detection enable and detection
4764 * threasholds. */
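
The wm8962 hunks above stop the driver from reaching into the raw register cache: the VU bit is latched by writing the volume register back with its current value read through the normal accessor, and the put handler now returns 1 so the control layer knows the value changed. A hedged sketch of the write-back idiom against a hypothetical register map (not the ASoC snd_soc_read()/snd_soc_write() API):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 16-bit register map standing in for the codec accessors. */
    static uint16_t regs[8];

    static uint16_t reg_read(unsigned int reg)              { return regs[reg]; }
    static void     reg_write(unsigned int reg, uint16_t v) { regs[reg] = v; }

    #define HPOUT_VOLUME 2   /* assumed register index, for the sketch only */

    /* Latch a pending volume update by writing the register back with the
     * value just read, then report "changed" (1) to the caller so it can
     * notify listeners -- the shape of the fixed wm8962_put_hp_sw(). */
    static int hit_volume_update_bit(void)
    {
        reg_write(HPOUT_VOLUME, reg_read(HPOUT_VOLUME));
        return 1;
    }

    int main(void)
    {
        regs[HPOUT_VOLUME] = 0x0179;
        printf("changed = %d, reg = 0x%04x\n",
               hit_volume_update_bit(), (unsigned)reg_read(HPOUT_VOLUME));
        return 0;
    }
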
4765 diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
4766 index 2f70ea7..05676c0 100644
4767 --- a/sound/soc/tegra/tegra20_ac97.c
4768 +++ b/sound/soc/tegra/tegra20_ac97.c
4769 @@ -399,9 +399,9 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
4770 ac97->capture_dma_data.slave_id = of_dma[1];
4771
4772 ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1;
4773 - ac97->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4774 - ac97->capture_dma_data.maxburst = 4;
4775 - ac97->capture_dma_data.slave_id = of_dma[0];
4776 + ac97->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4777 + ac97->playback_dma_data.maxburst = 4;
4778 + ac97->playback_dma_data.slave_id = of_dma[1];
4779
4780 ret = snd_soc_register_component(&pdev->dev, &tegra20_ac97_component,
4781 &tegra20_ac97_dai, 1);
4782 diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
4783 index 5eaa12c..551b3c9 100644
4784 --- a/sound/soc/tegra/tegra20_spdif.c
4785 +++ b/sound/soc/tegra/tegra20_spdif.c
4786 @@ -323,8 +323,8 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
4787 }
4788
4789 spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
4790 - spdif->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4791 - spdif->capture_dma_data.maxburst = 4;
4792 + spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4793 + spdif->playback_dma_data.maxburst = 4;
4794 spdif->playback_dma_data.slave_id = dmareq->start;
4795
4796 pm_runtime_enable(&pdev->dev);
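
Both tegra hunks above (tegra20_ac97 and tegra20_spdif) fix the same copy-paste slip: the playback setup block was filling in the capture DMA fields. Routing each direction through a single helper makes that mistake harder to write; the FIFO offsets and slave IDs below are hypothetical, purely to make the sketch run:

    #include <stdio.h>

    /* Simplified DMA slave config, loosely modelled on the tegra fields. */
    struct dma_cfg {
        unsigned long addr;
        int maxburst;
        int slave_id;
    };

    struct dev {
        struct dma_cfg playback;
        struct dma_cfg capture;
    };

    /* One helper per direction: the caller can no longer mix up which
     * struct a given FIFO address and slave ID belong to. */
    static void setup_dir(struct dma_cfg *cfg, unsigned long fifo, int slave_id)
    {
        cfg->addr = fifo;
        cfg->maxburst = 4;
        cfg->slave_id = slave_id;
    }

    int main(void)
    {
        struct dev d;

        setup_dir(&d.capture,  0x70002000UL + 0x1c, 0);   /* RX FIFO, hypothetical */
        setup_dir(&d.playback, 0x70002000UL + 0x2c, 1);   /* TX FIFO, hypothetical */
        printf("playback fifo %#lx, capture fifo %#lx\n",
               d.playback.addr, d.capture.addr);
        return 0;
    }
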
4797 diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
4798 index 5a1f648..274e178 100644
4799 --- a/tools/hv/hv_kvp_daemon.c
4800 +++ b/tools/hv/hv_kvp_daemon.c
4801 @@ -1016,9 +1016,10 @@ kvp_get_ip_info(int family, char *if_name, int op,
4802
4803 if (sn_offset == 0)
4804 strcpy(sn_str, cidr_mask);
4805 - else
4806 + else {
4807 + strcat((char *)ip_buffer->sub_net, ";");
4808 strcat(sn_str, cidr_mask);
4809 - strcat((char *)ip_buffer->sub_net, ";");
4810 + }
4811 sn_offset += strlen(sn_str) + 1;
4812 }
4813
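
The hv_kvp_daemon hunk above moves the ';' separator so it is inserted only before the second and later subnet strings, instead of being appended after every entry and leaving a trailing ';'. The same join logic in miniature:

    #include <stdio.h>
    #include <string.h>

    /* Join strings with ';' between entries only: the separator goes in
     * front of the second and later items, never after the last one. */
    static void join(char *out, size_t outlen, const char **items, int n)
    {
        out[0] = '\0';
        for (int i = 0; i < n; i++) {
            if (i > 0)
                strncat(out, ";", outlen - strlen(out) - 1);
            strncat(out, items[i], outlen - strlen(out) - 1);
        }
    }

    int main(void)
    {
        const char *masks[] = { "255.255.255.0", "255.255.0.0" };
        char buf[64];

        join(buf, sizeof(buf), masks, 2);
        printf("%s\n", buf);   /* 255.255.255.0;255.255.0.0 */
        return 0;
    }
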
4814 diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
4815 index 8ef3bd3..3e89719 100644
4816 --- a/tools/perf/config/utilities.mak
4817 +++ b/tools/perf/config/utilities.mak
4818 @@ -173,7 +173,7 @@ _ge-abspath = $(if $(is-executable),$(1))
4819 # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
4820 #
4821 define get-executable-or-default
4822 -$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
4823 +$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
4824 endef
4825 _ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
4826 _gea_warn = $(warning The path '$(1)' is not executable.)