Magellan Linux

Contents of /trunk/kernel-alx/patches-3.4/0112-3.4.13-all-fixes.patch

Revision 1946
Wed Nov 14 15:25:09 2012 UTC by niro
File size: 61848 bytes
3.4.18-alx-r1
1 diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
2 index 9ff4444..c369c9d 100644
3 --- a/arch/arm/plat-omap/include/plat/omap-serial.h
4 +++ b/arch/arm/plat-omap/include/plat/omap-serial.h
5 @@ -42,10 +42,10 @@
6 #define OMAP_UART_WER_MOD_WKUP 0X7F
7
8 /* Enable XON/XOFF flow control on output */
9 -#define OMAP_UART_SW_TX 0x04
10 +#define OMAP_UART_SW_TX 0x8
11
12 /* Enable XON/XOFF flow control on input */
13 -#define OMAP_UART_SW_RX 0x04
14 +#define OMAP_UART_SW_RX 0x2
15
16 #define OMAP_UART_SYSC_RESET 0X07
17 #define OMAP_UART_TCR_TRIG 0X0F
18 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
19 index 73ef56c..bda833c 100644
20 --- a/arch/x86/kernel/alternative.c
21 +++ b/arch/x86/kernel/alternative.c
22 @@ -160,7 +160,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
23 #endif
24
25 #ifdef P6_NOP1
26 -static const unsigned char __initconst_or_module p6nops[] =
27 +static const unsigned char p6nops[] =
28 {
29 P6_NOP1,
30 P6_NOP2,
31 diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
32 index 46b77ed..a7c6d6a 100644
33 --- a/drivers/char/ttyprintk.c
34 +++ b/drivers/char/ttyprintk.c
35 @@ -67,7 +67,7 @@ static int tpk_printk(const unsigned char *buf, int count)
36 tmp[tpk_curr + 1] = '\0';
37 printk(KERN_INFO "%s%s", tpk_tag, tmp);
38 tpk_curr = 0;
39 - if (buf[i + 1] == '\n')
40 + if ((i + 1) < count && buf[i + 1] == '\n')
41 i++;
42 break;
43 case '\n':
44 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
45 index 47408e8..d10c987 100644
46 --- a/drivers/firmware/efivars.c
47 +++ b/drivers/firmware/efivars.c
48 @@ -435,12 +435,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf)
49 if (status != EFI_SUCCESS)
50 return -EIO;
51
52 - if (var->Attributes & 0x1)
53 + if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
54 str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
55 - if (var->Attributes & 0x2)
56 + if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
57 str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
58 - if (var->Attributes & 0x4)
59 + if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
60 str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
61 + if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
62 + str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
63 + if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
64 + str += sprintf(str,
65 + "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
66 + if (var->Attributes &
67 + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
68 + str += sprintf(str,
69 + "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
70 + if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
71 + str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
72 return str - buf;
73 }
74
75 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
76 index 3974c29..69b23c2 100644
77 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
78 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
79 @@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev)
80
81 netif_stop_queue(dev);
82
83 - ipoib_ib_dev_down(dev, 0);
84 + ipoib_ib_dev_down(dev, 1);
85 ipoib_ib_dev_stop(dev, 0);
86
87 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
88 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
89 index 20ebc6f..213965d 100644
90 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
91 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
92 @@ -190,7 +190,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
93
94 mcast->mcmember = *mcmember;
95
96 - /* Set the cached Q_Key before we attach if it's the broadcast group */
97 + /* Set the multicast MTU and cached Q_Key before we attach if it's
98 + * the broadcast group.
99 + */
100 if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
101 sizeof (union ib_gid))) {
102 spin_lock_irq(&priv->lock);
103 @@ -198,10 +200,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
104 spin_unlock_irq(&priv->lock);
105 return -EAGAIN;
106 }
107 + priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
108 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
109 spin_unlock_irq(&priv->lock);
110 priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
111 set_qkey = 1;
112 +
113 + if (!ipoib_cm_admin_enabled(dev)) {
114 + rtnl_lock();
115 + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
116 + rtnl_unlock();
117 + }
118 }
119
120 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
121 @@ -589,14 +598,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
122 return;
123 }
124
125 - priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
126 -
127 - if (!ipoib_cm_admin_enabled(dev)) {
128 - rtnl_lock();
129 - dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
130 - rtnl_unlock();
131 - }
132 -
133 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
134
135 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
136 diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
137 index 1b5b0c7..922d845 100644
138 --- a/drivers/infiniband/ulp/srp/ib_srp.c
139 +++ b/drivers/infiniband/ulp/srp/ib_srp.c
140 @@ -638,9 +638,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
141 struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
142
143 if (scmnd) {
144 + srp_free_req(target, req, scmnd, 0);
145 scmnd->result = DID_RESET << 16;
146 scmnd->scsi_done(scmnd);
147 - srp_free_req(target, req, scmnd, 0);
148 }
149 }
150
151 @@ -1687,6 +1687,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
152 SRP_TSK_ABORT_TASK);
153 srp_free_req(target, req, scmnd, 0);
154 scmnd->result = DID_ABORT << 16;
155 + scmnd->scsi_done(scmnd);
156
157 return SUCCESS;
158 }
159 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
160 index 2e227fb..f220a69 100644
161 --- a/drivers/md/dm-table.c
162 +++ b/drivers/md/dm-table.c
163 @@ -1351,17 +1351,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
164 return q && blk_queue_nonrot(q);
165 }
166
167 -static bool dm_table_is_nonrot(struct dm_table *t)
168 +static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
169 + sector_t start, sector_t len, void *data)
170 +{
171 + struct request_queue *q = bdev_get_queue(dev->bdev);
172 +
173 + return q && !blk_queue_add_random(q);
174 +}
175 +
176 +static bool dm_table_all_devices_attribute(struct dm_table *t,
177 + iterate_devices_callout_fn func)
178 {
179 struct dm_target *ti;
180 unsigned i = 0;
181
182 - /* Ensure that all underlying device are non-rotational. */
183 while (i < dm_table_get_num_targets(t)) {
184 ti = dm_table_get_target(t, i++);
185
186 if (!ti->type->iterate_devices ||
187 - !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
188 + !ti->type->iterate_devices(ti, func, NULL))
189 return 0;
190 }
191
192 @@ -1393,7 +1401,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
193 if (!dm_table_discard_zeroes_data(t))
194 q->limits.discard_zeroes_data = 0;
195
196 - if (dm_table_is_nonrot(t))
197 + /* Ensure that all underlying devices are non-rotational. */
198 + if (dm_table_all_devices_attribute(t, device_is_nonrot))
199 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
200 else
201 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
202 @@ -1401,6 +1410,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
203 dm_table_set_integrity(t);
204
205 /*
206 + * Determine whether or not this queue's I/O timings contribute
207 + * to the entropy pool, Only request-based targets use this.
208 + * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
209 + * have it set.
210 + */
211 + if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
212 + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
213 +
214 + /*
215 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
216 * visible to other CPUs because, once the flag is set, incoming bios
217 * are processed by request-based dm, which refers to the queue
218 diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
219 index fa365d3..68bf5c3 100644
220 --- a/drivers/md/dm-verity.c
221 +++ b/drivers/md/dm-verity.c
222 @@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
223 v->hash_dev_block_bits = ffs(num) - 1;
224
225 if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
226 - num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
227 - (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
228 + (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
229 + >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
230 ti->error = "Invalid data blocks";
231 r = -EINVAL;
232 goto bad;
233 @@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
234 }
235
236 if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
237 - num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
238 - (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
239 + (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
240 + >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
241 ti->error = "Invalid hash start";
242 r = -EINVAL;
243 goto bad;
244 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
245 index e24143c..9ff3019 100644
246 --- a/drivers/md/dm.c
247 +++ b/drivers/md/dm.c
248 @@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
249 {
250 int r = error;
251 struct dm_rq_target_io *tio = clone->end_io_data;
252 - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
253 + dm_request_endio_fn rq_end_io = NULL;
254
255 - if (mapped && rq_end_io)
256 - r = rq_end_io(tio->ti, clone, error, &tio->info);
257 + if (tio->ti) {
258 + rq_end_io = tio->ti->type->rq_end_io;
259 +
260 + if (mapped && rq_end_io)
261 + r = rq_end_io(tio->ti, clone, error, &tio->info);
262 + }
263
264 if (r <= 0)
265 /* The target wants to complete the I/O */
266 @@ -1566,15 +1570,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
267 int r, requeued = 0;
268 struct dm_rq_target_io *tio = clone->end_io_data;
269
270 - /*
271 - * Hold the md reference here for the in-flight I/O.
272 - * We can't rely on the reference count by device opener,
273 - * because the device may be closed during the request completion
274 - * when all bios are completed.
275 - * See the comment in rq_completed() too.
276 - */
277 - dm_get(md);
278 -
279 tio->ti = ti;
280 r = ti->type->map_rq(ti, clone, &tio->info);
281 switch (r) {
282 @@ -1606,6 +1601,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
283 return requeued;
284 }
285
286 +static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
287 +{
288 + struct request *clone;
289 +
290 + blk_start_request(orig);
291 + clone = orig->special;
292 + atomic_inc(&md->pending[rq_data_dir(clone)]);
293 +
294 + /*
295 + * Hold the md reference here for the in-flight I/O.
296 + * We can't rely on the reference count by device opener,
297 + * because the device may be closed during the request completion
298 + * when all bios are completed.
299 + * See the comment in rq_completed() too.
300 + */
301 + dm_get(md);
302 +
303 + return clone;
304 +}
305 +
306 /*
307 * q->request_fn for request-based dm.
308 * Called with the queue lock held.
309 @@ -1635,14 +1650,21 @@ static void dm_request_fn(struct request_queue *q)
310 pos = blk_rq_pos(rq);
311
312 ti = dm_table_find_target(map, pos);
313 - BUG_ON(!dm_target_is_valid(ti));
314 + if (!dm_target_is_valid(ti)) {
315 + /*
316 + * Must perform setup, that dm_done() requires,
317 + * before calling dm_kill_unmapped_request
318 + */
319 + DMERR_LIMIT("request attempted access beyond the end of device");
320 + clone = dm_start_request(md, rq);
321 + dm_kill_unmapped_request(clone, -EIO);
322 + continue;
323 + }
324
325 if (ti->type->busy && ti->type->busy(ti))
326 goto delay_and_out;
327
328 - blk_start_request(rq);
329 - clone = rq->special;
330 - atomic_inc(&md->pending[rq_data_dir(clone)]);
331 + clone = dm_start_request(md, rq);
332
333 spin_unlock(q->queue_lock);
334 if (map_request(ti, clone, md))
335 @@ -1662,8 +1684,6 @@ delay_and_out:
336 blk_delay_queue(q, HZ / 10);
337 out:
338 dm_table_put(map);
339 -
340 - return;
341 }
342
343 int dm_underlying_device_busy(struct request_queue *q)
344 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
345 index 0fde9fc..83bab2c 100644
346 --- a/drivers/mtd/ubi/build.c
347 +++ b/drivers/mtd/ubi/build.c
348 @@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
349 struct ubi_volume *vol = ubi->volumes[vol_id];
350 int err, old_reserved_pebs = vol->reserved_pebs;
351
352 + if (ubi->ro_mode) {
353 + ubi_warn("skip auto-resize because of R/O mode");
354 + return 0;
355 + }
356 +
357 /*
358 * Clear the auto-resize flag in the volume in-memory copy of the
359 * volume table, and 'ubi_resize_volume()' will propagate this change
360 diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
361 index 5caa572..957f000 100644
362 --- a/drivers/net/can/mscan/mpc5xxx_can.c
363 +++ b/drivers/net/can/mscan/mpc5xxx_can.c
364 @@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
365
366 if (!clock_name || !strcmp(clock_name, "sys")) {
367 sys_clk = clk_get(&ofdev->dev, "sys_clk");
368 - if (!sys_clk) {
369 + if (IS_ERR(sys_clk)) {
370 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
371 goto exit_unmap;
372 }
373 @@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
374
375 if (clocksrc < 0) {
376 ref_clk = clk_get(&ofdev->dev, "ref_clk");
377 - if (!ref_clk) {
378 + if (IS_ERR(ref_clk)) {
379 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
380 goto exit_unmap;
381 }
382 diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
383 index e44097a..0e7d6c1 100644
384 --- a/drivers/net/wireless/ath/ath9k/pci.c
385 +++ b/drivers/net/wireless/ath/ath9k/pci.c
386 @@ -122,8 +122,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
387 if (!parent)
388 return;
389
390 - if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
391 - /* Bluetooth coexistance requires disabling ASPM. */
392 + if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
393 + (AR_SREV_9285(ah))) {
394 + /* Bluetooth coexistance requires disabling ASPM for AR9285. */
395 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
396 aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
397 pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
398 diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
399 index a98db30..0f30c07 100644
400 --- a/drivers/net/wireless/b43legacy/main.c
401 +++ b/drivers/net/wireless/b43legacy/main.c
402 @@ -3892,6 +3892,8 @@ static void b43legacy_remove(struct ssb_device *dev)
403 cancel_work_sync(&wl->firmware_load);
404
405 B43legacy_WARN_ON(!wl);
406 + if (!wldev->fw.ucode)
407 + return; /* NULL if fw never loaded */
408 if (wl->current_dev == wldev)
409 ieee80211_unregister_hw(wl->hw);
410
411 diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
412 index f8d818a..811ff72 100644
413 --- a/drivers/remoteproc/Kconfig
414 +++ b/drivers/remoteproc/Kconfig
415 @@ -5,6 +5,7 @@ config REMOTEPROC
416 tristate
417 depends on EXPERIMENTAL
418 select FW_CONFIG
419 + select VIRTIO
420
421 config OMAP_REMOTEPROC
422 tristate "OMAP remoteproc support"
423 diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
424 index 7591b97..837cc40 100644
425 --- a/drivers/remoteproc/remoteproc_core.c
426 +++ b/drivers/remoteproc/remoteproc_core.c
427 @@ -643,17 +643,10 @@ static int rproc_handle_carveout(struct rproc *rproc,
428 dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
429 rsc->da, rsc->pa, rsc->len, rsc->flags);
430
431 - mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
432 - if (!mapping) {
433 - dev_err(dev, "kzalloc mapping failed\n");
434 - return -ENOMEM;
435 - }
436 -
437 carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
438 if (!carveout) {
439 dev_err(dev, "kzalloc carveout failed\n");
440 - ret = -ENOMEM;
441 - goto free_mapping;
442 + return -ENOMEM;
443 }
444
445 va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
446 @@ -683,11 +676,18 @@ static int rproc_handle_carveout(struct rproc *rproc,
447 * physical address in this case.
448 */
449 if (rproc->domain) {
450 + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
451 + if (!mapping) {
452 + dev_err(dev, "kzalloc mapping failed\n");
453 + ret = -ENOMEM;
454 + goto dma_free;
455 + }
456 +
457 ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
458 rsc->flags);
459 if (ret) {
460 dev_err(dev, "iommu_map failed: %d\n", ret);
461 - goto dma_free;
462 + goto free_mapping;
463 }
464
465 /*
466 @@ -728,12 +728,12 @@ static int rproc_handle_carveout(struct rproc *rproc,
467
468 return 0;
469
470 +free_mapping:
471 + kfree(mapping);
472 dma_free:
473 dma_free_coherent(dev, rsc->len, va, dma);
474 free_carv:
475 kfree(carveout);
476 -free_mapping:
477 - kfree(mapping);
478 return ret;
479 }
480
481 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
482 index 04c5cea..3a8ba3e 100644
483 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
484 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
485 @@ -583,8 +583,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
486 h->state = TPGS_STATE_STANDBY;
487 break;
488 case TPGS_STATE_OFFLINE:
489 - case TPGS_STATE_UNAVAILABLE:
490 - /* Path unusable for unavailable/offline */
491 + /* Path unusable */
492 err = SCSI_DH_DEV_OFFLINED;
493 break;
494 default:
495 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
496 index 7c49e0a..8a5e25d 100644
497 --- a/drivers/scsi/hpsa.c
498 +++ b/drivers/scsi/hpsa.c
499 @@ -2943,7 +2943,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
500 c->Request.Timeout = 0; /* Don't time out */
501 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
502 c->Request.CDB[0] = cmd;
503 - c->Request.CDB[1] = 0x03; /* Reset target above */
504 + c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
505 /* If bytes 4-7 are zero, it means reset the */
506 /* LunID device */
507 c->Request.CDB[4] = 0x00;
508 diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
509 index 3a6c474..337e8b3 100644
510 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c
511 +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
512 @@ -1541,6 +1541,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
513
514 host_config = &evt_struct->iu.mad.host_config;
515
516 + /* The transport length field is only 16-bit */
517 + length = min(0xffff, length);
518 +
519 /* Set up a lun reset SRP command */
520 memset(host_config, 0x00, sizeof(*host_config));
521 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
522 diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
523 index bc6cf88..4c150df 100644
524 --- a/drivers/scsi/isci/init.c
525 +++ b/drivers/scsi/isci/init.c
526 @@ -481,7 +481,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
527 orom->hdr.version)) {
528 dev_warn(&pdev->dev,
529 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
530 - devm_kfree(&pdev->dev, orom);
531 orom = NULL;
532 break;
533 }
534 diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
535 index 9b8117b..4c66f46 100644
536 --- a/drivers/scsi/isci/probe_roms.c
537 +++ b/drivers/scsi/isci/probe_roms.c
538 @@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
539
540 if (i >= len) {
541 dev_err(&pdev->dev, "oprom parse error\n");
542 - devm_kfree(&pdev->dev, rom);
543 rom = NULL;
544 }
545 pci_unmap_biosrom(oprom);
546 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
547 index bb7c482..08d48a3 100644
548 --- a/drivers/scsi/scsi_sysfs.c
549 +++ b/drivers/scsi/scsi_sysfs.c
550 @@ -1023,33 +1023,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
551 void scsi_remove_target(struct device *dev)
552 {
553 struct Scsi_Host *shost = dev_to_shost(dev->parent);
554 - struct scsi_target *starget, *found;
555 + struct scsi_target *starget, *last = NULL;
556 unsigned long flags;
557
558 - restart:
559 - found = NULL;
560 + /* remove targets being careful to lookup next entry before
561 + * deleting the last
562 + */
563 spin_lock_irqsave(shost->host_lock, flags);
564 list_for_each_entry(starget, &shost->__targets, siblings) {
565 if (starget->state == STARGET_DEL)
566 continue;
567 if (starget->dev.parent == dev || &starget->dev == dev) {
568 - found = starget;
569 - found->reap_ref++;
570 - break;
571 + /* assuming new targets arrive at the end */
572 + starget->reap_ref++;
573 + spin_unlock_irqrestore(shost->host_lock, flags);
574 + if (last)
575 + scsi_target_reap(last);
576 + last = starget;
577 + __scsi_remove_target(starget);
578 + spin_lock_irqsave(shost->host_lock, flags);
579 }
580 }
581 spin_unlock_irqrestore(shost->host_lock, flags);
582
583 - if (found) {
584 - __scsi_remove_target(found);
585 - scsi_target_reap(found);
586 - /* in the case where @dev has multiple starget children,
587 - * continue removing.
588 - *
589 - * FIXME: does such a case exist?
590 - */
591 - goto restart;
592 - }
593 + if (last)
594 + scsi_target_reap(last);
595 }
596 EXPORT_SYMBOL(scsi_remove_target);
597
598 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
599 index a796964..b719460 100644
600 --- a/drivers/staging/comedi/comedi_fops.c
601 +++ b/drivers/staging/comedi/comedi_fops.c
602 @@ -843,7 +843,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
603 ret = -EAGAIN;
604 break;
605 }
606 - ret = s->async->inttrig(dev, s, insn->data[0]);
607 + ret = s->async->inttrig(dev, s, data[0]);
608 if (ret >= 0)
609 ret = 1;
610 break;
611 @@ -1088,7 +1088,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
612 goto cleanup;
613 }
614
615 - kfree(async->cmd.chanlist);
616 async->cmd = user_cmd;
617 async->cmd.data = NULL;
618 /* load channel/gain list */
619 @@ -1833,6 +1832,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s)
620 if (async) {
621 comedi_reset_async_buf(async);
622 async->inttrig = NULL;
623 + kfree(async->cmd.chanlist);
624 + async->cmd.chanlist = NULL;
625 } else {
626 printk(KERN_ERR
627 "BUG: (?) do_become_nonbusy called with async=0\n");
628 diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
629 index 6a79ba1..d7e6341 100644
630 --- a/drivers/staging/comedi/drivers/jr3_pci.c
631 +++ b/drivers/staging/comedi/drivers/jr3_pci.c
632 @@ -905,7 +905,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
633 }
634
635 /* Reset DSP card */
636 - devpriv->iobase->channel[0].reset = 0;
637 + writel(0, &devpriv->iobase->channel[0].reset);
638
639 result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
640 dev_dbg(dev->hw_dev, "Firmare load %d\n", result);
641 diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
642 index 23fc64b..c72128f 100644
643 --- a/drivers/staging/comedi/drivers/s626.c
644 +++ b/drivers/staging/comedi/drivers/s626.c
645 @@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
646 /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
647
648 k->SetMode(dev, k, Setup, TRUE);
649 - Preload(dev, k, *(insn->data));
650 + Preload(dev, k, data[0]);
651 k->PulseIndex(dev, k);
652 SetLatchSource(dev, k, valueSrclatch);
653 k->SetEnable(dev, k, (uint16_t) (enab != 0));
654 diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
655 index fa6dc9c..887a807 100644
656 --- a/drivers/staging/rtl8712/rtl8712_recv.c
657 +++ b/drivers/staging/rtl8712/rtl8712_recv.c
658 @@ -1126,6 +1126,9 @@ static void recv_tasklet(void *priv)
659 recvbuf2recvframe(padapter, pskb);
660 skb_reset_tail_pointer(pskb);
661 pskb->len = 0;
662 - skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
663 + if (!skb_cloned(pskb))
664 + skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
665 + else
666 + consume_skb(pskb);
667 }
668 }
669 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
670 index 42cdafe..b5130c8 100644
671 --- a/drivers/staging/speakup/speakup_soft.c
672 +++ b/drivers/staging/speakup/speakup_soft.c
673 @@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth);
674 static unsigned char get_index(void);
675
676 static struct miscdevice synth_device;
677 -static int initialized;
678 +static int init_pos;
679 static int misc_registered;
680
681 static struct var_t vars[] = {
682 @@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp)
683 unsigned long flags;
684 spk_lock(flags);
685 synth_soft.alive = 0;
686 - initialized = 0;
687 + init_pos = 0;
688 spk_unlock(flags);
689 /* Make sure we let applications go before leaving */
690 speakup_start_ttys();
691 @@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
692 ch = '\x18';
693 } else if (synth_buffer_empty()) {
694 break;
695 - } else if (!initialized) {
696 - if (*init) {
697 - ch = *init;
698 - init++;
699 - } else {
700 - initialized = 1;
701 - }
702 + } else if (init[init_pos]) {
703 + ch = init[init_pos++];
704 } else {
705 ch = synth_buffer_getc();
706 }
707 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
708 index c43b683..90dff82 100644
709 --- a/drivers/tty/n_gsm.c
710 +++ b/drivers/tty/n_gsm.c
711 @@ -875,7 +875,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
712
713 /* dlci->skb is locked by tx_lock */
714 if (dlci->skb == NULL) {
715 - dlci->skb = skb_dequeue(&dlci->skb_list);
716 + dlci->skb = skb_dequeue_tail(&dlci->skb_list);
717 if (dlci->skb == NULL)
718 return 0;
719 first = 1;
720 @@ -899,8 +899,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
721
722 /* FIXME: need a timer or something to kick this so it can't
723 get stuck with no work outstanding and no buffer free */
724 - if (msg == NULL)
725 + if (msg == NULL) {
726 + skb_queue_tail(&dlci->skb_list, dlci->skb);
727 + dlci->skb = NULL;
728 return -ENOMEM;
729 + }
730 dp = msg->data;
731
732 if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
733 @@ -971,16 +974,19 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
734 static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
735 {
736 unsigned long flags;
737 + int sweep;
738
739 spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
740 /* If we have nothing running then we need to fire up */
741 + sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
742 if (dlci->gsm->tx_bytes == 0) {
743 if (dlci->net)
744 gsm_dlci_data_output_framed(dlci->gsm, dlci);
745 else
746 gsm_dlci_data_output(dlci->gsm, dlci);
747 - } else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
748 - gsm_dlci_data_sweep(dlci->gsm);
749 + }
750 + if (sweep)
751 + gsm_dlci_data_sweep(dlci->gsm);
752 spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
753 }
754
755 @@ -1190,6 +1196,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
756 u8 *data, int clen)
757 {
758 u8 buf[1];
759 + unsigned long flags;
760 +
761 switch (command) {
762 case CMD_CLD: {
763 struct gsm_dlci *dlci = gsm->dlci[0];
764 @@ -1215,7 +1223,9 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
765 gsm->constipated = 0;
766 gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
767 /* Kick the link in case it is idling */
768 + spin_lock_irqsave(&gsm->tx_lock, flags);
769 gsm_data_kick(gsm);
770 + spin_unlock_irqrestore(&gsm->tx_lock, flags);
771 break;
772 case CMD_MSC:
773 /* Out of band modem line change indicator for a DLCI */
774 @@ -2377,12 +2387,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
775
776 /* Queue poll */
777 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
778 + spin_lock_irqsave(&gsm->tx_lock, flags);
779 gsm_data_kick(gsm);
780 if (gsm->tx_bytes < TX_THRESH_LO) {
781 - spin_lock_irqsave(&gsm->tx_lock, flags);
782 gsm_dlci_data_sweep(gsm);
783 - spin_unlock_irqrestore(&gsm->tx_lock, flags);
784 }
785 + spin_unlock_irqrestore(&gsm->tx_lock, flags);
786 }
787
788 /**
789 @@ -2889,6 +2899,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
790 gsm = gsm_mux[mux];
791 if (gsm->dead)
792 return -EL2HLT;
793 + /* If DLCI 0 is not yet fully open return an error. This is ok from a locking
794 + perspective as we don't have to worry about this if DLCI0 is lost */
795 + if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
796 + return -EL2NSYNC;
797 dlci = gsm->dlci[line];
798 if (dlci == NULL)
799 dlci = gsm_dlci_alloc(gsm, line);
800 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
801 index 94b6eda..2303a02 100644
802 --- a/drivers/tty/n_tty.c
803 +++ b/drivers/tty/n_tty.c
804 @@ -1727,7 +1727,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
805
806 do_it_again:
807
808 - BUG_ON(!tty->read_buf);
809 + if (WARN_ON(!tty->read_buf))
810 + return -EAGAIN;
811
812 c = job_control(tty, file);
813 if (c < 0)
814 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
815 index 3614973..40747fe 100644
816 --- a/drivers/tty/serial/8250/8250_pci.c
817 +++ b/drivers/tty/serial/8250/8250_pci.c
818 @@ -1125,6 +1125,8 @@ pci_xr17c154_setup(struct serial_private *priv,
819 #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
820 #define PCI_SUBDEVICE_ID_POCTAL232 0x0308
821 #define PCI_SUBDEVICE_ID_POCTAL422 0x0408
822 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
823 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
824 #define PCI_VENDOR_ID_ADVANTECH 0x13fe
825 #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
826 #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
827 @@ -3187,8 +3189,11 @@ static struct pci_device_id serial_pci_tbl[] = {
828 * For now just used the hex ID 0x950a.
829 */
830 { PCI_VENDOR_ID_OXSEMI, 0x950a,
831 - PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
832 - pbn_b0_2_115200 },
833 + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00,
834 + 0, 0, pbn_b0_2_115200 },
835 + { PCI_VENDOR_ID_OXSEMI, 0x950a,
836 + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30,
837 + 0, 0, pbn_b0_2_115200 },
838 { PCI_VENDOR_ID_OXSEMI, 0x950a,
839 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
840 pbn_b0_2_1130000 },
841 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
842 index 3d569cd..b69356c 100644
843 --- a/drivers/tty/serial/amba-pl011.c
844 +++ b/drivers/tty/serial/amba-pl011.c
845 @@ -1654,13 +1654,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
846 old_cr &= ~ST_UART011_CR_OVSFACT;
847 }
848
849 + /*
850 + * Workaround for the ST Micro oversampling variants to
851 + * increase the bitrate slightly, by lowering the divisor,
852 + * to avoid delayed sampling of start bit at high speeds,
853 + * else we see data corruption.
854 + */
855 + if (uap->vendor->oversampling) {
856 + if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
857 + quot -= 1;
858 + else if ((baud > 3250000) && (quot > 2))
859 + quot -= 2;
860 + }
861 /* Set baud rate */
862 writew(quot & 0x3f, port->membase + UART011_FBRD);
863 writew(quot >> 6, port->membase + UART011_IBRD);
864
865 /*
866 * ----------v----------v----------v----------v-----
867 - * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
868 + * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
869 + * UART011_FBRD & UART011_IBRD.
870 * ----------^----------^----------^----------^-----
871 */
872 writew(lcr_h, port->membase + uap->lcrh_rx);
873 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
874 index d00b38e..6189923 100644
875 --- a/drivers/tty/serial/omap-serial.c
876 +++ b/drivers/tty/serial/omap-serial.c
877 @@ -649,19 +649,19 @@ serial_omap_configure_xonxoff
878
879 /*
880 * IXON Flag:
881 - * Enable XON/XOFF flow control on output.
882 - * Transmit XON1, XOFF1
883 + * Flow control for OMAP.TX
884 + * OMAP.RX should listen for XON/XOFF
885 */
886 if (termios->c_iflag & IXON)
887 - up->efr |= OMAP_UART_SW_TX;
888 + up->efr |= OMAP_UART_SW_RX;
889
890 /*
891 * IXOFF Flag:
892 - * Enable XON/XOFF flow control on input.
893 - * Receiver compares XON1, XOFF1.
894 + * Flow control for OMAP.RX
895 + * OMAP.TX should send XON/XOFF
896 */
897 if (termios->c_iflag & IXOFF)
898 - up->efr |= OMAP_UART_SW_RX;
899 + up->efr |= OMAP_UART_SW_TX;
900
901 serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
902 serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
903 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
904 index 3b0c4e3..a6d5d51 100644
905 --- a/drivers/tty/vt/keyboard.c
906 +++ b/drivers/tty/vt/keyboard.c
907 @@ -1053,13 +1053,10 @@ static int kbd_update_leds_helper(struct input_handle *handle, void *data)
908 */
909 int vt_get_leds(int console, int flag)
910 {
911 - unsigned long flags;
912 struct kbd_struct * kbd = kbd_table + console;
913 int ret;
914
915 - spin_lock_irqsave(&kbd_event_lock, flags);
916 ret = vc_kbd_led(kbd, flag);
917 - spin_unlock_irqrestore(&kbd_event_lock, flags);
918
919 return ret;
920 }
921 diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
922 index 170cbe8..2d277a2 100644
923 --- a/drivers/usb/gadget/dummy_hcd.c
924 +++ b/drivers/usb/gadget/dummy_hcd.c
925 @@ -2505,10 +2505,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
926 hs_hcd->has_tt = 1;
927
928 retval = usb_add_hcd(hs_hcd, 0, 0);
929 - if (retval != 0) {
930 - usb_put_hcd(hs_hcd);
931 - return retval;
932 - }
933 + if (retval)
934 + goto put_usb2_hcd;
935
936 if (mod_data.is_super_speed) {
937 ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
938 @@ -2527,6 +2525,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
939 put_usb3_hcd:
940 usb_put_hcd(ss_hcd);
941 dealloc_usb2_hcd:
942 + usb_remove_hcd(hs_hcd);
943 +put_usb2_hcd:
944 usb_put_hcd(hs_hcd);
945 the_controller.hs_hcd = the_controller.ss_hcd = NULL;
946 return retval;
947 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
948 index 6b90824..cbed50a 100644
949 --- a/drivers/usb/host/xhci-mem.c
950 +++ b/drivers/usb/host/xhci-mem.c
951 @@ -1772,6 +1772,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
952 {
953 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
954 struct dev_info *dev_info, *next;
955 + struct xhci_cd *cur_cd, *next_cd;
956 unsigned long flags;
957 int size;
958 int i, j, num_ports;
959 @@ -1793,6 +1794,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
960 xhci_ring_free(xhci, xhci->cmd_ring);
961 xhci->cmd_ring = NULL;
962 xhci_dbg(xhci, "Freed command ring\n");
963 + list_for_each_entry_safe(cur_cd, next_cd,
964 + &xhci->cancel_cmd_list, cancel_cmd_list) {
965 + list_del(&cur_cd->cancel_cmd_list);
966 + kfree(cur_cd);
967 + }
968
969 for (i = 1; i < MAX_HC_SLOTS; ++i)
970 xhci_free_virt_device(xhci, i);
971 @@ -2338,6 +2344,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
972 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
973 if (!xhci->cmd_ring)
974 goto fail;
975 + INIT_LIST_HEAD(&xhci->cancel_cmd_list);
976 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
977 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
978 (unsigned long long)xhci->cmd_ring->first_seg->dma);
979 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
980 index f152740..4211017 100644
981 --- a/drivers/usb/host/xhci-pci.c
982 +++ b/drivers/usb/host/xhci-pci.c
983 @@ -99,6 +99,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
984 * PPT chipsets.
985 */
986 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
987 + xhci->quirks |= XHCI_AVOID_BEI;
988 }
989 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
990 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
991 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
992 index 203ba31..a23d71b 100644
993 --- a/drivers/usb/host/xhci-ring.c
994 +++ b/drivers/usb/host/xhci-ring.c
995 @@ -280,12 +280,123 @@ static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
996 /* Ring the host controller doorbell after placing a command on the ring */
997 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
998 {
999 + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
1000 + return;
1001 +
1002 xhci_dbg(xhci, "// Ding dong!\n");
1003 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
1004 /* Flush PCI posted writes */
1005 xhci_readl(xhci, &xhci->dba->doorbell[0]);
1006 }
1007
1008 +static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
1009 +{
1010 + u64 temp_64;
1011 + int ret;
1012 +
1013 + xhci_dbg(xhci, "Abort command ring\n");
1014 +
1015 + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
1016 + xhci_dbg(xhci, "The command ring isn't running, "
1017 + "Have the command ring been stopped?\n");
1018 + return 0;
1019 + }
1020 +
1021 + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1022 + if (!(temp_64 & CMD_RING_RUNNING)) {
1023 + xhci_dbg(xhci, "Command ring had been stopped\n");
1024 + return 0;
1025 + }
1026 + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1027 + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
1028 + &xhci->op_regs->cmd_ring);
1029 +
1030 + /* Section 4.6.1.2 of xHCI 1.0 spec says software should
1031 + * time the completion od all xHCI commands, including
1032 + * the Command Abort operation. If software doesn't see
1033 + * CRR negated in a timely manner (e.g. longer than 5
1034 + * seconds), then it should assume that the there are
1035 + * larger problems with the xHC and assert HCRST.
1036 + */
1037 + ret = handshake(xhci, &xhci->op_regs->cmd_ring,
1038 + CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
1039 + if (ret < 0) {
1040 + xhci_err(xhci, "Stopped the command ring failed, "
1041 + "maybe the host is dead\n");
1042 + xhci->xhc_state |= XHCI_STATE_DYING;
1043 + xhci_quiesce(xhci);
1044 + xhci_halt(xhci);
1045 + return -ESHUTDOWN;
1046 + }
1047 +
1048 + return 0;
1049 +}
1050 +
1051 +static int xhci_queue_cd(struct xhci_hcd *xhci,
1052 + struct xhci_command *command,
1053 + union xhci_trb *cmd_trb)
1054 +{
1055 + struct xhci_cd *cd;
1056 + cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
1057 + if (!cd)
1058 + return -ENOMEM;
1059 + INIT_LIST_HEAD(&cd->cancel_cmd_list);
1060 +
1061 + cd->command = command;
1062 + cd->cmd_trb = cmd_trb;
1063 + list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
1064 +
1065 + return 0;
1066 +}
1067 +
1068 +/*
1069 + * Cancel the command which has issue.
1070 + *
1071 + * Some commands may hang due to waiting for acknowledgement from
1072 + * usb device. It is outside of the xHC's ability to control and
1073 + * will cause the command ring is blocked. When it occurs software
1074 + * should intervene to recover the command ring.
1075 + * See Section 4.6.1.1 and 4.6.1.2
1076 + */
1077 +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1078 + union xhci_trb *cmd_trb)
1079 +{
1080 + int retval = 0;
1081 + unsigned long flags;
1082 +
1083 + spin_lock_irqsave(&xhci->lock, flags);
1084 +
1085 + if (xhci->xhc_state & XHCI_STATE_DYING) {
1086 + xhci_warn(xhci, "Abort the command ring,"
1087 + " but the xHCI is dead.\n");
1088 + retval = -ESHUTDOWN;
1089 + goto fail;
1090 + }
1091 +
1092 + /* queue the cmd desriptor to cancel_cmd_list */
1093 + retval = xhci_queue_cd(xhci, command, cmd_trb);
1094 + if (retval) {
1095 + xhci_warn(xhci, "Queuing command descriptor failed.\n");
1096 + goto fail;
1097 + }
1098 +
1099 + /* abort command ring */
1100 + retval = xhci_abort_cmd_ring(xhci);
1101 + if (retval) {
1102 + xhci_err(xhci, "Abort command ring failed\n");
1103 + if (unlikely(retval == -ESHUTDOWN)) {
1104 + spin_unlock_irqrestore(&xhci->lock, flags);
1105 + usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1106 + xhci_dbg(xhci, "xHCI host controller is dead.\n");
1107 + return retval;
1108 + }
1109 + }
1110 +
1111 +fail:
1112 + spin_unlock_irqrestore(&xhci->lock, flags);
1113 + return retval;
1114 +}
1115 +
1116 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
1117 unsigned int slot_id,
1118 unsigned int ep_index,
1119 @@ -1059,6 +1170,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
1120 }
1121 }
1122
1123 +/* Complete the command and detele it from the devcie's command queue.
1124 + */
1125 +static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1126 + struct xhci_command *command, u32 status)
1127 +{
1128 + command->status = status;
1129 + list_del(&command->cmd_list);
1130 + if (command->completion)
1131 + complete(command->completion);
1132 + else
1133 + xhci_free_command(xhci, command);
1134 +}
1135 +
1136 +
1137 /* Check to see if a command in the device's command queue matches this one.
1138 * Signal the completion or free the command, and return 1. Return 0 if the
1139 * completed command isn't at the head of the command list.
1140 @@ -1077,15 +1202,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1141 if (xhci->cmd_ring->dequeue != command->command_trb)
1142 return 0;
1143
1144 - command->status = GET_COMP_CODE(le32_to_cpu(event->status));
1145 - list_del(&command->cmd_list);
1146 - if (command->completion)
1147 - complete(command->completion);
1148 - else
1149 - xhci_free_command(xhci, command);
1150 + xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1151 + GET_COMP_CODE(le32_to_cpu(event->status)));
1152 return 1;
1153 }
1154
1155 +/*
1156 + * Finding the command trb need to be cancelled and modifying it to
1157 + * NO OP command. And if the command is in device's command wait
1158 + * list, finishing and freeing it.
1159 + *
1160 + * If we can't find the command trb, we think it had already been
1161 + * executed.
1162 + */
1163 +static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1164 +{
1165 + struct xhci_segment *cur_seg;
1166 + union xhci_trb *cmd_trb;
1167 + u32 cycle_state;
1168 +
1169 + if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1170 + return;
1171 +
1172 + /* find the current segment of command ring */
1173 + cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1174 + xhci->cmd_ring->dequeue, &cycle_state);
1175 +
1176 + /* find the command trb matched by cd from command ring */
1177 + for (cmd_trb = xhci->cmd_ring->dequeue;
1178 + cmd_trb != xhci->cmd_ring->enqueue;
1179 + next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
1180 + /* If the trb is link trb, continue */
1181 + if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1182 + continue;
1183 +
1184 + if (cur_cd->cmd_trb == cmd_trb) {
1185 +
1186 + /* If the command in device's command list, we should
1187 + * finish it and free the command structure.
1188 + */
1189 + if (cur_cd->command)
1190 + xhci_complete_cmd_in_cmd_wait_list(xhci,
1191 + cur_cd->command, COMP_CMD_STOP);
1192 +
1193 + /* get cycle state from the origin command trb */
1194 + cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1195 + & TRB_CYCLE;
1196 +
1197 + /* modify the command trb to NO OP command */
1198 + cmd_trb->generic.field[0] = 0;
1199 + cmd_trb->generic.field[1] = 0;
1200 + cmd_trb->generic.field[2] = 0;
1201 + cmd_trb->generic.field[3] = cpu_to_le32(
1202 + TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1203 + break;
1204 + }
1205 + }
1206 +}
1207 +
1208 +static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1209 +{
1210 + struct xhci_cd *cur_cd, *next_cd;
1211 +
1212 + if (list_empty(&xhci->cancel_cmd_list))
1213 + return;
1214 +
1215 + list_for_each_entry_safe(cur_cd, next_cd,
1216 + &xhci->cancel_cmd_list, cancel_cmd_list) {
1217 + xhci_cmd_to_noop(xhci, cur_cd);
1218 + list_del(&cur_cd->cancel_cmd_list);
1219 + kfree(cur_cd);
1220 + }
1221 +}
1222 +
1223 +/*
1224 + * traversing the cancel_cmd_list. If the command descriptor according
1225 + * to cmd_trb is found, the function free it and return 1, otherwise
1226 + * return 0.
1227 + */
1228 +static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1229 + union xhci_trb *cmd_trb)
1230 +{
1231 + struct xhci_cd *cur_cd, *next_cd;
1232 +
1233 + if (list_empty(&xhci->cancel_cmd_list))
1234 + return 0;
1235 +
1236 + list_for_each_entry_safe(cur_cd, next_cd,
1237 + &xhci->cancel_cmd_list, cancel_cmd_list) {
1238 + if (cur_cd->cmd_trb == cmd_trb) {
1239 + if (cur_cd->command)
1240 + xhci_complete_cmd_in_cmd_wait_list(xhci,
1241 + cur_cd->command, COMP_CMD_STOP);
1242 + list_del(&cur_cd->cancel_cmd_list);
1243 + kfree(cur_cd);
1244 + return 1;
1245 + }
1246 + }
1247 +
1248 + return 0;
1249 +}
1250 +
1251 +/*
1252 + * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
1253 + * trb pointed by the command ring dequeue pointer is the trb we want to
1254 + * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
1255 + * traverse the cancel_cmd_list to trun the all of the commands according
1256 + * to command descriptor to NO-OP trb.
1257 + */
1258 +static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1259 + int cmd_trb_comp_code)
1260 +{
1261 + int cur_trb_is_good = 0;
1262 +
1263 + /* Searching the cmd trb pointed by the command ring dequeue
1264 + * pointer in command descriptor list. If it is found, free it.
1265 + */
1266 + cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1267 + xhci->cmd_ring->dequeue);
1268 +
1269 + if (cmd_trb_comp_code == COMP_CMD_ABORT)
1270 + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1271 + else if (cmd_trb_comp_code == COMP_CMD_STOP) {
1272 + /* traversing the cancel_cmd_list and canceling
1273 + * the command according to command descriptor
1274 + */
1275 + xhci_cancel_cmd_in_cd_list(xhci);
1276 +
1277 + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1278 + /*
1279 + * ring command ring doorbell again to restart the
1280 + * command ring
1281 + */
1282 + if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1283 + xhci_ring_cmd_db(xhci);
1284 + }
1285 + return cur_trb_is_good;
1286 +}
1287 +
1288 static void handle_cmd_completion(struct xhci_hcd *xhci,
1289 struct xhci_event_cmd *event)
1290 {
1291 @@ -1111,6 +1365,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1292 xhci->error_bitmask |= 1 << 5;
1293 return;
1294 }
1295 +
1296 + if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
1297 + (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
1298 + /* If the return value is 0, we think the trb pointed by
1299 + * command ring dequeue pointer is a good trb. The good
1300 + * trb means we don't want to cancel the trb, but it have
1301 + * been stopped by host. So we should handle it normally.
1302 + * Otherwise, driver should invoke inc_deq() and return.
1303 + */
1304 + if (handle_stopped_cmd_ring(xhci,
1305 + GET_COMP_CODE(le32_to_cpu(event->status)))) {
1306 + inc_deq(xhci, xhci->cmd_ring);
1307 + return;
1308 + }
1309 + }
1310 +
1311 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1312 & TRB_TYPE_BITMASK) {
1313 case TRB_TYPE(TRB_ENABLE_SLOT):
1314 @@ -3400,7 +3670,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1315 } else {
1316 td->last_trb = ep_ring->enqueue;
1317 field |= TRB_IOC;
1318 - if (xhci->hci_version == 0x100) {
1319 + if (xhci->hci_version == 0x100 &&
1320 + !(xhci->quirks &
1321 + XHCI_AVOID_BEI)) {
1322 /* Set BEI bit except for the last td */
1323 if (i < num_tds - 1)
1324 field |= TRB_BEI;
1325 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1326 index 6467d79..f756231 100644
1327 --- a/drivers/usb/host/xhci.c
1328 +++ b/drivers/usb/host/xhci.c
1329 @@ -52,7 +52,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
1330 * handshake done). There are two failure modes: "usec" have passed (major
1331 * hardware flakeout), or the register reads as all-ones (hardware removed).
1332 */
1333 -static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
1334 +int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
1335 u32 mask, u32 done, int usec)
1336 {
1337 u32 result;
1338 @@ -105,9 +105,10 @@ int xhci_halt(struct xhci_hcd *xhci)
1339
1340 ret = handshake(xhci, &xhci->op_regs->status,
1341 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
1342 - if (!ret)
1343 + if (!ret) {
1344 xhci->xhc_state |= XHCI_STATE_HALTED;
1345 - else
1346 + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1347 + } else
1348 xhci_warn(xhci, "Host not halted after %u microseconds.\n",
1349 XHCI_MAX_HALT_USEC);
1350 return ret;
1351 @@ -470,6 +471,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
1352
1353 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
1354 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
1355 + if (!dmi_product_name || !dmi_sys_vendor)
1356 + return false;
1357
1358 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
1359 return false;
1360 @@ -581,6 +584,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
1361 return -ENODEV;
1362 }
1363 xhci->shared_hcd->state = HC_STATE_RUNNING;
1364 + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1365
1366 if (xhci->quirks & XHCI_NEC_HOST)
1367 xhci_ring_cmd_db(xhci);
1368 @@ -886,7 +890,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
1369 command &= ~CMD_RUN;
1370 xhci_writel(xhci, command, &xhci->op_regs->command);
1371 if (handshake(xhci, &xhci->op_regs->status,
1372 - STS_HALT, STS_HALT, 100*100)) {
1373 + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
1374 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
1375 spin_unlock_irq(&xhci->lock);
1376 return -ETIMEDOUT;
1377 @@ -2521,6 +2525,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1378 struct completion *cmd_completion;
1379 u32 *cmd_status;
1380 struct xhci_virt_device *virt_dev;
1381 + union xhci_trb *cmd_trb;
1382
1383 spin_lock_irqsave(&xhci->lock, flags);
1384 virt_dev = xhci->devs[udev->slot_id];
1385 @@ -2566,6 +2571,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1386 }
1387 init_completion(cmd_completion);
1388
1389 + cmd_trb = xhci->cmd_ring->dequeue;
1390 if (!ctx_change)
1391 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1392 udev->slot_id, must_succeed);
1393 @@ -2587,14 +2593,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1394 /* Wait for the configure endpoint command to complete */
1395 timeleft = wait_for_completion_interruptible_timeout(
1396 cmd_completion,
1397 - USB_CTRL_SET_TIMEOUT);
1398 + XHCI_CMD_DEFAULT_TIMEOUT);
1399 if (timeleft <= 0) {
1400 xhci_warn(xhci, "%s while waiting for %s command\n",
1401 timeleft == 0 ? "Timeout" : "Signal",
1402 ctx_change == 0 ?
1403 "configure endpoint" :
1404 "evaluate context");
1405 - /* FIXME cancel the configure endpoint command */
1406 + /* cancel the configure endpoint command */
1407 + ret = xhci_cancel_cmd(xhci, command, cmd_trb);
1408 + if (ret < 0)
1409 + return ret;
1410 return -ETIME;
1411 }
1412
1413 @@ -3543,8 +3552,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
1414 unsigned long flags;
1415 int timeleft;
1416 int ret;
1417 + union xhci_trb *cmd_trb;
1418
1419 spin_lock_irqsave(&xhci->lock, flags);
1420 + cmd_trb = xhci->cmd_ring->dequeue;
1421 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
1422 if (ret) {
1423 spin_unlock_irqrestore(&xhci->lock, flags);
1424 @@ -3556,12 +3567,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
1425
1426 /* XXX: how much time for xHC slot assignment? */
1427 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1428 - USB_CTRL_SET_TIMEOUT);
1429 + XHCI_CMD_DEFAULT_TIMEOUT);
1430 if (timeleft <= 0) {
1431 xhci_warn(xhci, "%s while waiting for a slot\n",
1432 timeleft == 0 ? "Timeout" : "Signal");
1433 - /* FIXME cancel the enable slot request */
1434 - return 0;
1435 + /* cancel the enable slot request */
1436 + return xhci_cancel_cmd(xhci, NULL, cmd_trb);
1437 }
1438
1439 if (!xhci->slot_id) {
1440 @@ -3622,6 +3633,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1441 struct xhci_slot_ctx *slot_ctx;
1442 struct xhci_input_control_ctx *ctrl_ctx;
1443 u64 temp_64;
1444 + union xhci_trb *cmd_trb;
1445
1446 if (!udev->slot_id) {
1447 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
1448 @@ -3660,6 +3672,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1449 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1450
1451 spin_lock_irqsave(&xhci->lock, flags);
1452 + cmd_trb = xhci->cmd_ring->dequeue;
1453 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
1454 udev->slot_id);
1455 if (ret) {
1456 @@ -3672,7 +3685,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1457
1458 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
1459 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1460 - USB_CTRL_SET_TIMEOUT);
1461 + XHCI_CMD_DEFAULT_TIMEOUT);
1462 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
1463 * the SetAddress() "recovery interval" required by USB and aborting the
1464 * command on a timeout.
1465 @@ -3680,7 +3693,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1466 if (timeleft <= 0) {
1467 xhci_warn(xhci, "%s while waiting for address device command\n",
1468 timeleft == 0 ? "Timeout" : "Signal");
1469 - /* FIXME cancel the address device command */
1470 + /* cancel the address device command */
1471 + ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
1472 + if (ret < 0)
1473 + return ret;
1474 return -ETIME;
1475 }
1476
1477 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1478 index 6e77f3b..5361fd8 100644
1479 --- a/drivers/usb/host/xhci.h
1480 +++ b/drivers/usb/host/xhci.h
1481 @@ -1252,6 +1252,16 @@ struct xhci_td {
1482 union xhci_trb *last_trb;
1483 };
1484
1485 +/* xHCI command default timeout value */
1486 +#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
1487 +
1488 +/* command descriptor */
1489 +struct xhci_cd {
1490 + struct list_head cancel_cmd_list;
1491 + struct xhci_command *command;
1492 + union xhci_trb *cmd_trb;
1493 +};
1494 +
1495 struct xhci_dequeue_state {
1496 struct xhci_segment *new_deq_seg;
1497 union xhci_trb *new_deq_ptr;
1498 @@ -1417,6 +1427,11 @@ struct xhci_hcd {
1499 /* data structures */
1500 struct xhci_device_context_array *dcbaa;
1501 struct xhci_ring *cmd_ring;
1502 + unsigned int cmd_ring_state;
1503 +#define CMD_RING_STATE_RUNNING (1 << 0)
1504 +#define CMD_RING_STATE_ABORTED (1 << 1)
1505 +#define CMD_RING_STATE_STOPPED (1 << 2)
1506 + struct list_head cancel_cmd_list;
1507 unsigned int cmd_ring_reserved_trbs;
1508 struct xhci_ring *event_ring;
1509 struct xhci_erst erst;
1510 @@ -1488,6 +1503,7 @@ struct xhci_hcd {
1511 #define XHCI_TRUST_TX_LENGTH (1 << 10)
1512 #define XHCI_SPURIOUS_REBOOT (1 << 13)
1513 #define XHCI_COMP_MODE_QUIRK (1 << 14)
1514 +#define XHCI_AVOID_BEI (1 << 15)
1515 unsigned int num_active_eps;
1516 unsigned int limit_active_eps;
1517 /* There are two roothubs to keep track of bus suspend info for */
1518 @@ -1694,6 +1710,8 @@ static inline void xhci_unregister_plat(void)
1519
1520 /* xHCI host controller glue */
1521 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
1522 +int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
1523 + u32 mask, u32 done, int usec);
1524 void xhci_quiesce(struct xhci_hcd *xhci);
1525 int xhci_halt(struct xhci_hcd *xhci);
1526 int xhci_reset(struct xhci_hcd *xhci);
1527 @@ -1784,6 +1802,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1528 unsigned int slot_id, unsigned int ep_index,
1529 struct xhci_dequeue_state *deq_state);
1530 void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1531 +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1532 + union xhci_trb *cmd_trb);
1533 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1534 unsigned int ep_index, unsigned int stream_id);
1535
1536 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1537 index 4d2b7d3..25bb935 100644
1538 --- a/drivers/usb/serial/ftdi_sio.c
1539 +++ b/drivers/usb/serial/ftdi_sio.c
1540 @@ -584,6 +584,8 @@ static struct usb_device_id id_table_combined [] = {
1541 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
1542 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
1543 { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
1544 + { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
1545 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1546 /*
1547 * ELV devices:
1548 */
1549 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1550 index 41fe582..57c12ef 100644
1551 --- a/drivers/usb/serial/ftdi_sio_ids.h
1552 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1553 @@ -517,6 +517,11 @@
1554 */
1555 #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
1556
1557 +/*
1558 + * TIAO product ids (FTDI_VID)
1559 + * http://www.tiaowiki.com/w/Main_Page
1560 + */
1561 +#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
1562
1563
1564 /********************************/
1565 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1566 index a49099d..57de734 100644
1567 --- a/drivers/usb/serial/option.c
1568 +++ b/drivers/usb/serial/option.c
1569 @@ -870,7 +870,8 @@ static const struct usb_device_id option_ids[] = {
1570 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
1571 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
1572 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
1573 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
1574 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
1575 + .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1576 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
1577 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
1578 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
1579 diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
1580 index 9662456..b223381 100644
1581 --- a/drivers/usb/serial/qcaux.c
1582 +++ b/drivers/usb/serial/qcaux.c
1583 @@ -36,8 +36,6 @@
1584 #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
1585 #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
1586 #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
1587 -#define PANTECH_PRODUCT_UML190_VZW 0x3716
1588 -#define PANTECH_PRODUCT_UML290_VZW 0x3718
1589
1590 /* CMOTECH devices */
1591 #define CMOTECH_VENDOR_ID 0x16d8
1592 @@ -68,11 +66,9 @@ static struct usb_device_id id_table[] = {
1593 { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
1594 { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
1595 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
1596 - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
1597 - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
1598 - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
1599 - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
1600 - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
1601 + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
1602 + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
1603 + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
1604 { },
1605 };
1606 MODULE_DEVICE_TABLE(usb, id_table);
1607 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
1608 index 16f7354..a009b9e 100644
1609 --- a/fs/binfmt_elf.c
1610 +++ b/fs/binfmt_elf.c
1611 @@ -1698,30 +1698,19 @@ static int elf_note_info_init(struct elf_note_info *info)
1612 return 0;
1613 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1614 if (!info->psinfo)
1615 - goto notes_free;
1616 + return 0;
1617 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1618 if (!info->prstatus)
1619 - goto psinfo_free;
1620 + return 0;
1621 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1622 if (!info->fpu)
1623 - goto prstatus_free;
1624 + return 0;
1625 #ifdef ELF_CORE_COPY_XFPREGS
1626 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
1627 if (!info->xfpu)
1628 - goto fpu_free;
1629 + return 0;
1630 #endif
1631 return 1;
1632 -#ifdef ELF_CORE_COPY_XFPREGS
1633 - fpu_free:
1634 - kfree(info->fpu);
1635 -#endif
1636 - prstatus_free:
1637 - kfree(info->prstatus);
1638 - psinfo_free:
1639 - kfree(info->psinfo);
1640 - notes_free:
1641 - kfree(info->notes);
1642 - return 0;
1643 }
1644
1645 static int fill_note_info(struct elfhdr *elf, int phdrs,
1646 diff --git a/fs/dcache.c b/fs/dcache.c
1647 index 10fab26..f104945 100644
1648 --- a/fs/dcache.c
1649 +++ b/fs/dcache.c
1650 @@ -1116,6 +1116,8 @@ positive:
1651 return 1;
1652
1653 rename_retry:
1654 + if (locked)
1655 + goto again;
1656 locked = 1;
1657 write_seqlock(&rename_lock);
1658 goto again;
1659 @@ -1218,6 +1220,8 @@ out:
1660 rename_retry:
1661 if (found)
1662 return found;
1663 + if (locked)
1664 + goto again;
1665 locked = 1;
1666 write_seqlock(&rename_lock);
1667 goto again;
1668 @@ -2963,6 +2967,8 @@ resume:
1669 return;
1670
1671 rename_retry:
1672 + if (locked)
1673 + goto again;
1674 locked = 1;
1675 write_seqlock(&rename_lock);
1676 goto again;
1677 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
1678 index 19ca550..bf7934f 100644
1679 --- a/include/linux/pci_ids.h
1680 +++ b/include/linux/pci_ids.h
1681 @@ -1846,7 +1846,6 @@
1682 #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
1683 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
1684 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
1685 -#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530
1686
1687 #define PCI_VENDOR_ID_RADISYS 0x1331
1688
1689 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
1690 index 5737238..1d6bf24 100644
1691 --- a/security/yama/yama_lsm.c
1692 +++ b/security/yama/yama_lsm.c
1693 @@ -138,7 +138,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
1694 if (arg2 == 0) {
1695 yama_ptracer_del(NULL, myself);
1696 rc = 0;
1697 - } else if (arg2 == PR_SET_PTRACER_ANY) {
1698 + } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
1699 rc = yama_ptracer_add(NULL, myself);
1700 } else {
1701 struct task_struct *tracer;
1702 diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
1703 index d9834b3..2984ffb 100644
1704 --- a/tools/hv/hv_kvp_daemon.c
1705 +++ b/tools/hv/hv_kvp_daemon.c
1706 @@ -106,7 +106,7 @@ static void kvp_acquire_lock(int pool)
1707
1708 if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {
1709 syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool);
1710 - exit(-1);
1711 + exit(EXIT_FAILURE);
1712 }
1713 }
1714
1715 @@ -118,7 +118,7 @@ static void kvp_release_lock(int pool)
1716 if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) {
1717 perror("fcntl");
1718 syslog(LOG_ERR, "Failed to release the lock pool: %d", pool);
1719 - exit(-1);
1720 + exit(EXIT_FAILURE);
1721 }
1722 }
1723
1724 @@ -137,14 +137,19 @@ static void kvp_update_file(int pool)
1725 if (!filep) {
1726 kvp_release_lock(pool);
1727 syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
1728 - exit(-1);
1729 + exit(EXIT_FAILURE);
1730 }
1731
1732 bytes_written = fwrite(kvp_file_info[pool].records,
1733 sizeof(struct kvp_record),
1734 kvp_file_info[pool].num_records, filep);
1735
1736 - fflush(filep);
1737 + if (ferror(filep) || fclose(filep)) {
1738 + kvp_release_lock(pool);
1739 + syslog(LOG_ERR, "Failed to write file, pool: %d", pool);
1740 + exit(EXIT_FAILURE);
1741 + }
1742 +
1743 kvp_release_lock(pool);
1744 }
1745
1746 @@ -163,14 +168,19 @@ static void kvp_update_mem_state(int pool)
1747 if (!filep) {
1748 kvp_release_lock(pool);
1749 syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
1750 - exit(-1);
1751 + exit(EXIT_FAILURE);
1752 }
1753 - while (!feof(filep)) {
1754 + for (;;) {
1755 readp = &record[records_read];
1756 records_read += fread(readp, sizeof(struct kvp_record),
1757 ENTRIES_PER_BLOCK * num_blocks,
1758 filep);
1759
1760 + if (ferror(filep)) {
1761 + syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
1762 + exit(EXIT_FAILURE);
1763 + }
1764 +
1765 if (!feof(filep)) {
1766 /*
1767 * We have more data to read.
1768 @@ -180,7 +190,7 @@ static void kvp_update_mem_state(int pool)
1769
1770 if (record == NULL) {
1771 syslog(LOG_ERR, "malloc failed");
1772 - exit(-1);
1773 + exit(EXIT_FAILURE);
1774 }
1775 continue;
1776 }
1777 @@ -191,6 +201,7 @@ static void kvp_update_mem_state(int pool)
1778 kvp_file_info[pool].records = record;
1779 kvp_file_info[pool].num_records = records_read;
1780
1781 + fclose(filep);
1782 kvp_release_lock(pool);
1783 }
1784 static int kvp_file_init(void)
1785 @@ -208,7 +219,7 @@ static int kvp_file_init(void)
1786 if (access("/var/opt/hyperv", F_OK)) {
1787 if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) {
1788 syslog(LOG_ERR, " Failed to create /var/opt/hyperv");
1789 - exit(-1);
1790 + exit(EXIT_FAILURE);
1791 }
1792 }
1793
1794 @@ -232,12 +243,18 @@ static int kvp_file_init(void)
1795 fclose(filep);
1796 return 1;
1797 }
1798 - while (!feof(filep)) {
1799 + for (;;) {
1800 readp = &record[records_read];
1801 records_read += fread(readp, sizeof(struct kvp_record),
1802 ENTRIES_PER_BLOCK,
1803 filep);
1804
1805 + if (ferror(filep)) {
1806 + syslog(LOG_ERR, "Failed to read file, pool: %d",
1807 + i);
1808 + exit(EXIT_FAILURE);
1809 + }
1810 +
1811 if (!feof(filep)) {
1812 /*
1813 * We have more data to read.
1814 @@ -657,13 +674,13 @@ int main(void)
1815
1816 if (kvp_file_init()) {
1817 syslog(LOG_ERR, "Failed to initialize the pools");
1818 - exit(-1);
1819 + exit(EXIT_FAILURE);
1820 }
1821
1822 fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
1823 if (fd < 0) {
1824 syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
1825 - exit(-1);
1826 + exit(EXIT_FAILURE);
1827 }
1828 addr.nl_family = AF_NETLINK;
1829 addr.nl_pad = 0;
1830 @@ -675,7 +692,7 @@ int main(void)
1831 if (error < 0) {
1832 syslog(LOG_ERR, "bind failed; error:%d", error);
1833 close(fd);
1834 - exit(-1);
1835 + exit(EXIT_FAILURE);
1836 }
1837 sock_opt = addr.nl_groups;
1838 setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
1839 @@ -695,7 +712,7 @@ int main(void)
1840 if (len < 0) {
1841 syslog(LOG_ERR, "netlink_send failed; error:%d", len);
1842 close(fd);
1843 - exit(-1);
1844 + exit(EXIT_FAILURE);
1845 }
1846
1847 pfd.fd = fd;
1848 @@ -863,7 +880,7 @@ kvp_done:
1849 len = netlink_send(fd, incoming_cn_msg);
1850 if (len < 0) {
1851 syslog(LOG_ERR, "net_link send failed; error:%d", len);
1852 - exit(-1);
1853 + exit(EXIT_FAILURE);
1854 }
1855 }
1856