Magellan Linux

Contents of /trunk/kernel26-mcore/patches-2.6.37-r4/0102-2.6.37.3-all-fixes.patch



Revision 1301
Thu Mar 10 22:13:57 2011 UTC by niro
File size: 113288 byte(s)
2.6.37-mcore-r4: updated to linux-2.6.37.3
1 diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
2 index aefd1e6..04ca0632 100644
3 --- a/Documentation/networking/dns_resolver.txt
4 +++ b/Documentation/networking/dns_resolver.txt
5 @@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
6 create dns_resolver foo:* * /usr/sbin/dns.foo %k
7
8
9 -
10 =====
11 USAGE
12 =====
13 @@ -104,6 +103,14 @@ implemented in the module can be called after doing:
14 returned also.
15
16
17 +===============================
18 +READING DNS KEYS FROM USERSPACE
19 +===============================
20 +
21 +Keys of dns_resolver type can be read from userspace using keyctl_read() or
22 +"keyctl read/print/pipe".
23 +
24 +
25 =========
26 MECHANISM
27 =========
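
The documentation added above says dns_resolver keys can now be read back from
userspace with keyctl_read() or "keyctl read/print/pipe". A minimal userspace
sketch of that, assuming libkeyutils is available; the hostname "example.com"
and the variable names are only illustrative:

#include <stdio.h>
#include <keyutils.h>

int main(void)
{
	/* Ask for a dns_resolver key; this triggers the kernel's request-key
	 * upcall if the name is not already cached. */
	key_serial_t key = request_key("dns_resolver", "example.com", NULL,
				       KEY_SPEC_SESSION_KEYRING);
	if (key == -1) {
		perror("request_key");
		return 1;
	}

	/* Read the instantiated payload back, as the new section documents. */
	char buf[256];
	long len = keyctl_read(key, buf, sizeof(buf) - 1);
	if (len == -1) {
		perror("keyctl_read");
		return 1;
	}
	if (len > (long)(sizeof(buf) - 1))
		len = sizeof(buf) - 1;
	buf[len] = '\0';
	printf("payload: %s\n", buf);
	return 0;
}

Build with -lkeyutils; "keyctl pipe <key-id>" from a shell reads the same payload.
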
28 diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
29 index 55d106b..f90c732 100644
30 --- a/arch/x86/include/asm/acpi.h
31 +++ b/arch/x86/include/asm/acpi.h
32 @@ -88,6 +88,7 @@ extern int acpi_disabled;
33 extern int acpi_pci_disabled;
34 extern int acpi_skip_timer_override;
35 extern int acpi_use_timer_override;
36 +extern int acpi_fix_pin2_polarity;
37
38 extern u8 acpi_sci_flags;
39 extern int acpi_sci_override_gsi;
40 diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
41 index 1def601..cfdc6c8 100644
42 --- a/arch/x86/include/asm/smpboot_hooks.h
43 +++ b/arch/x86/include/asm/smpboot_hooks.h
44 @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
45 */
46 CMOS_WRITE(0, 0xf);
47
48 - *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
49 + *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
50 }
51
52 static inline void __init smpboot_setup_io_apic(void)
53 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
54 index 71232b9..80cc938 100644
55 --- a/arch/x86/kernel/acpi/boot.c
56 +++ b/arch/x86/kernel/acpi/boot.c
57 @@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
58 int acpi_sci_override_gsi __initdata;
59 int acpi_skip_timer_override __initdata;
60 int acpi_use_timer_override __initdata;
61 +int acpi_fix_pin2_polarity __initdata;
62
63 #ifdef CONFIG_X86_LOCAL_APIC
64 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
65 @@ -410,10 +411,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
66 return 0;
67 }
68
69 - if (acpi_skip_timer_override &&
70 - intsrc->source_irq == 0 && intsrc->global_irq == 2) {
71 - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
72 - return 0;
73 + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
74 + if (acpi_skip_timer_override) {
75 + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
76 + return 0;
77 + }
78 + if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
79 + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
80 + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
81 + }
82 }
83
84 mp_override_legacy_irq(intsrc->source_irq,
85 diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
86 index 76b8cd9..9efbdcc 100644
87 --- a/arch/x86/kernel/early-quirks.c
88 +++ b/arch/x86/kernel/early-quirks.c
89 @@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
90
91 static u32 __init ati_sbx00_rev(int num, int slot, int func)
92 {
93 - u32 old, d;
94 + u32 d;
95
96 - d = read_pci_config(num, slot, func, 0x70);
97 - old = d;
98 - d &= ~(1<<8);
99 - write_pci_config(num, slot, func, 0x70, d);
100 d = read_pci_config(num, slot, func, 0x8);
101 d &= 0xff;
102 - write_pci_config(num, slot, func, 0x70, old);
103
104 return d;
105 }
106 @@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
107 {
108 u32 d, rev;
109
110 - if (acpi_use_timer_override)
111 - return;
112 -
113 rev = ati_sbx00_rev(num, slot, func);
114 + if (rev >= 0x40)
115 + acpi_fix_pin2_polarity = 1;
116 +
117 if (rev > 0x13)
118 return;
119
120 + if (acpi_use_timer_override)
121 + return;
122 +
123 /* check for IRQ0 interrupt swap */
124 d = read_pci_config(num, slot, func, 0x64);
125 if (!(d & (1<<14)))
126 diff --git a/block/blk-core.c b/block/blk-core.c
127 index 8767520..bb1e6fd 100644
128 --- a/block/blk-core.c
129 +++ b/block/blk-core.c
130 @@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
131 WARN_ON(!irqs_disabled());
132
133 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
134 - __blk_run_queue(q);
135 + __blk_run_queue(q, false);
136 }
137 EXPORT_SYMBOL(blk_start_queue);
138
139 @@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
140 /**
141 * __blk_run_queue - run a single device queue
142 * @q: The queue to run
143 + * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
144 *
145 * Description:
146 * See @blk_run_queue. This variant must be called with the queue lock
147 * held and interrupts disabled.
148 *
149 */
150 -void __blk_run_queue(struct request_queue *q)
151 +void __blk_run_queue(struct request_queue *q, bool force_kblockd)
152 {
153 blk_remove_plug(q);
154
155 @@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
156 * Only recurse once to avoid overrunning the stack, let the unplug
157 * handling reinvoke the handler shortly if we already got there.
158 */
159 - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
160 + if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
161 q->request_fn(q);
162 queue_flag_clear(QUEUE_FLAG_REENTER, q);
163 } else {
164 @@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
165 unsigned long flags;
166
167 spin_lock_irqsave(q->queue_lock, flags);
168 - __blk_run_queue(q);
169 + __blk_run_queue(q, false);
170 spin_unlock_irqrestore(q->queue_lock, flags);
171 }
172 EXPORT_SYMBOL(blk_run_queue);
173 @@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
174
175 drive_stat_acct(rq, 1);
176 __elv_add_request(q, rq, where, 0);
177 - __blk_run_queue(q);
178 + __blk_run_queue(q, false);
179 spin_unlock_irqrestore(q->queue_lock, flags);
180 }
181 EXPORT_SYMBOL(blk_insert_request);
182 @@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
183 }
184 EXPORT_SYMBOL(kblockd_schedule_work);
185
186 -int kblockd_schedule_delayed_work(struct request_queue *q,
187 - struct delayed_work *dwork, unsigned long delay)
188 -{
189 - return queue_delayed_work(kblockd_workqueue, dwork, delay);
190 -}
191 -EXPORT_SYMBOL(kblockd_schedule_delayed_work);
192 -
193 int __init blk_dev_init(void)
194 {
195 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
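
The blk-core.c hunks above add a force_kblockd flag to __blk_run_queue(): false
keeps the old behaviour (the request_fn may be invoked directly), true always
defers the dispatch to the kblockd workqueue. A hedged sketch of the calling
convention only — the helper name is made up and this is not kernel source:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Kick a queue after this patch.  Callers in a request-completion or other
 * sensitive context pass true so request_fn is never entered directly; see
 * the blk-flush.c hunk below for the in-tree example. */
static void example_kick_queue(struct request_queue *q, bool from_completion)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);   /* lock held, IRQs off */
	__blk_run_queue(q, from_completion);       /* true => go through kblockd */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
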
196 diff --git a/block/blk-flush.c b/block/blk-flush.c
197 index 54b123d..b27d020 100644
198 --- a/block/blk-flush.c
199 +++ b/block/blk-flush.c
200 @@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
201
202 /*
203 * Moving a request silently to empty queue_head may stall the
204 - * queue. Kick the queue in those cases.
205 + * queue. Kick the queue in those cases. This function is called
206 + * from request completion path and calling directly into
207 + * request_fn may confuse the driver. Always use kblockd.
208 */
209 if (was_empty && next_rq)
210 - __blk_run_queue(q);
211 + __blk_run_queue(q, true);
212 }
213
214 static void pre_flush_end_io(struct request *rq, int error)
215 @@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
216 BUG();
217 }
218
219 - elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
220 + elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
221 return rq;
222 }
223
224 diff --git a/block/blk-throttle.c b/block/blk-throttle.c
225 index 381b09b..b93ffbc 100644
226 --- a/block/blk-throttle.c
227 +++ b/block/blk-throttle.c
228 @@ -20,6 +20,11 @@ static int throtl_quantum = 32;
229 /* Throttling is performed over 100ms slice and after that slice is renewed */
230 static unsigned long throtl_slice = HZ/10; /* 100 ms */
231
232 +/* A workqueue to queue throttle related work */
233 +static struct workqueue_struct *kthrotld_workqueue;
234 +static void throtl_schedule_delayed_work(struct throtl_data *td,
235 + unsigned long delay);
236 +
237 struct throtl_rb_root {
238 struct rb_root rb;
239 struct rb_node *left;
240 @@ -337,10 +342,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
241 update_min_dispatch_time(st);
242
243 if (time_before_eq(st->min_disptime, jiffies))
244 - throtl_schedule_delayed_work(td->queue, 0);
245 + throtl_schedule_delayed_work(td, 0);
246 else
247 - throtl_schedule_delayed_work(td->queue,
248 - (st->min_disptime - jiffies));
249 + throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
250 }
251
252 static inline void
253 @@ -807,10 +811,10 @@ void blk_throtl_work(struct work_struct *work)
254 }
255
256 /* Call with queue lock held */
257 -void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
258 +static void
259 +throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
260 {
261
262 - struct throtl_data *td = q->td;
263 struct delayed_work *dwork = &td->throtl_work;
264
265 if (total_nr_queued(td) > 0) {
266 @@ -819,12 +823,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
267 * Cancel that and schedule a new one.
268 */
269 __cancel_delayed_work(dwork);
270 - kblockd_schedule_delayed_work(q, dwork, delay);
271 + queue_delayed_work(kthrotld_workqueue, dwork, delay);
272 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
273 delay, jiffies);
274 }
275 }
276 -EXPORT_SYMBOL(throtl_schedule_delayed_work);
277
278 static void
279 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
280 @@ -912,7 +915,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
281 smp_mb__after_atomic_inc();
282
283 /* Schedule a work now to process the limit change */
284 - throtl_schedule_delayed_work(td->queue, 0);
285 + throtl_schedule_delayed_work(td, 0);
286 }
287
288 static void throtl_update_blkio_group_write_bps(void *key,
289 @@ -926,7 +929,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
290 smp_mb__before_atomic_inc();
291 atomic_inc(&td->limits_changed);
292 smp_mb__after_atomic_inc();
293 - throtl_schedule_delayed_work(td->queue, 0);
294 + throtl_schedule_delayed_work(td, 0);
295 }
296
297 static void throtl_update_blkio_group_read_iops(void *key,
298 @@ -940,7 +943,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
299 smp_mb__before_atomic_inc();
300 atomic_inc(&td->limits_changed);
301 smp_mb__after_atomic_inc();
302 - throtl_schedule_delayed_work(td->queue, 0);
303 + throtl_schedule_delayed_work(td, 0);
304 }
305
306 static void throtl_update_blkio_group_write_iops(void *key,
307 @@ -954,7 +957,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
308 smp_mb__before_atomic_inc();
309 atomic_inc(&td->limits_changed);
310 smp_mb__after_atomic_inc();
311 - throtl_schedule_delayed_work(td->queue, 0);
312 + throtl_schedule_delayed_work(td, 0);
313 }
314
315 void throtl_shutdown_timer_wq(struct request_queue *q)
316 @@ -1127,6 +1130,10 @@ void blk_throtl_exit(struct request_queue *q)
317
318 static int __init throtl_init(void)
319 {
320 + kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
321 + if (!kthrotld_workqueue)
322 + panic("Failed to create kthrotld\n");
323 +
324 blkio_policy_register(&blkio_policy_throtl);
325 return 0;
326 }
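
blk-throttle now owns a dedicated kthrotld workqueue instead of borrowing
kblockd (whose delayed-work helper is removed in the blk-core.c hunk above).
A hedged, self-contained sketch of that pattern with invented names:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_dwork;

static void example_work_fn(struct work_struct *work)
{
	/* dispatch/throttling work would run here */
}

static int __init example_wq_init(void)
{
	/* WQ_MEM_RECLAIM gives the queue a rescuer thread, so the work can
	 * still make progress under memory pressure, as kthrotld needs. */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_dwork, example_work_fn);
	queue_delayed_work(example_wq, &example_dwork, msecs_to_jiffies(100));
	return 0;
}
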
327 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
328 index 6f2a966..b1e42f9 100644
329 --- a/block/cfq-iosched.c
330 +++ b/block/cfq-iosched.c
331 @@ -3335,7 +3335,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
332 cfqd->busy_queues > 1) {
333 cfq_del_timer(cfqd, cfqq);
334 cfq_clear_cfqq_wait_request(cfqq);
335 - __blk_run_queue(cfqd->queue);
336 + __blk_run_queue(cfqd->queue, false);
337 } else {
338 cfq_blkiocg_update_idle_time_stats(
339 &cfqq->cfqg->blkg);
340 @@ -3350,7 +3350,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
341 * this new queue is RT and the current one is BE
342 */
343 cfq_preempt_queue(cfqd, cfqq);
344 - __blk_run_queue(cfqd->queue);
345 + __blk_run_queue(cfqd->queue, false);
346 }
347 }
348
349 @@ -3711,7 +3711,7 @@ static void cfq_kick_queue(struct work_struct *work)
350 struct request_queue *q = cfqd->queue;
351
352 spin_lock_irq(q->queue_lock);
353 - __blk_run_queue(cfqd->queue);
354 + __blk_run_queue(cfqd->queue, false);
355 spin_unlock_irq(q->queue_lock);
356 }
357
358 diff --git a/block/elevator.c b/block/elevator.c
359 index 2569512..236e93c 100644
360 --- a/block/elevator.c
361 +++ b/block/elevator.c
362 @@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
363 */
364 elv_drain_elevator(q);
365 while (q->rq.elvpriv) {
366 - __blk_run_queue(q);
367 + __blk_run_queue(q, false);
368 spin_unlock_irq(q->queue_lock);
369 msleep(10);
370 spin_lock_irq(q->queue_lock);
371 @@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
372 * with anything. There's no point in delaying queue
373 * processing.
374 */
375 - __blk_run_queue(q);
376 + __blk_run_queue(q, false);
377 break;
378
379 case ELEVATOR_INSERT_SORT:
380 diff --git a/block/genhd.c b/block/genhd.c
381 index 0c55eae..fc5781e 100644
382 --- a/block/genhd.c
383 +++ b/block/genhd.c
384 @@ -1285,7 +1285,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
385 struct block_device *bdev = bdget_disk(disk, partno);
386 if (bdev) {
387 fsync_bdev(bdev);
388 - res = __invalidate_device(bdev);
389 + res = __invalidate_device(bdev, true);
390 bdput(bdev);
391 }
392 return res;
393 diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
394 index 5df67f1..384f7ab 100644
395 --- a/drivers/acpi/debugfs.c
396 +++ b/drivers/acpi/debugfs.c
397 @@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
398 size_t count, loff_t *ppos)
399 {
400 static char *buf;
401 - static int uncopied_bytes;
402 + static u32 max_size;
403 + static u32 uncopied_bytes;
404 +
405 struct acpi_table_header table;
406 acpi_status status;
407
408 @@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
409 if (copy_from_user(&table, user_buf,
410 sizeof(struct acpi_table_header)))
411 return -EFAULT;
412 - uncopied_bytes = table.length;
413 - buf = kzalloc(uncopied_bytes, GFP_KERNEL);
414 + uncopied_bytes = max_size = table.length;
415 + buf = kzalloc(max_size, GFP_KERNEL);
416 if (!buf)
417 return -ENOMEM;
418 }
419
420 - if (uncopied_bytes < count) {
421 - kfree(buf);
422 + if (buf == NULL)
423 + return -EINVAL;
424 +
425 + if ((*ppos > max_size) ||
426 + (*ppos + count > max_size) ||
427 + (*ppos + count < count) ||
428 + (count > uncopied_bytes))
429 return -EINVAL;
430 - }
431
432 if (copy_from_user(buf + (*ppos), user_buf, count)) {
433 kfree(buf);
434 + buf = NULL;
435 return -EFAULT;
436 }
437
438 @@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
439 if (!uncopied_bytes) {
440 status = acpi_install_method(buf);
441 kfree(buf);
442 + buf = NULL;
443 if (ACPI_FAILURE(status))
444 return -EINVAL;
445 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
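
The cm_write() change above replaces a single size check with a set of bounds
checks on the write position and length. A hedged, stand-alone restatement of
those checks (names invented, same conditions as the hunk):

#include <linux/types.h>
#include <linux/errno.h>

/* Validate a chunk written at 'pos' into a staging buffer of max_size bytes
 * of which 'remaining' bytes are still expected. */
static int example_check_write(loff_t pos, size_t count,
			       u32 max_size, u32 remaining)
{
	if (pos > max_size)		/* starts past the end of the buffer */
		return -EINVAL;
	if (pos + count > max_size)	/* would run past the end */
		return -EINVAL;
	if (pos + count < count)	/* pos + count wrapped around */
		return -EINVAL;
	if (count > remaining)		/* more data than is still expected */
		return -EINVAL;
	return 0;
}
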
446 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
447 index 3951020..59294dc 100644
448 --- a/drivers/block/floppy.c
449 +++ b/drivers/block/floppy.c
450 @@ -3276,7 +3276,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
451 struct block_device *bdev = opened_bdev[cnt];
452 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
453 continue;
454 - __invalidate_device(bdev);
455 + __invalidate_device(bdev, true);
456 }
457 mutex_unlock(&open_lock);
458 } else {
459 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
460 index 7ea0bea..1c76384 100644
461 --- a/drivers/block/loop.c
462 +++ b/drivers/block/loop.c
463 @@ -78,7 +78,6 @@
464
465 #include <asm/uaccess.h>
466
467 -static DEFINE_MUTEX(loop_mutex);
468 static LIST_HEAD(loop_devices);
469 static DEFINE_MUTEX(loop_devices_mutex);
470
471 @@ -1505,11 +1504,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
472 {
473 struct loop_device *lo = bdev->bd_disk->private_data;
474
475 - mutex_lock(&loop_mutex);
476 mutex_lock(&lo->lo_ctl_mutex);
477 lo->lo_refcnt++;
478 mutex_unlock(&lo->lo_ctl_mutex);
479 - mutex_unlock(&loop_mutex);
480
481 return 0;
482 }
483 @@ -1519,7 +1516,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
484 struct loop_device *lo = disk->private_data;
485 int err;
486
487 - mutex_lock(&loop_mutex);
488 mutex_lock(&lo->lo_ctl_mutex);
489
490 if (--lo->lo_refcnt)
491 @@ -1544,7 +1540,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
492 out:
493 mutex_unlock(&lo->lo_ctl_mutex);
494 out_unlocked:
495 - mutex_unlock(&loop_mutex);
496 return 0;
497 }
498
499 diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
500 index 949ed09..a096f9f 100644
501 --- a/drivers/bluetooth/ath3k.c
502 +++ b/drivers/bluetooth/ath3k.c
503 @@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
504 /* Atheros AR3011 with sflash firmware*/
505 { USB_DEVICE(0x0CF3, 0x3002) },
506
507 + /* Atheros AR9285 Malbec with sflash firmware */
508 + { USB_DEVICE(0x03F0, 0x311D) },
509 +
510 + /* Atheros AR5BBU12 with sflash firmware */
511 + { USB_DEVICE(0x0489, 0xE02C) },
512 { } /* Terminating entry */
513 };
514
515 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
516 index 1da773f..700a384 100644
517 --- a/drivers/bluetooth/btusb.c
518 +++ b/drivers/bluetooth/btusb.c
519 @@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
520 /* Atheros 3011 with sflash firmware */
521 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
522
523 + /* Atheros AR9285 Malbec with sflash firmware */
524 + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
525 +
526 + /* Atheros AR5BBU12 with sflash firmware */
527 + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
528 +
529 /* Broadcom BCM2035 */
530 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
531 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
532 @@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
533
534 if (hdev->conn_hash.sco_num > 0) {
535 if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
536 - err = usb_autopm_get_interface(data->isoc);
537 + err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
538 if (err < 0) {
539 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
540 usb_kill_anchored_urbs(&data->isoc_anchor);
541 @@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
542
543 __set_isoc_interface(hdev, 0);
544 if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
545 - usb_autopm_put_interface(data->isoc);
546 + usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
547 }
548 }
549
550 @@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
551
552 usb_set_intfdata(intf, data);
553
554 - usb_enable_autosuspend(interface_to_usbdev(intf));
555 -
556 return 0;
557 }
558
559 diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
560 index 16d5155..d198884 100644
561 --- a/drivers/gpu/drm/drm_irq.c
562 +++ b/drivers/gpu/drm/drm_irq.c
563 @@ -549,7 +549,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
564 struct drm_file *file_priv)
565 {
566 struct drm_modeset_ctl *modeset = data;
567 - int crtc, ret = 0;
568 + int ret = 0;
569 + unsigned int crtc;
570
571 /* If drm_vblank_init() hasn't been called yet, just no-op */
572 if (!dev->num_crtcs)
573 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
574 index 4916c10..2f2b09d 100644
575 --- a/drivers/gpu/drm/i915/i915_dma.c
576 +++ b/drivers/gpu/drm/i915/i915_dma.c
577 @@ -1943,6 +1943,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
578 if (IS_GEN2(dev))
579 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
580
581 + /* 965GM sometimes incorrectly writes to hardware status page (HWS)
582 + * using 32bit addressing, overwriting memory if HWS is located
583 + * above 4GB.
584 + *
585 + * The documentation also mentions an issue with undefined
586 + * behaviour if any general state is accessed within a page above 4GB,
587 + * which also needs to be handled carefully.
588 + */
589 + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
590 + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
591 +
592 dev_priv->regs = ioremap(base, size);
593 if (!dev_priv->regs) {
594 DRM_ERROR("failed to map registers\n");
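
The i915 hunk caps coherent DMA allocations at 32 bits for Broadwater/Crestline
so the hardware status page cannot land above 4GB. A hedged sketch of the same
mechanism with a made-up helper (the hunk itself ignores the return value):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Keep coherent allocations such as a hardware status page below 4GB for
 * devices that truncate such addresses to 32 bits. */
static int example_limit_coherent_dma(struct pci_dev *pdev)
{
	int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (ret)
		dev_warn(&pdev->dev, "cannot set 32-bit coherent DMA mask\n");
	return ret;
}
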
595 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
596 index c7d1fca..7e1b648 100644
597 --- a/drivers/gpu/drm/radeon/radeon_display.c
598 +++ b/drivers/gpu/drm/radeon/radeon_display.c
599 @@ -639,7 +639,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
600 max_fractional_feed_div = pll->max_frac_feedback_div;
601 }
602
603 - for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
604 + for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
605 uint32_t ref_div;
606
607 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
608 diff --git a/drivers/md/linear.c b/drivers/md/linear.c
609 index 8a2f767..0ed7f6b 100644
610 --- a/drivers/md/linear.c
611 +++ b/drivers/md/linear.c
612 @@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
613
614 if (md_check_no_bitmap(mddev))
615 return -EINVAL;
616 - mddev->queue->queue_lock = &mddev->queue->__queue_lock;
617 conf = linear_conf(mddev, mddev->raid_disks);
618
619 if (!conf)
620 diff --git a/drivers/md/md.c b/drivers/md/md.c
621 index 0e5a483..ec4c585 100644
622 --- a/drivers/md/md.c
623 +++ b/drivers/md/md.c
624 @@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
625 {
626 mddev_t *mddev, *new = NULL;
627
628 + if (unit && MAJOR(unit) != MD_MAJOR)
629 + unit &= ~((1<<MdpMinorShift)-1);
630 +
631 retry:
632 spin_lock(&all_mddevs_lock);
633
634 @@ -4611,6 +4614,7 @@ static int do_md_run(mddev_t *mddev)
635 }
636 set_capacity(mddev->gendisk, mddev->array_sectors);
637 revalidate_disk(mddev->gendisk);
638 + mddev->changed = 1;
639 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
640 out:
641 return err;
642 @@ -4699,6 +4703,7 @@ static void md_clean(mddev_t *mddev)
643 mddev->sync_speed_min = mddev->sync_speed_max = 0;
644 mddev->recovery = 0;
645 mddev->in_sync = 0;
646 + mddev->changed = 0;
647 mddev->degraded = 0;
648 mddev->safemode = 0;
649 mddev->bitmap_info.offset = 0;
650 @@ -4808,6 +4813,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
651
652 set_capacity(disk, 0);
653 mutex_unlock(&mddev->open_mutex);
654 + mddev->changed = 1;
655 revalidate_disk(disk);
656
657 if (mddev->ro)
658 @@ -5991,7 +5997,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
659 atomic_inc(&mddev->openers);
660 mutex_unlock(&mddev->open_mutex);
661
662 - check_disk_size_change(mddev->gendisk, bdev);
663 + check_disk_change(bdev);
664 out:
665 return err;
666 }
667 @@ -6006,6 +6012,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
668
669 return 0;
670 }
671 +
672 +static int md_media_changed(struct gendisk *disk)
673 +{
674 + mddev_t *mddev = disk->private_data;
675 +
676 + return mddev->changed;
677 +}
678 +
679 +static int md_revalidate(struct gendisk *disk)
680 +{
681 + mddev_t *mddev = disk->private_data;
682 +
683 + mddev->changed = 0;
684 + return 0;
685 +}
686 static const struct block_device_operations md_fops =
687 {
688 .owner = THIS_MODULE,
689 @@ -6016,6 +6037,8 @@ static const struct block_device_operations md_fops =
690 .compat_ioctl = md_compat_ioctl,
691 #endif
692 .getgeo = md_getgeo,
693 + .media_changed = md_media_changed,
694 + .revalidate_disk= md_revalidate,
695 };
696
697 static int md_thread(void * arg)
698 diff --git a/drivers/md/md.h b/drivers/md/md.h
699 index a161283..d271a5e 100644
700 --- a/drivers/md/md.h
701 +++ b/drivers/md/md.h
702 @@ -270,6 +270,8 @@ struct mddev_s
703 atomic_t active; /* general refcount */
704 atomic_t openers; /* number of active opens */
705
706 + int changed; /* True if we might need to
707 + * reread partition info */
708 int degraded; /* whether md should consider
709 * adding a spare
710 */
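
The md.c and md.h hunks above add an mddev->changed flag plus media_changed and
revalidate_disk methods so that md_open()'s check_disk_change() call rereads
partition information after do_md_run()/do_md_stop(). A hedged, simplified
paraphrase of how the block layer consumes these two hooks (not the verbatim
fs/block_dev.c code):

#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>

static int example_check_disk_change(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	const struct block_device_operations *bdops = disk->fops;

	if (!bdops->media_changed || !bdops->media_changed(disk))
		return 0;		/* md_media_changed(): nothing changed */

	/* the real code flushes stale buffers and partition state here */

	if (bdops->revalidate_disk)
		bdops->revalidate_disk(disk);	/* md_revalidate() clears ->changed */
	return 1;
}
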
711 diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
712 index 6d7ddf3..3a62d44 100644
713 --- a/drivers/md/multipath.c
714 +++ b/drivers/md/multipath.c
715 @@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
716 * bookkeeping area. [whatever we allocate in multipath_run(),
717 * should be freed in multipath_stop()]
718 */
719 - mddev->queue->queue_lock = &mddev->queue->__queue_lock;
720
721 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
722 mddev->private = conf;
723 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
724 index a39f4c3..734d943 100644
725 --- a/drivers/md/raid0.c
726 +++ b/drivers/md/raid0.c
727 @@ -353,7 +353,6 @@ static int raid0_run(mddev_t *mddev)
728 if (md_check_no_bitmap(mddev))
729 return -EINVAL;
730 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
731 - mddev->queue->queue_lock = &mddev->queue->__queue_lock;
732
733 /* if private is not null, we are here after takeover */
734 if (mddev->private == NULL) {
735 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
736 index 845cf95..2d9855c 100644
737 --- a/drivers/md/raid1.c
738 +++ b/drivers/md/raid1.c
739 @@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
740 if (conf->pending_bio_list.head) {
741 struct bio *bio;
742 bio = bio_list_get(&conf->pending_bio_list);
743 + /* Only take the spinlock to quiet a warning */
744 + spin_lock(conf->mddev->queue->queue_lock);
745 blk_remove_plug(conf->mddev->queue);
746 + spin_unlock(conf->mddev->queue->queue_lock);
747 spin_unlock_irq(&conf->device_lock);
748 /* flush any pending bitmap writes to
749 * disk before proceeding w/ I/O */
750 @@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
751 atomic_inc(&r1_bio->remaining);
752 spin_lock_irqsave(&conf->device_lock, flags);
753 bio_list_add(&conf->pending_bio_list, mbio);
754 - blk_plug_device(mddev->queue);
755 + blk_plug_device_unlocked(mddev->queue);
756 spin_unlock_irqrestore(&conf->device_lock, flags);
757 }
758 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
759 @@ -2024,7 +2027,6 @@ static int run(mddev_t *mddev)
760 if (IS_ERR(conf))
761 return PTR_ERR(conf);
762
763 - mddev->queue->queue_lock = &conf->device_lock;
764 list_for_each_entry(rdev, &mddev->disks, same_set) {
765 disk_stack_limits(mddev->gendisk, rdev->bdev,
766 rdev->data_offset << 9);
767 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
768 index 0641674..f51d713 100644
769 --- a/drivers/md/raid10.c
770 +++ b/drivers/md/raid10.c
771 @@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
772 if (conf->pending_bio_list.head) {
773 struct bio *bio;
774 bio = bio_list_get(&conf->pending_bio_list);
775 + /* Spinlock only taken to quiet a warning */
776 + spin_lock(conf->mddev->queue->queue_lock);
777 blk_remove_plug(conf->mddev->queue);
778 + spin_unlock(conf->mddev->queue->queue_lock);
779 spin_unlock_irq(&conf->device_lock);
780 /* flush any pending bitmap writes to disk
781 * before proceeding w/ I/O */
782 @@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
783 atomic_inc(&r10_bio->remaining);
784 spin_lock_irqsave(&conf->device_lock, flags);
785 bio_list_add(&conf->pending_bio_list, mbio);
786 - blk_plug_device(mddev->queue);
787 + blk_plug_device_unlocked(mddev->queue);
788 spin_unlock_irqrestore(&conf->device_lock, flags);
789 }
790
791 @@ -2303,8 +2306,6 @@ static int run(mddev_t *mddev)
792 if (!conf)
793 goto out;
794
795 - mddev->queue->queue_lock = &conf->device_lock;
796 -
797 mddev->thread = conf->thread;
798 conf->thread = NULL;
799
800 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
801 index dc574f3..30f0c0a 100644
802 --- a/drivers/md/raid5.c
803 +++ b/drivers/md/raid5.c
804 @@ -5205,7 +5205,6 @@ static int run(mddev_t *mddev)
805
806 mddev->queue->backing_dev_info.congested_data = mddev;
807 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
808 - mddev->queue->queue_lock = &conf->device_lock;
809 mddev->queue->unplug_fn = raid5_unplug_queue;
810
811 chunk_size = mddev->chunk_sectors << 9;
812 diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
813 index b4931ab..31882f6 100644
814 --- a/drivers/mfd/tps6586x.c
815 +++ b/drivers/mfd/tps6586x.c
816 @@ -152,12 +152,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
817 static inline int __tps6586x_writes(struct i2c_client *client, int reg,
818 int len, uint8_t *val)
819 {
820 - int ret;
821 + int ret, i;
822
823 - ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
824 - if (ret < 0) {
825 - dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
826 - return ret;
827 + for (i = 0; i < len; i++) {
828 + ret = __tps6586x_write(client, reg + i, *(val + i));
829 + if (ret < 0)
830 + return ret;
831 }
832
833 return 0;
834 diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
835 index 000cb41..92b85e2 100644
836 --- a/drivers/mfd/ucb1x00-ts.c
837 +++ b/drivers/mfd/ucb1x00-ts.c
838 @@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
839 idev->close = ucb1x00_ts_close;
840
841 __set_bit(EV_ABS, idev->evbit);
842 - __set_bit(ABS_X, idev->absbit);
843 - __set_bit(ABS_Y, idev->absbit);
844 - __set_bit(ABS_PRESSURE, idev->absbit);
845
846 input_set_drvdata(idev, ts);
847
848 + ucb1x00_adc_enable(ts->ucb);
849 + ts->x_res = ucb1x00_ts_read_xres(ts);
850 + ts->y_res = ucb1x00_ts_read_yres(ts);
851 + ucb1x00_adc_disable(ts->ucb);
852 +
853 + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
854 + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
855 + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
856 +
857 err = input_register_device(idev);
858 if (err)
859 goto fail;
860 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
861 index e3374d9..94754f2 100644
862 --- a/drivers/net/e1000e/ich8lan.c
863 +++ b/drivers/net/e1000e/ich8lan.c
864 @@ -338,12 +338,17 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
865 }
866
867 phy->id = e1000_phy_unknown;
868 - ret_val = e1000e_get_phy_id(hw);
869 - if (ret_val)
870 - goto out;
871 - if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
872 + switch (hw->mac.type) {
873 + default:
874 + ret_val = e1000e_get_phy_id(hw);
875 + if (ret_val)
876 + goto out;
877 + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
878 + break;
879 + /* fall-through */
880 + case e1000_pch2lan:
881 /*
882 - * In case the PHY needs to be in mdio slow mode (eg. 82577),
883 + * In case the PHY needs to be in mdio slow mode,
884 * set slow mode and try to get the PHY id again.
885 */
886 ret_val = e1000_set_mdio_slow_mode_hv(hw);
887 @@ -352,6 +357,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
888 ret_val = e1000e_get_phy_id(hw);
889 if (ret_val)
890 goto out;
891 + break;
892 }
893 phy->type = e1000e_get_phy_type_from_id(phy->id);
894
895 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
896 index c4ca162..a69230d 100644
897 --- a/drivers/net/e1000e/netdev.c
898 +++ b/drivers/net/e1000e/netdev.c
899 @@ -5884,7 +5884,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
900 /* APME bit in EEPROM is mapped to WUC.APME */
901 eeprom_data = er32(WUC);
902 eeprom_apme_mask = E1000_WUC_APME;
903 - if (eeprom_data & E1000_WUC_PHY_WAKE)
904 + if ((hw->mac.type > e1000_ich10lan) &&
905 + (eeprom_data & E1000_WUC_PHY_WAKE))
906 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
907 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
908 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
909 diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
910 index 3d3dc0c..5ce87a6 100644
911 --- a/drivers/net/e1000e/phy.c
912 +++ b/drivers/net/e1000e/phy.c
913 @@ -226,6 +226,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
914 }
915 *data = (u16) mdic;
916
917 + /*
918 + * Allow some time after each MDIC transaction to avoid
919 + * reading duplicate data in the next MDIC transaction.
920 + */
921 + if (hw->mac.type == e1000_pch2lan)
922 + udelay(100);
923 +
924 return 0;
925 }
926
927 @@ -279,6 +286,13 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
928 return -E1000_ERR_PHY;
929 }
930
931 + /*
932 + * Allow some time after each MDIC transaction to avoid
933 + * reading duplicate data in the next MDIC transaction.
934 + */
935 + if (hw->mac.type == e1000_pch2lan)
936 + udelay(100);
937 +
938 return 0;
939 }
940
941 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
942 index 53b13de..3f70a2f 100644
943 --- a/drivers/net/r8169.c
944 +++ b/drivers/net/r8169.c
945 @@ -24,6 +24,7 @@
946 #include <linux/init.h>
947 #include <linux/dma-mapping.h>
948 #include <linux/pm_runtime.h>
949 +#include <linux/pci-aspm.h>
950
951 #include <asm/system.h>
952 #include <asm/io.h>
953 @@ -3047,6 +3048,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
954 mii->reg_num_mask = 0x1f;
955 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
956
957 + /* disable ASPM completely as that cause random device stop working
958 + * problems as well as full system hangs for some PCIe devices users */
959 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
960 + PCIE_LINK_STATE_CLKPM);
961 +
962 /* enable device (incl. PCI PM wakeup and hotplug setup) */
963 rc = pci_enable_device(pdev);
964 if (rc < 0) {
965 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
966 index 6f97b7b..dcae19d 100644
967 --- a/drivers/net/tg3.c
968 +++ b/drivers/net/tg3.c
969 @@ -11165,7 +11165,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
970 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
971 break; /* We have no PHY */
972
973 - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
974 + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
975 + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
976 + !netif_running(dev)))
977 return -EAGAIN;
978
979 spin_lock_bh(&tp->lock);
980 @@ -11181,7 +11183,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
982 break; /* We have no PHY */
983
984 - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
985 + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
986 + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
987 + !netif_running(dev)))
988 return -EAGAIN;
989
990 spin_lock_bh(&tp->lock);
991 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
992 index 270671f..1587a82 100644
993 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
994 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
995 @@ -210,8 +210,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
996 struct tx_buf *tx_buf = NULL;
997 struct sk_buff *nskb = NULL;
998 int ret = 0, i;
999 - u16 *hdr, tx_skb_cnt = 0;
1000 + u16 tx_skb_cnt = 0;
1001 u8 *buf;
1002 + __le16 *hdr;
1003
1004 if (hif_dev->tx.tx_skb_cnt == 0)
1005 return 0;
1006 @@ -236,9 +237,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
1007
1008 buf = tx_buf->buf;
1009 buf += tx_buf->offset;
1010 - hdr = (u16 *)buf;
1011 - *hdr++ = nskb->len;
1012 - *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
1013 + hdr = (__le16 *)buf;
1014 + *hdr++ = cpu_to_le16(nskb->len);
1015 + *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
1016 buf += 4;
1017 memcpy(buf, nskb->data, nskb->len);
1018 tx_buf->len = nskb->len + 4;
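
The hif_usb.c hunk stops writing the 4-byte USB stream header through plain u16
pointers and switches to __le16 stores, keeping the on-wire format
little-endian on big-endian hosts as well. A hedged sketch of that pattern with
invented names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Fill the little-endian length/tag header in front of a frame. */
static void example_fill_stream_header(u8 *buf, u16 payload_len, u16 tag)
{
	__le16 *hdr = (__le16 *)buf;

	hdr[0] = cpu_to_le16(payload_len);	/* frame length */
	hdr[1] = cpu_to_le16(tag);		/* e.g. ATH_USB_TX_STREAM_MODE_TAG */
}
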
1019 diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
1020 index 7504ed1..7f8eb31 100644
1021 --- a/drivers/net/wireless/ath/carl9170/usb.c
1022 +++ b/drivers/net/wireless/ath/carl9170/usb.c
1023 @@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
1024 { USB_DEVICE(0x057c, 0x8402) },
1025 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
1026 { USB_DEVICE(0x1668, 0x1200) },
1027 + /* Airlive X.USB a/b/g/n */
1028 + { USB_DEVICE(0x1b75, 0x9170) },
1029
1030 /* terminate */
1031 {}
1032 diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
1033 index 1eacba4..0494d7b 100644
1034 --- a/drivers/net/wireless/p54/p54pci.c
1035 +++ b/drivers/net/wireless/p54/p54pci.c
1036 @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
1037 while (i != idx) {
1038 u16 len;
1039 struct sk_buff *skb;
1040 + dma_addr_t dma_addr;
1041 desc = &ring[i];
1042 len = le16_to_cpu(desc->len);
1043 skb = rx_buf[i];
1044 @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
1045
1046 len = priv->common.rx_mtu;
1047 }
1048 + dma_addr = le32_to_cpu(desc->host_addr);
1049 + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
1050 + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
1051 skb_put(skb, len);
1052
1053 if (p54_rx(dev, skb)) {
1054 - pci_unmap_single(priv->pdev,
1055 - le32_to_cpu(desc->host_addr),
1056 - priv->common.rx_mtu + 32,
1057 - PCI_DMA_FROMDEVICE);
1058 + pci_unmap_single(priv->pdev, dma_addr,
1059 + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
1060 rx_buf[i] = NULL;
1061 - desc->host_addr = 0;
1062 + desc->host_addr = cpu_to_le32(0);
1063 } else {
1064 skb_trim(skb, 0);
1065 + pci_dma_sync_single_for_device(priv->pdev, dma_addr,
1066 + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
1067 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
1068 }
1069
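
The p54pci fix syncs the long-lived RX DMA mapping for the CPU before the frame
is inspected and back for the device when the buffer is recycled. A hedged
sketch of that ownership handoff using the same legacy pci_dma_* API (helper
name invented):

#include <linux/pci.h>

static void example_recycle_rx_buffer(struct pci_dev *pdev, dma_addr_t addr,
				      size_t len)
{
	/* device -> CPU: make the freshly DMA'd data visible to the driver */
	pci_dma_sync_single_for_cpu(pdev, addr, len, PCI_DMA_FROMDEVICE);

	/* ... read or copy the received frame here ... */

	/* CPU -> device: hand the buffer back for the next receive */
	pci_dma_sync_single_for_device(pdev, addr, len, PCI_DMA_FROMDEVICE);
}
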
1070 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
1071 index 2325e56..3576be5 100644
1072 --- a/drivers/net/wireless/p54/p54usb.c
1073 +++ b/drivers/net/wireless/p54/p54usb.c
1074 @@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
1075 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
1076 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
1077 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
1078 + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
1079 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
1080 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
1081 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
1082 diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
1083 index 5706355..16413f8 100644
1084 --- a/drivers/rtc/rtc-ds3232.c
1085 +++ b/drivers/rtc/rtc-ds3232.c
1086 @@ -1,7 +1,7 @@
1087 /*
1088 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
1089 *
1090 - * Copyright (C) 2009-2010 Freescale Semiconductor.
1091 + * Copyright (C) 2009-2011 Freescale Semiconductor.
1092 * Author: Jack Lan <jack.lan@freescale.com>
1093 *
1094 * This program is free software; you can redistribute it and/or modify it
1095 @@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
1096 time->tm_hour = bcd2bin(hour);
1097 }
1098
1099 - time->tm_wday = bcd2bin(week);
1100 + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
1101 + time->tm_wday = bcd2bin(week) - 1;
1102 time->tm_mday = bcd2bin(day);
1103 - time->tm_mon = bcd2bin(month & 0x7F);
1104 + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
1105 + time->tm_mon = bcd2bin(month & 0x7F) - 1;
1106 if (century)
1107 add_century = 100;
1108
1109 @@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
1110 buf[0] = bin2bcd(time->tm_sec);
1111 buf[1] = bin2bcd(time->tm_min);
1112 buf[2] = bin2bcd(time->tm_hour);
1113 - buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
1114 + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
1115 + buf[3] = bin2bcd(time->tm_wday + 1);
1116 buf[4] = bin2bcd(time->tm_mday); /* Date */
1117 - buf[5] = bin2bcd(time->tm_mon);
1118 + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
1119 + buf[5] = bin2bcd(time->tm_mon + 1);
1120 if (time->tm_year >= 100) {
1121 buf[5] |= 0x80;
1122 buf[6] = bin2bcd(time->tm_year - 100);
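
The rtc-ds3232 fix converts between the kernel's 0-based tm_mon/tm_wday and the
chip's 1-based BCD registers. A hedged worked example of the month conversion
(the helper is invented; bcd2bin/bin2bcd are the standard <linux/bcd.h> helpers
used by the hunk):

#include <linux/bcd.h>
#include <linux/types.h>

static void example_month_roundtrip(void)
{
	int tm_mon = 11;			/* December in struct rtc_time  */
	u8 reg = bin2bcd(tm_mon + 1);		/* month 12 -> 0x12 on the chip */
	int back = bcd2bin(reg & 0x7f) - 1;	/* 0x12 -> 12 -> tm_mon == 11   */

	(void)back;	/* the same +1/-1 shift applies to tm_wday (0..6 vs 1..7) */
}
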
1123 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1124 index 4a38422..4095248 100644
1125 --- a/drivers/scsi/scsi_lib.c
1126 +++ b/drivers/scsi/scsi_lib.c
1127 @@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
1128 &sdev->request_queue->queue_flags);
1129 if (flagset)
1130 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
1131 - __blk_run_queue(sdev->request_queue);
1132 + __blk_run_queue(sdev->request_queue, false);
1133 if (flagset)
1134 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
1135 spin_unlock(sdev->request_queue->queue_lock);
1136 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
1137 index 998c01b..5c3ccfc 100644
1138 --- a/drivers/scsi/scsi_transport_fc.c
1139 +++ b/drivers/scsi/scsi_transport_fc.c
1140 @@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
1141 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
1142 if (flagset)
1143 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
1144 - __blk_run_queue(rport->rqst_q);
1145 + __blk_run_queue(rport->rqst_q, false);
1146 if (flagset)
1147 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
1148 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
1149 diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
1150 index cb2041a..69f9687 100644
1151 --- a/drivers/staging/brcm80211/sys/wl_mac80211.c
1152 +++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
1153 @@ -353,9 +353,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
1154 switch (type) {
1155 case NL80211_CHAN_HT20:
1156 case NL80211_CHAN_NO_HT:
1157 - WL_LOCK(wl);
1158 err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
1159 - WL_UNLOCK(wl);
1160 break;
1161 case NL80211_CHAN_HT40MINUS:
1162 case NL80211_CHAN_HT40PLUS:
1163 @@ -376,6 +374,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
1164 int err = 0;
1165 int new_int;
1166
1167 + WL_LOCK(wl);
1168 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
1169 WL_NONE(("%s: Setting listen interval to %d\n",
1170 __func__, conf->listen_interval));
1171 @@ -431,6 +430,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
1172 }
1173
1174 config_out:
1175 + WL_UNLOCK(wl);
1176 return err;
1177 }
1178
1179 @@ -559,14 +559,20 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1180
1181 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
1182 {
1183 + struct wl_info *wl = hw->priv;
1184 WL_NONE(("Scan Start\n"));
1185 - return;
1186 + WL_LOCK(wl);
1187 + wlc_scan_start(wl->wlc);
1188 + WL_UNLOCK(wl);
1189 }
1190
1191 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
1192 {
1193 + struct wl_info *wl = hw->priv;
1194 WL_NONE(("Scan Complete\n"));
1195 - return;
1196 + WL_LOCK(wl);
1197 + wlc_scan_stop(wl->wlc);
1198 + WL_UNLOCK(wl);
1199 }
1200
1201 static void wl_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1202 diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
1203 index feaffcc..1671cc0 100644
1204 --- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
1205 +++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
1206 @@ -5336,7 +5336,6 @@ wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw)
1207 fifo = prio2fifo[prio];
1208
1209 ASSERT((uint) PKTHEADROOM(sdu) >= TXOFF);
1210 - ASSERT(!PKTSHARED(sdu));
1211 ASSERT(!PKTNEXT(sdu));
1212 ASSERT(!PKTLINK(sdu));
1213 ASSERT(fifo < NFIFO);
1214 @@ -8673,3 +8672,16 @@ static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi)
1215
1216 kfree(qi);
1217 }
1218 +
1219 +/*
1220 + * Flag 'scan in progress' to withold dynamic phy calibration
1221 + */
1222 +void wlc_scan_start(struct wlc_info *wlc)
1223 +{
1224 + wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
1225 +}
1226 +
1227 +void wlc_scan_stop(struct wlc_info *wlc)
1228 +{
1229 + wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
1230 +}
1231 diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
1232 index a6a8c33..b66723b 100644
1233 --- a/drivers/staging/brcm80211/sys/wlc_pub.h
1234 +++ b/drivers/staging/brcm80211/sys/wlc_pub.h
1235 @@ -568,6 +568,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
1236 extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
1237 extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
1238 extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
1239 +extern void wlc_scan_start(struct wlc_info *wlc);
1240 +extern void wlc_scan_stop(struct wlc_info *wlc);
1241
1242 static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
1243 uint *arg)
1244 diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
1245 index cd25b24..fd274e9 100644
1246 --- a/drivers/staging/comedi/drivers/mite.c
1247 +++ b/drivers/staging/comedi/drivers/mite.c
1248 @@ -61,8 +61,6 @@
1249 #define PCI_DAQ_SIZE 4096
1250 #define PCI_DAQ_SIZE_660X 8192
1251
1252 -MODULE_LICENSE("GPL");
1253 -
1254 struct mite_struct *mite_devices;
1255 EXPORT_SYMBOL(mite_devices);
1256
1257 diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
1258 index 14e716e..54741c9 100644
1259 --- a/drivers/staging/comedi/drivers/ni_6527.c
1260 +++ b/drivers/staging/comedi/drivers/ni_6527.c
1261 @@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
1262
1263 module_init(driver_ni6527_init_module);
1264 module_exit(driver_ni6527_cleanup_module);
1265 +
1266 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1267 +MODULE_DESCRIPTION("Comedi low-level driver");
1268 +MODULE_LICENSE("GPL");
1269 diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
1270 index 8b8e2aa..403fc09 100644
1271 --- a/drivers/staging/comedi/drivers/ni_65xx.c
1272 +++ b/drivers/staging/comedi/drivers/ni_65xx.c
1273 @@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
1274
1275 module_init(driver_ni_65xx_init_module);
1276 module_exit(driver_ni_65xx_cleanup_module);
1277 +
1278 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1279 +MODULE_DESCRIPTION("Comedi low-level driver");
1280 +MODULE_LICENSE("GPL");
1281 diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
1282 index 6612b08..ca2aeaa 100644
1283 --- a/drivers/staging/comedi/drivers/ni_660x.c
1284 +++ b/drivers/staging/comedi/drivers/ni_660x.c
1285 @@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
1286 };
1287 return 0;
1288 }
1289 +
1290 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1291 +MODULE_DESCRIPTION("Comedi low-level driver");
1292 +MODULE_LICENSE("GPL");
1293 diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
1294 index e9f034e..d8d91f9 100644
1295 --- a/drivers/staging/comedi/drivers/ni_670x.c
1296 +++ b/drivers/staging/comedi/drivers/ni_670x.c
1297 @@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
1298 mite_list_devices();
1299 return -EIO;
1300 }
1301 +
1302 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1303 +MODULE_DESCRIPTION("Comedi low-level driver");
1304 +MODULE_LICENSE("GPL");
1305 diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
1306 index 84a15c3..005d2fe 100644
1307 --- a/drivers/staging/comedi/drivers/ni_pcidio.c
1308 +++ b/drivers/staging/comedi/drivers/ni_pcidio.c
1309 @@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
1310
1311 module_init(driver_pcidio_init_module);
1312 module_exit(driver_pcidio_cleanup_module);
1313 +
1314 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1315 +MODULE_DESCRIPTION("Comedi low-level driver");
1316 +MODULE_LICENSE("GPL");
1317 diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
1318 index 23a3812..9148abd 100644
1319 --- a/drivers/staging/comedi/drivers/ni_pcimio.c
1320 +++ b/drivers/staging/comedi/drivers/ni_pcimio.c
1321 @@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
1322
1323 return 0;
1324 }
1325 +
1326 +MODULE_AUTHOR("Comedi http://www.comedi.org");
1327 +MODULE_DESCRIPTION("Comedi low-level driver");
1328 +MODULE_LICENSE("GPL");
1329 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
1330 index 41a1fe5..afc3b1a 100644
1331 --- a/drivers/staging/usbip/vhci.h
1332 +++ b/drivers/staging/usbip/vhci.h
1333 @@ -100,9 +100,6 @@ struct vhci_hcd {
1334 * But, the index of this array begins from 0.
1335 */
1336 struct vhci_device vdev[VHCI_NPORTS];
1337 -
1338 - /* vhci_device which has not been assiged its address yet */
1339 - int pending_port;
1340 };
1341
1342
1343 @@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport);
1344 void vhci_rx_loop(struct usbip_task *ut);
1345 void vhci_tx_loop(struct usbip_task *ut);
1346
1347 +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
1348 + __u32 seqnum);
1349 +
1350 #define hardware (&the_controller->pdev.dev)
1351
1352 static inline struct vhci_device *port_to_vdev(__u32 port)
1353 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
1354 index 08bd26a..a35fe61 100644
1355 --- a/drivers/staging/usbip/vhci_hcd.c
1356 +++ b/drivers/staging/usbip/vhci_hcd.c
1357 @@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
1358 * the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
1359 * spin_unlock(&the_controller->vdev[rhport].ud.lock); */
1360
1361 - the_controller->pending_port = rhport;
1362 -
1363 spin_unlock_irqrestore(&the_controller->lock, flags);
1364
1365 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
1366 @@ -559,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1367 struct device *dev = &urb->dev->dev;
1368 int ret = 0;
1369 unsigned long flags;
1370 + struct vhci_device *vdev;
1371
1372 usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
1373 hcd, urb, mem_flags);
1374 @@ -574,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1375 return urb->status;
1376 }
1377
1378 + vdev = port_to_vdev(urb->dev->portnum-1);
1379 +
1380 + /* refuse enqueue for dead connection */
1381 + spin_lock(&vdev->ud.lock);
1382 + if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
1383 + usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
1384 + spin_unlock(&vdev->ud.lock);
1385 + spin_unlock_irqrestore(&the_controller->lock, flags);
1386 + return -ENODEV;
1387 + }
1388 + spin_unlock(&vdev->ud.lock);
1389 +
1390 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1391 if (ret)
1392 goto no_need_unlink;
1393 @@ -592,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1394 __u8 type = usb_pipetype(urb->pipe);
1395 struct usb_ctrlrequest *ctrlreq =
1396 (struct usb_ctrlrequest *) urb->setup_packet;
1397 - struct vhci_device *vdev =
1398 - port_to_vdev(the_controller->pending_port);
1399
1400 if (type != PIPE_CONTROL || !ctrlreq) {
1401 dev_err(dev, "invalid request to devnum 0\n");
1402 @@ -607,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1403 dev_info(dev, "SetAddress Request (%d) to port %d\n",
1404 ctrlreq->wValue, vdev->rhport);
1405
1406 - vdev->udev = urb->dev;
1407 + if (vdev->udev)
1408 + usb_put_dev(vdev->udev);
1409 + vdev->udev = usb_get_dev(urb->dev);
1410
1411 spin_lock(&vdev->ud.lock);
1412 vdev->ud.status = VDEV_ST_USED;
1413 @@ -627,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1414 "Get_Descriptor to device 0 "
1415 "(get max pipe size)\n");
1416
1417 - /* FIXME: reference count? (usb_get_dev()) */
1418 - vdev->udev = urb->dev;
1419 + if (vdev->udev)
1420 + usb_put_dev(vdev->udev);
1421 + vdev->udev = usb_get_dev(urb->dev);
1422 goto out;
1423
1424 default:
1425 @@ -805,7 +817,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1426 return 0;
1427 }
1428
1429 -
1430 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
1431 {
1432 struct vhci_unlink *unlink, *tmp;
1433 @@ -813,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
1434 spin_lock(&vdev->priv_lock);
1435
1436 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
1437 + usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
1438 list_del(&unlink->list);
1439 kfree(unlink);
1440 }
1441
1442 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
1443 + struct urb *urb;
1444 +
1445 + /* give back URB of unanswered unlink request */
1446 + usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
1447 +
1448 + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
1449 + if (!urb) {
1450 + usbip_uinfo("the urb (seqnum %lu) was already given back\n",
1451 + unlink->unlink_seqnum);
1452 + list_del(&unlink->list);
1453 + kfree(unlink);
1454 + continue;
1455 + }
1456 +
1457 + urb->status = -ENODEV;
1458 +
1459 + spin_lock(&the_controller->lock);
1460 + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
1461 + spin_unlock(&the_controller->lock);
1462 +
1463 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
1464 +
1465 list_del(&unlink->list);
1466 kfree(unlink);
1467 }
1468 @@ -887,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud)
1469 vdev->speed = 0;
1470 vdev->devid = 0;
1471
1472 + if (vdev->udev)
1473 + usb_put_dev(vdev->udev);
1474 + vdev->udev = NULL;
1475 +
1476 ud->tcp_socket = NULL;
1477
1478 ud->status = VDEV_ST_NULL;
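
The vhci_hcd.c hunks replace bare assignments of urb->dev with a proper
usb_get_dev()/usb_put_dev() pair, so the virtual host controller holds a
reference on the cached usb_device until vhci_device_reset() drops it. A hedged
sketch of that pairing with an invented helper:

#include <linux/usb.h>

/* Replace a cached usb_device pointer, releasing the old reference and
 * taking one on the new device. */
static void example_cache_udev(struct usb_device **slot, struct usb_device *udev)
{
	if (*slot)
		usb_put_dev(*slot);	/* drop the previous reference */
	*slot = usb_get_dev(udev);	/* pin the new device          */
}
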
1479 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
1480 index 8147d72..bdbedd2 100644
1481 --- a/drivers/staging/usbip/vhci_rx.c
1482 +++ b/drivers/staging/usbip/vhci_rx.c
1483 @@ -23,16 +23,14 @@
1484 #include "vhci.h"
1485
1486
1487 -/* get URB from transmitted urb queue */
1488 -static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
1489 +/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
1490 +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
1491 __u32 seqnum)
1492 {
1493 struct vhci_priv *priv, *tmp;
1494 struct urb *urb = NULL;
1495 int status;
1496
1497 - spin_lock(&vdev->priv_lock);
1498 -
1499 list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
1500 if (priv->seqnum == seqnum) {
1501 urb = priv->urb;
1502 @@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
1503 }
1504 }
1505
1506 - spin_unlock(&vdev->priv_lock);
1507 -
1508 return urb;
1509 }
1510
1511 @@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1512 struct usbip_device *ud = &vdev->ud;
1513 struct urb *urb;
1514
1515 + spin_lock(&vdev->priv_lock);
1516
1517 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
1518
1519 + spin_unlock(&vdev->priv_lock);
1520
1521 if (!urb) {
1522 usbip_uerr("cannot find a urb of seqnum %u\n",
1523 @@ -161,7 +159,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1524 return;
1525 }
1526
1527 + spin_lock(&vdev->priv_lock);
1528 +
1529 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
1530 +
1531 + spin_unlock(&vdev->priv_lock);
1532 +
1533 if (!urb) {
1534 /*
1535 * I get the result of a unlink request. But, it seems that I
1536 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1537 index 32d0ad2..e8fa0dd 100644
1538 --- a/drivers/usb/core/hub.c
1539 +++ b/drivers/usb/core/hub.c
1540 @@ -2672,17 +2672,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
1541
1542 mutex_lock(&usb_address0_mutex);
1543
1544 - if (!udev->config && oldspeed == USB_SPEED_SUPER) {
1545 - /* Don't reset USB 3.0 devices during an initial setup */
1546 - usb_set_device_state(udev, USB_STATE_DEFAULT);
1547 - } else {
1548 - /* Reset the device; full speed may morph to high speed */
1549 - /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
1550 - retval = hub_port_reset(hub, port1, udev, delay);
1551 - if (retval < 0) /* error or disconnect */
1552 - goto fail;
1553 - /* success, speed is known */
1554 - }
1555 + /* Reset the device; full speed may morph to high speed */
1556 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
1557 + retval = hub_port_reset(hub, port1, udev, delay);
1558 + if (retval < 0) /* error or disconnect */
1559 + goto fail;
1560 + /* success, speed is known */
1561 +
1562 retval = -ENODEV;
1563
1564 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
1565 @@ -2744,6 +2740,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
1566 udev->ttport = hdev->ttport;
1567 } else if (udev->speed != USB_SPEED_HIGH
1568 && hdev->speed == USB_SPEED_HIGH) {
1569 + if (!hub->tt.hub) {
1570 + dev_err(&udev->dev, "parent hub has no TT\n");
1571 + retval = -EINVAL;
1572 + goto fail;
1573 + }
1574 udev->tt = &hub->tt;
1575 udev->ttport = port1;
1576 }
1577 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1578 index 25719da..1848420 100644
1579 --- a/drivers/usb/core/quirks.c
1580 +++ b/drivers/usb/core/quirks.c
1581 @@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1582 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
1583 USB_QUIRK_CONFIG_INTF_STRINGS },
1584
1585 + /* Samsung Android phone modem - ID conflict with SPH-I500 */
1586 + { USB_DEVICE(0x04e8, 0x6601), .driver_info =
1587 + USB_QUIRK_CONFIG_INTF_STRINGS },
1588 +
1589 /* Roland SC-8820 */
1590 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
1591
1592 @@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1593 /* M-Systems Flash Disk Pioneers */
1594 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
1595
1596 + /* Keytouch QWERTY Panel keyboard */
1597 + { USB_DEVICE(0x0926, 0x3333), .driver_info =
1598 + USB_QUIRK_CONFIG_INTF_STRINGS },
1599 +
1600 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
1601 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
1602
1603 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
1604 index 62c70c2..09bb3c9 100644
1605 --- a/drivers/usb/host/xhci-ring.c
1606 +++ b/drivers/usb/host/xhci-ring.c
1607 @@ -479,8 +479,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1608 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
1609 dev->eps[ep_index].stopped_trb,
1610 &state->new_cycle_state);
1611 - if (!state->new_deq_seg)
1612 - BUG();
1613 + if (!state->new_deq_seg) {
1614 + WARN_ON(1);
1615 + return;
1616 + }
1617 +
1618 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
1619 xhci_dbg(xhci, "Finding endpoint context\n");
1620 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1621 @@ -491,8 +494,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
1622 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
1623 state->new_deq_ptr,
1624 &state->new_cycle_state);
1625 - if (!state->new_deq_seg)
1626 - BUG();
1627 + if (!state->new_deq_seg) {
1628 + WARN_ON(1);
1629 + return;
1630 + }
1631
1632 trb = &state->new_deq_ptr->generic;
1633 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
1634 @@ -2369,12 +2374,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
1635
1636 /* Scatter gather list entries may cross 64KB boundaries */
1637 running_total = TRB_MAX_BUFF_SIZE -
1638 - (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1639 + (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
1640 + running_total &= TRB_MAX_BUFF_SIZE - 1;
1641 if (running_total != 0)
1642 num_trbs++;
1643
1644 /* How many more 64KB chunks to transfer, how many more TRBs? */
1645 - while (running_total < sg_dma_len(sg)) {
1646 + while (running_total < sg_dma_len(sg) && running_total < temp) {
1647 num_trbs++;
1648 running_total += TRB_MAX_BUFF_SIZE;
1649 }
1650 @@ -2399,11 +2405,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
1651 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
1652 {
1653 if (num_trbs != 0)
1654 - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
1655 + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
1656 "TRBs, %d left\n", __func__,
1657 urb->ep->desc.bEndpointAddress, num_trbs);
1658 if (running_total != urb->transfer_buffer_length)
1659 - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
1660 + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
1661 "queued %#x (%d), asked for %#x (%d)\n",
1662 __func__,
1663 urb->ep->desc.bEndpointAddress,
1664 @@ -2535,8 +2541,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1665 sg = urb->sg;
1666 addr = (u64) sg_dma_address(sg);
1667 this_sg_len = sg_dma_len(sg);
1668 - trb_buff_len = TRB_MAX_BUFF_SIZE -
1669 - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1670 + trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
1671 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1672 if (trb_buff_len > urb->transfer_buffer_length)
1673 trb_buff_len = urb->transfer_buffer_length;
1674 @@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1675 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1676 (unsigned int) addr + trb_buff_len);
1677 if (TRB_MAX_BUFF_SIZE -
1678 - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
1679 + (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
1680 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
1681 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
1682 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1683 @@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1684 }
1685
1686 trb_buff_len = TRB_MAX_BUFF_SIZE -
1687 - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1688 + (addr & (TRB_MAX_BUFF_SIZE - 1));
1689 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
1690 if (running_total + trb_buff_len > urb->transfer_buffer_length)
1691 trb_buff_len =
1692 @@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1693 num_trbs = 0;
1694 /* How much data is (potentially) left before the 64KB boundary? */
1695 running_total = TRB_MAX_BUFF_SIZE -
1696 - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1697 + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
1698 + running_total &= TRB_MAX_BUFF_SIZE - 1;
1699
1700 /* If there's some data on this 64KB chunk, or we have to send a
1701 * zero-length transfer, we need at least one TRB
1702 @@ -2699,8 +2705,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1703 /* How much data is in the first TRB? */
1704 addr = (u64) urb->transfer_dma;
1705 trb_buff_len = TRB_MAX_BUFF_SIZE -
1706 - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1707 - if (urb->transfer_buffer_length < trb_buff_len)
1708 + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
1709 + if (trb_buff_len > urb->transfer_buffer_length)
1710 trb_buff_len = urb->transfer_buffer_length;
1711
1712 first_trb = true;
1713 @@ -2872,8 +2878,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
1714 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
1715 td_len = urb->iso_frame_desc[i].length;
1716
1717 - running_total = TRB_MAX_BUFF_SIZE -
1718 - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
1719 + running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
1720 + running_total &= TRB_MAX_BUFF_SIZE - 1;
1721 if (running_total != 0)
1722 num_trbs++;
1723
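The xhci-ring.c hunks above replace the open-coded (1 << TRB_MAX_BUFF_SHIFT) - 1 mask with TRB_MAX_BUFF_SIZE - 1 and add a second masking step so that a transfer starting exactly on a 64KB boundary contributes no partial first chunk. The stand-alone C sketch below of that counting arithmetic is an illustration only, assuming TRB_MAX_BUFF_SIZE is 64KB as the boundary comments imply; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1u << 16)	/* assumed 64KB, matching the "64KB boundaries" comments */

static unsigned int count_trbs(uint64_t addr, uint64_t len)
{
	unsigned int num_trbs = 0;
	uint64_t running_total;

	/* Bytes left before the first 64KB boundary; the extra mask makes a
	 * buffer that starts exactly on a boundary contribute 0, not 64KB. */
	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0)
		num_trbs++;

	/* One more TRB per further 64KB chunk of the transfer. */
	while (running_total < len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	/* Even a zero-length transfer needs one TRB. */
	return num_trbs ? num_trbs : 1;
}

int main(void)
{
	printf("%u\n", count_trbs(0x10000, 0x20000));	/* starts on a boundary: 2 TRBs */
	printf("%u\n", count_trbs(0x10100, 0x20000));	/* crosses two boundaries: 3 TRBs */
	return 0;
}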
1724 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
1725 index 99beebc..826485b 100644
1726 --- a/drivers/usb/musb/musb_core.c
1727 +++ b/drivers/usb/musb/musb_core.c
1728 @@ -1880,6 +1880,7 @@ allocate_instance(struct device *dev,
1729 INIT_LIST_HEAD(&musb->out_bulk);
1730
1731 hcd->uses_new_polling = 1;
1732 + hcd->has_tt = 1;
1733
1734 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1735 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1736 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
1737 index ed618bd..be10d31 100644
1738 --- a/drivers/usb/musb/omap2430.c
1739 +++ b/drivers/usb/musb/omap2430.c
1740 @@ -317,6 +317,7 @@ static int musb_platform_resume(struct musb *musb)
1741
1742 int musb_platform_exit(struct musb *musb)
1743 {
1744 + del_timer_sync(&musb_idle_timer);
1745
1746 musb_platform_suspend(musb);
1747
1748 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1749 index 7481ff8..0457813 100644
1750 --- a/drivers/usb/serial/sierra.c
1751 +++ b/drivers/usb/serial/sierra.c
1752 @@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
1753 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
1754 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
1755 },
1756 + { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
1757 + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
1758 + },
1759 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
1760
1761 { }
1762 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
1763 index fbc9467..b14d082 100644
1764 --- a/drivers/usb/serial/usb_wwan.c
1765 +++ b/drivers/usb/serial/usb_wwan.c
1766 @@ -216,12 +216,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
1767 __func__, status, endpoint);
1768 } else {
1769 tty = tty_port_tty_get(&port->port);
1770 - if (urb->actual_length) {
1771 - tty_insert_flip_string(tty, data, urb->actual_length);
1772 - tty_flip_buffer_push(tty);
1773 - } else
1774 - dbg("%s: empty read urb received", __func__);
1775 - tty_kref_put(tty);
1776 + if (tty) {
1777 + if (urb->actual_length) {
1778 + tty_insert_flip_string(tty, data,
1779 + urb->actual_length);
1780 + tty_flip_buffer_push(tty);
1781 + } else
1782 + dbg("%s: empty read urb received", __func__);
1783 + tty_kref_put(tty);
1784 + }
1785
1786 /* Resubmit urb so we continue receiving */
1787 if (status != -ESHUTDOWN) {
1788 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
1789 index 15a5d89..1c11959 100644
1790 --- a/drivers/usb/serial/visor.c
1791 +++ b/drivers/usb/serial/visor.c
1792 @@ -27,6 +27,7 @@
1793 #include <linux/uaccess.h>
1794 #include <linux/usb.h>
1795 #include <linux/usb/serial.h>
1796 +#include <linux/usb/cdc.h>
1797 #include "visor.h"
1798
1799 /*
1800 @@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
1801
1802 dbg("%s", __func__);
1803
1804 + /*
1805 + * some Samsung Android phones in modem mode have the same ID
1806 + * as SPH-I500, but they are ACM devices, so don't bind to them
1807 + */
1808 + if (id->idVendor == SAMSUNG_VENDOR_ID &&
1809 + id->idProduct == SAMSUNG_SPH_I500_ID &&
1810 + serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
1811 + serial->dev->descriptor.bDeviceSubClass ==
1812 + USB_CDC_SUBCLASS_ACM)
1813 + return -ENODEV;
1814 +
1815 if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
1816 dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
1817 serial->dev->actconfig->desc.bConfigurationValue);
1818 diff --git a/fs/block_dev.c b/fs/block_dev.c
1819 index 4230252..be84b70 100644
1820 --- a/fs/block_dev.c
1821 +++ b/fs/block_dev.c
1822 @@ -1217,9 +1217,9 @@ EXPORT_SYMBOL(open_by_devnum);
1823 * when a disk has been changed -- either by a media change or online
1824 * resize.
1825 */
1826 -static void flush_disk(struct block_device *bdev)
1827 +static void flush_disk(struct block_device *bdev, bool kill_dirty)
1828 {
1829 - if (__invalidate_device(bdev)) {
1830 + if (__invalidate_device(bdev, kill_dirty)) {
1831 char name[BDEVNAME_SIZE] = "";
1832
1833 if (bdev->bd_disk)
1834 @@ -1256,7 +1256,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
1835 "%s: detected capacity change from %lld to %lld\n",
1836 name, bdev_size, disk_size);
1837 i_size_write(bdev->bd_inode, disk_size);
1838 - flush_disk(bdev);
1839 + flush_disk(bdev, false);
1840 }
1841 }
1842 EXPORT_SYMBOL(check_disk_size_change);
1843 @@ -1308,7 +1308,7 @@ int check_disk_change(struct block_device *bdev)
1844 if (!bdops->media_changed(bdev->bd_disk))
1845 return 0;
1846
1847 - flush_disk(bdev);
1848 + flush_disk(bdev, true);
1849 if (bdops->revalidate_disk)
1850 bdops->revalidate_disk(bdev->bd_disk);
1851 return 1;
1852 @@ -1776,7 +1776,7 @@ void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
1853
1854 EXPORT_SYMBOL(close_bdev_exclusive);
1855
1856 -int __invalidate_device(struct block_device *bdev)
1857 +int __invalidate_device(struct block_device *bdev, bool kill_dirty)
1858 {
1859 struct super_block *sb = get_super(bdev);
1860 int res = 0;
1861 @@ -1789,7 +1789,7 @@ int __invalidate_device(struct block_device *bdev)
1862 * hold).
1863 */
1864 shrink_dcache_sb(sb);
1865 - res = invalidate_inodes(sb);
1866 + res = invalidate_inodes(sb, kill_dirty);
1867 drop_super(sb);
1868 }
1869 invalidate_bdev(bdev);
1870 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
1871 index 7daf1e6..be029a4 100644
1872 --- a/fs/eventpoll.c
1873 +++ b/fs/eventpoll.c
1874 @@ -63,6 +63,13 @@
1875 * cleanup path and it is also acquired by eventpoll_release_file()
1876 * if a file has been pushed inside an epoll set and it is then
1877 close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
1878 + * It is also acquired when inserting an epoll fd onto another epoll
1879 + * fd. We do this so that we walk the epoll tree and ensure that this
1880 + * insertion does not create a cycle of epoll file descriptors, which
1881 + * could lead to deadlock. We need a global mutex to prevent two
1882 + * simultaneous inserts (A into B and B into A) from racing and
1883 + * constructing a cycle without either insert observing that it is
1884 + * going to.
1885 * It is possible to drop the "ep->mtx" and to use the global
1886 * mutex "epmutex" (together with "ep->lock") to have it working,
1887 * but having "ep->mtx" will make the interface more scalable.
1888 @@ -224,6 +231,9 @@ static int max_user_watches __read_mostly;
1889 */
1890 static DEFINE_MUTEX(epmutex);
1891
1892 +/* Used to check for epoll file descriptor inclusion loops */
1893 +static struct nested_calls poll_loop_ncalls;
1894 +
1895 /* Used for safe wake up implementation */
1896 static struct nested_calls poll_safewake_ncalls;
1897
1898 @@ -1195,6 +1205,62 @@ retry:
1899 return res;
1900 }
1901
1902 +/**
1903 + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1904 + * API, to verify that adding an epoll file inside another
1905 + * epoll structure, does not violate the constraints, in
1906 + * terms of closed loops, or too deep chains (which can
1907 + * result in excessive stack usage).
1908 + *
1909 + * @priv: Pointer to the epoll file to be currently checked.
1910 + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1911 + * data structure pointer.
1912 + * @call_nests: Current depth of the @ep_call_nested() call stack.
1913 + *
1914 + * Returns: Returns zero if adding the epoll @file inside current epoll
1915 + * structure @ep does not violate the constraints, or -1 otherwise.
1916 + */
1917 +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1918 +{
1919 + int error = 0;
1920 + struct file *file = priv;
1921 + struct eventpoll *ep = file->private_data;
1922 + struct rb_node *rbp;
1923 + struct epitem *epi;
1924 +
1925 + mutex_lock(&ep->mtx);
1926 + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1927 + epi = rb_entry(rbp, struct epitem, rbn);
1928 + if (unlikely(is_file_epoll(epi->ffd.file))) {
1929 + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1930 + ep_loop_check_proc, epi->ffd.file,
1931 + epi->ffd.file->private_data, current);
1932 + if (error != 0)
1933 + break;
1934 + }
1935 + }
1936 + mutex_unlock(&ep->mtx);
1937 +
1938 + return error;
1939 +}
1940 +
1941 +/**
1942 + * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1943 + * another epoll file (represented by @ep) does not create
1944 + * inside another epoll file (represented by @ep) does not create
1945 + *
1946 + * @ep: Pointer to the epoll private data structure.
1947 + * @file: Pointer to the epoll file to be checked.
1948 + *
1949 + * Returns: Returns zero if adding the epoll @file inside current epoll
1950 + * structure @ep does not violate the constraints, or -1 otherwise.
1951 + */
1952 +static int ep_loop_check(struct eventpoll *ep, struct file *file)
1953 +{
1954 + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1955 + ep_loop_check_proc, file, ep, current);
1956 +}
1957 +
1958 /*
1959 * Open an eventpoll file descriptor.
1960 */
1961 @@ -1243,6 +1309,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1962 struct epoll_event __user *, event)
1963 {
1964 int error;
1965 + int did_lock_epmutex = 0;
1966 struct file *file, *tfile;
1967 struct eventpoll *ep;
1968 struct epitem *epi;
1969 @@ -1284,6 +1351,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1970 */
1971 ep = file->private_data;
1972
1973 + /*
1974 + * When we insert an epoll file descriptor, inside another epoll file
1975 + * descriptor, there is the chance of creating closed loops, which are
1976 + * better handled here than in more critical paths.
1977 + *
1978 + * We hold epmutex across the loop check and the insert in this case, in
1979 + * order to prevent two separate inserts from racing and each doing the
1980 + * insert "at the same time" such that ep_loop_check passes on both
1981 + * before either one does the insert, thereby creating a cycle.
1982 + */
1983 + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
1984 + mutex_lock(&epmutex);
1985 + did_lock_epmutex = 1;
1986 + error = -ELOOP;
1987 + if (ep_loop_check(ep, tfile) != 0)
1988 + goto error_tgt_fput;
1989 + }
1990 +
1991 +
1992 mutex_lock(&ep->mtx);
1993
1994 /*
1995 @@ -1319,6 +1405,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1996 mutex_unlock(&ep->mtx);
1997
1998 error_tgt_fput:
1999 + if (unlikely(did_lock_epmutex))
2000 + mutex_unlock(&epmutex);
2001 +
2002 fput(tfile);
2003 error_fput:
2004 fput(file);
2005 @@ -1437,6 +1526,12 @@ static int __init eventpoll_init(void)
2006 max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2007 EP_ITEM_COST;
2008
2009 + /*
2010 + * Initialize the structure used to perform epoll file descriptor
2011 + * inclusion loops checks.
2012 + */
2013 + ep_nested_calls_init(&poll_loop_ncalls);
2014 +
2015 /* Initialize the structure used to perform safe poll wait head wake ups */
2016 ep_nested_calls_init(&poll_safewake_ncalls);
2017
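The eventpoll.c changes above walk the tree of nested epoll descriptors under epmutex and make EPOLL_CTL_ADD fail with -ELOOP when the insert would close a cycle. The user-space sketch below, assuming a kernel that carries this fix, is an illustration only:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int ep_a = epoll_create1(0);
	int ep_b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	if (ep_a < 0 || ep_b < 0) {
		perror("epoll_create1");
		return 1;
	}

	/* A watches B: accepted. */
	ev.data.fd = ep_b;
	if (epoll_ctl(ep_a, EPOLL_CTL_ADD, ep_b, &ev) < 0)
		perror("add B into A");

	/* B watches A: this would close the loop, so the kernel refuses. */
	ev.data.fd = ep_a;
	if (epoll_ctl(ep_b, EPOLL_CTL_ADD, ep_a, &ev) < 0)
		printf("add A into B rejected: %s (ELOOP expected)\n",
		       strerror(errno));

	close(ep_a);
	close(ep_b);
	return 0;
}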
2018 diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
2019 index f8aecd2..9a922b2 100644
2020 --- a/fs/ext2/namei.c
2021 +++ b/fs/ext2/namei.c
2022 @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
2023 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
2024 if (!new_de)
2025 goto out_dir;
2026 - inode_inc_link_count(old_inode);
2027 ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
2028 new_inode->i_ctime = CURRENT_TIME_SEC;
2029 if (dir_de)
2030 @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
2031 if (new_dir->i_nlink >= EXT2_LINK_MAX)
2032 goto out_dir;
2033 }
2034 - inode_inc_link_count(old_inode);
2035 err = ext2_add_link(new_dentry, old_inode);
2036 - if (err) {
2037 - inode_dec_link_count(old_inode);
2038 + if (err)
2039 goto out_dir;
2040 - }
2041 if (dir_de)
2042 inode_inc_link_count(new_dir);
2043 }
2044 @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
2045 /*
2046 * Like most other Unix systems, set the ctime for inodes on a
2047 * rename.
2048 - * inode_dec_link_count() will mark the inode dirty.
2049 */
2050 old_inode->i_ctime = CURRENT_TIME_SEC;
2051 + mark_inode_dirty(old_inode);
2052
2053 ext2_delete_entry (old_de, old_page);
2054 - inode_dec_link_count(old_inode);
2055
2056 if (dir_de) {
2057 if (old_dir != new_dir)
2058 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
2059 index 8b984a2..14a183a 100644
2060 --- a/fs/fuse/file.c
2061 +++ b/fs/fuse/file.c
2062 @@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
2063 return ff;
2064 }
2065
2066 +static void fuse_release_async(struct work_struct *work)
2067 +{
2068 + struct fuse_req *req;
2069 + struct fuse_conn *fc;
2070 + struct path path;
2071 +
2072 + req = container_of(work, struct fuse_req, misc.release.work);
2073 + path = req->misc.release.path;
2074 + fc = get_fuse_conn(path.dentry->d_inode);
2075 +
2076 + fuse_put_request(fc, req);
2077 + path_put(&path);
2078 +}
2079 +
2080 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
2081 {
2082 - path_put(&req->misc.release.path);
2083 + if (fc->destroy_req) {
2084 + /*
2085 + * If this is a fuseblk mount, then it's possible that
2086 + * releasing the path will result in releasing the
2087 + * super block and sending the DESTROY request. If
2088 + * the server is single threaded, this would hang.
2089 + * For this reason do the path_put() in a separate
2090 + * thread.
2091 + */
2092 + atomic_inc(&req->count);
2093 + INIT_WORK(&req->misc.release.work, fuse_release_async);
2094 + schedule_work(&req->misc.release.work);
2095 + } else {
2096 + path_put(&req->misc.release.path);
2097 + }
2098 }
2099
2100 -static void fuse_file_put(struct fuse_file *ff)
2101 +static void fuse_file_put(struct fuse_file *ff, bool sync)
2102 {
2103 if (atomic_dec_and_test(&ff->count)) {
2104 struct fuse_req *req = ff->reserved_req;
2105
2106 - req->end = fuse_release_end;
2107 - fuse_request_send_background(ff->fc, req);
2108 + if (sync) {
2109 + fuse_request_send(ff->fc, req);
2110 + path_put(&req->misc.release.path);
2111 + fuse_put_request(ff->fc, req);
2112 + } else {
2113 + req->end = fuse_release_end;
2114 + fuse_request_send_background(ff->fc, req);
2115 + }
2116 kfree(ff);
2117 }
2118 }
2119 @@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
2120 * Normally this will send the RELEASE request, however if
2121 * some asynchronous READ or WRITE requests are outstanding,
2122 * the sending will be delayed.
2123 + *
2124 + * Make the release synchronous if this is a fuseblk mount;
2125 + * synchronous RELEASE is allowed (and desirable) in this case
2126 + * because the server can be trusted not to screw up.
2127 */
2128 - fuse_file_put(ff);
2129 + fuse_file_put(ff, ff->fc->destroy_req != NULL);
2130 }
2131
2132 static int fuse_open(struct inode *inode, struct file *file)
2133 @@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
2134 page_cache_release(page);
2135 }
2136 if (req->ff)
2137 - fuse_file_put(req->ff);
2138 + fuse_file_put(req->ff, false);
2139 }
2140
2141 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
2142 @@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
2143 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
2144 {
2145 __free_page(req->pages[0]);
2146 - fuse_file_put(req->ff);
2147 + fuse_file_put(req->ff, false);
2148 }
2149
2150 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
2151 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
2152 index 57d4a3a..ccb5225 100644
2153 --- a/fs/fuse/fuse_i.h
2154 +++ b/fs/fuse/fuse_i.h
2155 @@ -21,6 +21,7 @@
2156 #include <linux/rwsem.h>
2157 #include <linux/rbtree.h>
2158 #include <linux/poll.h>
2159 +#include <linux/workqueue.h>
2160
2161 /** Max number of pages that can be used in a single read request */
2162 #define FUSE_MAX_PAGES_PER_REQ 32
2163 @@ -257,7 +258,10 @@ struct fuse_req {
2164 union {
2165 struct fuse_forget_in forget_in;
2166 struct {
2167 - struct fuse_release_in in;
2168 + union {
2169 + struct fuse_release_in in;
2170 + struct work_struct work;
2171 + };
2172 struct path path;
2173 } release;
2174 struct fuse_init_in init_in;
2175 diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
2176 index ebef7ab..f910999 100644
2177 --- a/fs/gfs2/main.c
2178 +++ b/fs/gfs2/main.c
2179 @@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
2180 struct address_space *mapping = (struct address_space *)(gl + 1);
2181
2182 gfs2_init_glock_once(gl);
2183 - memset(mapping, 0, sizeof(*mapping));
2184 - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
2185 - spin_lock_init(&mapping->tree_lock);
2186 - spin_lock_init(&mapping->i_mmap_lock);
2187 - INIT_LIST_HEAD(&mapping->private_list);
2188 - spin_lock_init(&mapping->private_lock);
2189 - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
2190 - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
2191 + address_space_init_once(mapping);
2192 }
2193
2194 /**
2195 diff --git a/fs/inode.c b/fs/inode.c
2196 index ae2727a..e45734b 100644
2197 --- a/fs/inode.c
2198 +++ b/fs/inode.c
2199 @@ -280,6 +280,20 @@ static void destroy_inode(struct inode *inode)
2200 kmem_cache_free(inode_cachep, (inode));
2201 }
2202
2203 +void address_space_init_once(struct address_space *mapping)
2204 +{
2205 + memset(mapping, 0, sizeof(*mapping));
2206 + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
2207 + spin_lock_init(&mapping->tree_lock);
2208 + spin_lock_init(&mapping->i_mmap_lock);
2209 + INIT_LIST_HEAD(&mapping->private_list);
2210 + spin_lock_init(&mapping->private_lock);
2211 + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
2212 + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
2213 + mutex_init(&mapping->unmap_mutex);
2214 +}
2215 +EXPORT_SYMBOL(address_space_init_once);
2216 +
2217 /*
2218 * These are initializations that only need to be done
2219 * once, because the fields are idempotent across use
2220 @@ -293,13 +307,7 @@ void inode_init_once(struct inode *inode)
2221 INIT_LIST_HEAD(&inode->i_devices);
2222 INIT_LIST_HEAD(&inode->i_wb_list);
2223 INIT_LIST_HEAD(&inode->i_lru);
2224 - INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
2225 - spin_lock_init(&inode->i_data.tree_lock);
2226 - spin_lock_init(&inode->i_data.i_mmap_lock);
2227 - INIT_LIST_HEAD(&inode->i_data.private_list);
2228 - spin_lock_init(&inode->i_data.private_lock);
2229 - INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
2230 - INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
2231 + address_space_init_once(&inode->i_data);
2232 i_size_ordered_init(inode);
2233 #ifdef CONFIG_FSNOTIFY
2234 INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
2235 @@ -524,11 +532,14 @@ void evict_inodes(struct super_block *sb)
2236 /**
2237 * invalidate_inodes - attempt to free all inodes on a superblock
2238 * @sb: superblock to operate on
2239 + * @kill_dirty: flag to guide handling of dirty inodes
2240 *
2241 * Attempts to free all inodes for a given superblock. If there were any
2242 * busy inodes return a non-zero value, else zero.
2243 + * If @kill_dirty is set, discard dirty inodes too, otherwise treat
2244 + * them as busy.
2245 */
2246 -int invalidate_inodes(struct super_block *sb)
2247 +int invalidate_inodes(struct super_block *sb, bool kill_dirty)
2248 {
2249 int busy = 0;
2250 struct inode *inode, *next;
2251 @@ -540,6 +551,10 @@ int invalidate_inodes(struct super_block *sb)
2252 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
2253 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
2254 continue;
2255 + if (inode->i_state & I_DIRTY && !kill_dirty) {
2256 + busy = 1;
2257 + continue;
2258 + }
2259 if (atomic_read(&inode->i_count)) {
2260 busy = 1;
2261 continue;
2262 diff --git a/fs/internal.h b/fs/internal.h
2263 index e43b9a4..5e3a1aa 100644
2264 --- a/fs/internal.h
2265 +++ b/fs/internal.h
2266 @@ -107,4 +107,4 @@ extern void release_open_intent(struct nameidata *);
2267 */
2268 extern int get_nr_dirty_inodes(void);
2269 extern void evict_inodes(struct super_block *);
2270 -extern int invalidate_inodes(struct super_block *);
2271 +extern int invalidate_inodes(struct super_block *, bool);
2272 diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
2273 index 5115814..3b008b8 100644
2274 --- a/fs/nilfs2/btnode.c
2275 +++ b/fs/nilfs2/btnode.c
2276 @@ -35,11 +35,6 @@
2277 #include "btnode.h"
2278
2279
2280 -void nilfs_btnode_cache_init_once(struct address_space *btnc)
2281 -{
2282 - nilfs_mapping_init_once(btnc);
2283 -}
2284 -
2285 static const struct address_space_operations def_btnode_aops = {
2286 .sync_page = block_sync_page,
2287 };
2288 diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
2289 index 7903749..1b8ebd8 100644
2290 --- a/fs/nilfs2/btnode.h
2291 +++ b/fs/nilfs2/btnode.h
2292 @@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
2293 struct buffer_head *newbh;
2294 };
2295
2296 -void nilfs_btnode_cache_init_once(struct address_space *);
2297 void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
2298 void nilfs_btnode_cache_clear(struct address_space *);
2299 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
2300 diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
2301 index 39a5b84..bdb8de6 100644
2302 --- a/fs/nilfs2/mdt.c
2303 +++ b/fs/nilfs2/mdt.c
2304 @@ -460,9 +460,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
2305 struct backing_dev_info *bdi = inode->i_sb->s_bdi;
2306
2307 INIT_LIST_HEAD(&shadow->frozen_buffers);
2308 - nilfs_mapping_init_once(&shadow->frozen_data);
2309 + address_space_init_once(&shadow->frozen_data);
2310 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
2311 - nilfs_mapping_init_once(&shadow->frozen_btnodes);
2312 + address_space_init_once(&shadow->frozen_btnodes);
2313 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
2314 mi->mi_shadow = shadow;
2315 return 0;
2316 diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
2317 index fb9e8a8..b7e2726 100644
2318 --- a/fs/nilfs2/page.h
2319 +++ b/fs/nilfs2/page.h
2320 @@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
2321 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
2322 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
2323 void nilfs_clear_dirty_pages(struct address_space *);
2324 -void nilfs_mapping_init_once(struct address_space *mapping);
2325 void nilfs_mapping_init(struct address_space *mapping,
2326 struct backing_dev_info *bdi,
2327 const struct address_space_operations *aops);
2328 diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
2329 index 687d090..4cf8cec 100644
2330 --- a/fs/nilfs2/segment.c
2331 +++ b/fs/nilfs2/segment.c
2332 @@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
2333 nilfs_segctor_map_segsum_entry(
2334 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
2335
2336 - if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
2337 + if (NILFS_I(inode)->i_root &&
2338 + !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
2339 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
2340 /* skip finfo */
2341 }
2342 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
2343 index 2940a58..388e26b 100644
2344 --- a/fs/nilfs2/super.c
2345 +++ b/fs/nilfs2/super.c
2346 @@ -1263,7 +1263,7 @@ static void nilfs_inode_init_once(void *obj)
2347 #ifdef CONFIG_NILFS_XATTR
2348 init_rwsem(&ii->xattr_sem);
2349 #endif
2350 - nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
2351 + address_space_init_once(&ii->i_btnode_cache);
2352 ii->i_bmap = &ii->i_bmap_data;
2353 inode_init_once(&ii->vfs_inode);
2354 }
2355 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2356 index b5f9160..19ebc5a 100644
2357 --- a/fs/ocfs2/refcounttree.c
2358 +++ b/fs/ocfs2/refcounttree.c
2359 @@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
2360 u32 num_clusters, unsigned int e_flags)
2361 {
2362 int ret, delete, index, credits = 0;
2363 - u32 new_bit, new_len;
2364 + u32 new_bit, new_len, orig_num_clusters;
2365 unsigned int set_len;
2366 struct ocfs2_super *osb = OCFS2_SB(sb);
2367 handle_t *handle;
2368 @@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
2369 goto out;
2370 }
2371
2372 + orig_num_clusters = num_clusters;
2373 +
2374 while (num_clusters) {
2375 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
2376 p_cluster, num_clusters,
2377 @@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
2378 * in write-back mode.
2379 */
2380 if (context->get_clusters == ocfs2_di_get_clusters) {
2381 - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
2382 + ret = ocfs2_cow_sync_writeback(sb, context, cpos,
2383 + orig_num_clusters);
2384 if (ret)
2385 mlog_errno(ret);
2386 }
2387 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
2388 index cfeab7c..02e3cd0 100644
2389 --- a/fs/ocfs2/super.c
2390 +++ b/fs/ocfs2/super.c
2391 @@ -1310,7 +1310,7 @@ static int ocfs2_parse_options(struct super_block *sb,
2392 struct mount_options *mopt,
2393 int is_remount)
2394 {
2395 - int status;
2396 + int status, user_stack = 0;
2397 char *p;
2398 u32 tmp;
2399
2400 @@ -1453,6 +1453,15 @@ static int ocfs2_parse_options(struct super_block *sb,
2401 memcpy(mopt->cluster_stack, args[0].from,
2402 OCFS2_STACK_LABEL_LEN);
2403 mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
2404 + /*
2405 + * Open code the memcmp here as we don't have
2406 + * an osb to pass to
2407 + * ocfs2_userspace_stack().
2408 + */
2409 + if (memcmp(mopt->cluster_stack,
2410 + OCFS2_CLASSIC_CLUSTER_STACK,
2411 + OCFS2_STACK_LABEL_LEN))
2412 + user_stack = 1;
2413 break;
2414 case Opt_inode64:
2415 mopt->mount_opt |= OCFS2_MOUNT_INODE64;
2416 @@ -1508,13 +1517,16 @@ static int ocfs2_parse_options(struct super_block *sb,
2417 }
2418 }
2419
2420 - /* Ensure only one heartbeat mode */
2421 - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
2422 - OCFS2_MOUNT_HB_NONE);
2423 - if (hweight32(tmp) != 1) {
2424 - mlog(ML_ERROR, "Invalid heartbeat mount options\n");
2425 - status = 0;
2426 - goto bail;
2427 + if (user_stack == 0) {
2428 + /* Ensure only one heartbeat mode */
2429 + tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
2430 + OCFS2_MOUNT_HB_GLOBAL |
2431 + OCFS2_MOUNT_HB_NONE);
2432 + if (hweight32(tmp) != 1) {
2433 + mlog(ML_ERROR, "Invalid heartbeat mount options\n");
2434 + status = 0;
2435 + goto bail;
2436 + }
2437 }
2438
2439 status = 1;
2440 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
2441 index 789c625..b10e354 100644
2442 --- a/fs/partitions/ldm.c
2443 +++ b/fs/partitions/ldm.c
2444 @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
2445 }
2446
2447 vm->vblk_size = get_unaligned_be32(data + 0x08);
2448 + if (vm->vblk_size == 0) {
2449 + ldm_error ("Illegal VBLK size");
2450 + return false;
2451 + }
2452 +
2453 vm->vblk_offset = get_unaligned_be32(data + 0x0C);
2454 vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
2455
2456 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
2457 index 274eaaa..4e53d77 100644
2458 --- a/include/drm/drmP.h
2459 +++ b/include/drm/drmP.h
2460 @@ -1022,7 +1022,7 @@ struct drm_device {
2461 struct platform_device *platformdev; /**< Platform device struture */
2462
2463 struct drm_sg_mem *sg; /**< Scatter gather memory */
2464 - int num_crtcs; /**< Number of CRTCs on this device */
2465 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
2466 void *dev_private; /**< device private data */
2467 void *mm_private;
2468 struct address_space *dev_mapping;
2469 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2470 index 7572b19..4554ab6 100644
2471 --- a/include/linux/blkdev.h
2472 +++ b/include/linux/blkdev.h
2473 @@ -700,7 +700,7 @@ extern void blk_start_queue(struct request_queue *q);
2474 extern void blk_stop_queue(struct request_queue *q);
2475 extern void blk_sync_queue(struct request_queue *q);
2476 extern void __blk_stop_queue(struct request_queue *q);
2477 -extern void __blk_run_queue(struct request_queue *);
2478 +extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
2479 extern void blk_run_queue(struct request_queue *);
2480 extern int blk_rq_map_user(struct request_queue *, struct request *,
2481 struct rq_map_data *, void __user *, unsigned long,
2482 @@ -1089,7 +1089,6 @@ static inline void put_dev_sector(Sector p)
2483
2484 struct work_struct;
2485 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
2486 -int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
2487
2488 #ifdef CONFIG_BLK_CGROUP
2489 /*
2490 @@ -1137,7 +1136,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
2491 extern int blk_throtl_init(struct request_queue *q);
2492 extern void blk_throtl_exit(struct request_queue *q);
2493 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
2494 -extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
2495 extern void throtl_shutdown_timer_wq(struct request_queue *q);
2496 #else /* CONFIG_BLK_DEV_THROTTLING */
2497 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
2498 @@ -1147,7 +1145,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
2499
2500 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
2501 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
2502 -static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
2503 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
2504 #endif /* CONFIG_BLK_DEV_THROTTLING */
2505
2506 diff --git a/include/linux/fs.h b/include/linux/fs.h
2507 index 090f0ea..0570a28 100644
2508 --- a/include/linux/fs.h
2509 +++ b/include/linux/fs.h
2510 @@ -646,6 +646,7 @@ struct address_space {
2511 spinlock_t private_lock; /* for use by the address_space */
2512 struct list_head private_list; /* ditto */
2513 struct address_space *assoc_mapping; /* ditto */
2514 + struct mutex unmap_mutex; /* to protect unmapping */
2515 } __attribute__((aligned(sizeof(long))));
2516 /*
2517 * On most architectures that alignment is already the case; but
2518 @@ -2117,7 +2118,7 @@ extern void check_disk_size_change(struct gendisk *disk,
2519 struct block_device *bdev);
2520 extern int revalidate_disk(struct gendisk *);
2521 extern int check_disk_change(struct block_device *);
2522 -extern int __invalidate_device(struct block_device *);
2523 +extern int __invalidate_device(struct block_device *, bool);
2524 extern int invalidate_partition(struct gendisk *, int);
2525 #endif
2526 unsigned long invalidate_mapping_pages(struct address_space *mapping,
2527 @@ -2203,6 +2204,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
2528
2529 extern int inode_init_always(struct super_block *, struct inode *);
2530 extern void inode_init_once(struct inode *);
2531 +extern void address_space_init_once(struct address_space *mapping);
2532 extern void ihold(struct inode * inode);
2533 extern void iput(struct inode *);
2534 extern struct inode * igrab(struct inode *);
2535 diff --git a/include/linux/pm.h b/include/linux/pm.h
2536 index 40f3f45..f09b769 100644
2537 --- a/include/linux/pm.h
2538 +++ b/include/linux/pm.h
2539 @@ -470,6 +470,8 @@ struct dev_pm_info {
2540 struct list_head entry;
2541 struct completion completion;
2542 struct wakeup_source *wakeup;
2543 +#else
2544 + unsigned int should_wakeup:1;
2545 #endif
2546 #ifdef CONFIG_PM_RUNTIME
2547 struct timer_list suspend_timer;
2548 diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
2549 index 9cff00d..03a67db 100644
2550 --- a/include/linux/pm_wakeup.h
2551 +++ b/include/linux/pm_wakeup.h
2552 @@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
2553 return dev->power.can_wakeup;
2554 }
2555
2556 -static inline bool device_may_wakeup(struct device *dev)
2557 -{
2558 - return false;
2559 -}
2560 -
2561 static inline struct wakeup_source *wakeup_source_create(const char *name)
2562 {
2563 return NULL;
2564 @@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
2565
2566 static inline int device_wakeup_enable(struct device *dev)
2567 {
2568 - return -EINVAL;
2569 + dev->power.should_wakeup = true;
2570 + return 0;
2571 }
2572
2573 static inline int device_wakeup_disable(struct device *dev)
2574 {
2575 + dev->power.should_wakeup = false;
2576 return 0;
2577 }
2578
2579 -static inline int device_init_wakeup(struct device *dev, bool val)
2580 +static inline int device_set_wakeup_enable(struct device *dev, bool enable)
2581 {
2582 - dev->power.can_wakeup = val;
2583 - return val ? -EINVAL : 0;
2584 + dev->power.should_wakeup = enable;
2585 + return 0;
2586 }
2587
2588 +static inline int device_init_wakeup(struct device *dev, bool val)
2589 +{
2590 + device_set_wakeup_capable(dev, val);
2591 + device_set_wakeup_enable(dev, val);
2592 + return 0;
2593 +}
2594
2595 -static inline int device_set_wakeup_enable(struct device *dev, bool enable)
2596 +static inline bool device_may_wakeup(struct device *dev)
2597 {
2598 - return -EINVAL;
2599 + return dev->power.can_wakeup && dev->power.should_wakeup;
2600 }
2601
2602 static inline void __pm_stay_awake(struct wakeup_source *ws) {}
2603 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
2604 index e4cc21c..833e676 100644
2605 --- a/include/linux/vmstat.h
2606 +++ b/include/linux/vmstat.h
2607 @@ -254,8 +254,11 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
2608 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
2609
2610 void refresh_cpu_vm_stats(int);
2611 -void reduce_pgdat_percpu_threshold(pg_data_t *pgdat);
2612 -void restore_pgdat_percpu_threshold(pg_data_t *pgdat);
2613 +
2614 +int calculate_pressure_threshold(struct zone *zone);
2615 +int calculate_normal_threshold(struct zone *zone);
2616 +void set_pgdat_percpu_threshold(pg_data_t *pgdat,
2617 + int (*calculate_pressure)(struct zone *));
2618 #else /* CONFIG_SMP */
2619
2620 /*
2621 @@ -300,8 +303,7 @@ static inline void __dec_zone_page_state(struct page *page,
2622 #define dec_zone_page_state __dec_zone_page_state
2623 #define mod_zone_page_state __mod_zone_page_state
2624
2625 -static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { }
2626 -static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { }
2627 +#define set_pgdat_percpu_threshold(pgdat, callback) { }
2628
2629 static inline void refresh_cpu_vm_stats(int cpu) { }
2630 #endif
2631 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
2632 index 48b2761..a3b5aff 100644
2633 --- a/kernel/time/tick-broadcast.c
2634 +++ b/kernel/time/tick-broadcast.c
2635 @@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
2636 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
2637 }
2638
2639 +/*
2640 + * Check whether the broadcast device supports oneshot.
2641 + */
2642 +bool tick_broadcast_oneshot_available(void)
2643 +{
2644 + struct clock_event_device *bc = tick_broadcast_device.evtdev;
2645 +
2646 + return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
2647 +}
2648 +
2649 #endif
2650 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
2651 index b6b898d..61e296b 100644
2652 --- a/kernel/time/tick-common.c
2653 +++ b/kernel/time/tick-common.c
2654 @@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
2655 {
2656 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
2657
2658 - return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
2659 + if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
2660 + return 0;
2661 + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
2662 + return 1;
2663 + return tick_broadcast_oneshot_available();
2664 }
2665
2666 /*
2667 diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
2668 index 290eefb..f65d3a7 100644
2669 --- a/kernel/time/tick-internal.h
2670 +++ b/kernel/time/tick-internal.h
2671 @@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
2672 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
2673 extern int tick_broadcast_oneshot_active(void);
2674 extern void tick_check_oneshot_broadcast(int cpu);
2675 +bool tick_broadcast_oneshot_available(void);
2676 # else /* BROADCAST */
2677 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
2678 {
2679 @@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
2680 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
2681 static inline int tick_broadcast_oneshot_active(void) { return 0; }
2682 static inline void tick_check_oneshot_broadcast(int cpu) { }
2683 +static inline bool tick_broadcast_oneshot_available(void) { return true; }
2684 # endif /* !BROADCAST */
2685
2686 #else /* !ONESHOT */
2687 @@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
2688 return 0;
2689 }
2690 static inline int tick_broadcast_oneshot_active(void) { return 0; }
2691 +static inline bool tick_broadcast_oneshot_available(void) { return false; }
2692 #endif /* !TICK_ONESHOT */
2693
2694 /*
2695 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
2696 index 7c06ee5..554a4bb 100644
2697 --- a/lib/swiotlb.c
2698 +++ b/lib/swiotlb.c
2699 @@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
2700 /*
2701 * Ensure that the address returned is DMA'ble
2702 */
2703 - if (!dma_capable(dev, dev_addr, size))
2704 - panic("map_single: bounce buffer is not DMA'ble");
2705 + if (!dma_capable(dev, dev_addr, size)) {
2706 + swiotlb_tbl_unmap_single(dev, map, size, dir);
2707 + dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
2708 + }
2709
2710 return dev_addr;
2711 }
2712 diff --git a/mm/memory.c b/mm/memory.c
2713 index 02e48aa..e8b2f03 100644
2714 --- a/mm/memory.c
2715 +++ b/mm/memory.c
2716 @@ -2572,6 +2572,7 @@ void unmap_mapping_range(struct address_space *mapping,
2717 details.last_index = ULONG_MAX;
2718 details.i_mmap_lock = &mapping->i_mmap_lock;
2719
2720 + mutex_lock(&mapping->unmap_mutex);
2721 spin_lock(&mapping->i_mmap_lock);
2722
2723 /* Protect against endless unmapping loops */
2724 @@ -2588,6 +2589,7 @@ void unmap_mapping_range(struct address_space *mapping,
2725 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2726 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2727 spin_unlock(&mapping->i_mmap_lock);
2728 + mutex_unlock(&mapping->unmap_mutex);
2729 }
2730 EXPORT_SYMBOL(unmap_mapping_range);
2731
2732 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2733 index 91b6d8c..92e89a0 100644
2734 --- a/mm/page_alloc.c
2735 +++ b/mm/page_alloc.c
2736 @@ -5335,10 +5335,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
2737 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
2738 unsigned long check = pfn + iter;
2739
2740 - if (!pfn_valid_within(check)) {
2741 - iter++;
2742 + if (!pfn_valid_within(check))
2743 continue;
2744 - }
2745 +
2746 page = pfn_to_page(check);
2747 if (!page_count(page)) {
2748 if (PageBuddy(page))
2749 diff --git a/mm/vmscan.c b/mm/vmscan.c
2750 index 5da4295..86f8c34 100644
2751 --- a/mm/vmscan.c
2752 +++ b/mm/vmscan.c
2753 @@ -2448,9 +2448,24 @@ static int kswapd(void *p)
2754 */
2755 if (!sleeping_prematurely(pgdat, order, remaining)) {
2756 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2757 - restore_pgdat_percpu_threshold(pgdat);
2758 +
2759 + /*
2760 + * vmstat counters are not perfectly
2761 + * accurate and the estimated value
2762 + * for counters such as NR_FREE_PAGES
2763 + * can deviate from the true value by
2764 + * nr_online_cpus * threshold. To
2765 + * avoid the zone watermarks being
2766 + * breached while under pressure, we
2767 + * reduce the per-cpu vmstat threshold
2768 + * while kswapd is awake and restore
2769 + * them before going back to sleep.
2770 + */
2771 + set_pgdat_percpu_threshold(pgdat,
2772 + calculate_normal_threshold);
2773 schedule();
2774 - reduce_pgdat_percpu_threshold(pgdat);
2775 + set_pgdat_percpu_threshold(pgdat,
2776 + calculate_pressure_threshold);
2777 } else {
2778 if (remaining)
2779 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2780 diff --git a/mm/vmstat.c b/mm/vmstat.c
2781 index 8aff417..3555636 100644
2782 --- a/mm/vmstat.c
2783 +++ b/mm/vmstat.c
2784 @@ -83,7 +83,7 @@ EXPORT_SYMBOL(vm_stat);
2785
2786 #ifdef CONFIG_SMP
2787
2788 -static int calculate_pressure_threshold(struct zone *zone)
2789 +int calculate_pressure_threshold(struct zone *zone)
2790 {
2791 int threshold;
2792 int watermark_distance;
2793 @@ -107,7 +107,7 @@ static int calculate_pressure_threshold(struct zone *zone)
2794 return threshold;
2795 }
2796
2797 -static int calculate_threshold(struct zone *zone)
2798 +int calculate_normal_threshold(struct zone *zone)
2799 {
2800 int threshold;
2801 int mem; /* memory in 128 MB units */
2802 @@ -166,7 +166,7 @@ static void refresh_zone_stat_thresholds(void)
2803 for_each_populated_zone(zone) {
2804 unsigned long max_drift, tolerate_drift;
2805
2806 - threshold = calculate_threshold(zone);
2807 + threshold = calculate_normal_threshold(zone);
2808
2809 for_each_online_cpu(cpu)
2810 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
2811 @@ -185,46 +185,24 @@ static void refresh_zone_stat_thresholds(void)
2812 }
2813 }
2814
2815 -void reduce_pgdat_percpu_threshold(pg_data_t *pgdat)
2816 +void set_pgdat_percpu_threshold(pg_data_t *pgdat,
2817 + int (*calculate_pressure)(struct zone *))
2818 {
2819 struct zone *zone;
2820 int cpu;
2821 int threshold;
2822 int i;
2823
2824 - get_online_cpus();
2825 - for (i = 0; i < pgdat->nr_zones; i++) {
2826 - zone = &pgdat->node_zones[i];
2827 - if (!zone->percpu_drift_mark)
2828 - continue;
2829 -
2830 - threshold = calculate_pressure_threshold(zone);
2831 - for_each_online_cpu(cpu)
2832 - per_cpu_ptr(zone->pageset, cpu)->stat_threshold
2833 - = threshold;
2834 - }
2835 - put_online_cpus();
2836 -}
2837 -
2838 -void restore_pgdat_percpu_threshold(pg_data_t *pgdat)
2839 -{
2840 - struct zone *zone;
2841 - int cpu;
2842 - int threshold;
2843 - int i;
2844 -
2845 - get_online_cpus();
2846 for (i = 0; i < pgdat->nr_zones; i++) {
2847 zone = &pgdat->node_zones[i];
2848 if (!zone->percpu_drift_mark)
2849 continue;
2850
2851 - threshold = calculate_threshold(zone);
2852 - for_each_online_cpu(cpu)
2853 + threshold = (*calculate_pressure)(zone);
2854 + for_each_possible_cpu(cpu)
2855 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
2856 = threshold;
2857 }
2858 - put_online_cpus();
2859 }
2860
2861 /*
2862 diff --git a/net/dccp/input.c b/net/dccp/input.c
2863 index e424a09..421f42c 100644
2864 --- a/net/dccp/input.c
2865 +++ b/net/dccp/input.c
2866 @@ -621,6 +621,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
2867 /* Caller (dccp_v4_do_rcv) will send Reset */
2868 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
2869 return 1;
2870 + } else if (sk->sk_state == DCCP_CLOSED) {
2871 + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
2872 + return 1;
2873 }
2874
2875 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
2876 @@ -683,10 +686,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
2877 }
2878
2879 switch (sk->sk_state) {
2880 - case DCCP_CLOSED:
2881 - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
2882 - return 1;
2883 -
2884 case DCCP_REQUESTING:
2885 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
2886 if (queued >= 0)
2887 diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
2888 index 739435a..cfa7a5e 100644
2889 --- a/net/dns_resolver/dns_key.c
2890 +++ b/net/dns_resolver/dns_key.c
2891 @@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
2892 size_t result_len = 0;
2893 const char *data = _data, *end, *opt;
2894
2895 - kenter("%%%d,%s,'%s',%zu",
2896 - key->serial, key->description, data, datalen);
2897 + kenter("%%%d,%s,'%*.*s',%zu",
2898 + key->serial, key->description,
2899 + (int)datalen, (int)datalen, data, datalen);
2900
2901 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
2902 return -EINVAL;
2903 @@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
2904 seq_printf(m, ": %u", key->datalen);
2905 }
2906
2907 +/*
2908 + * read the DNS data
2909 + * - the key's semaphore is read-locked
2910 + */
2911 +static long dns_resolver_read(const struct key *key,
2912 + char __user *buffer, size_t buflen)
2913 +{
2914 + if (key->type_data.x[0])
2915 + return key->type_data.x[0];
2916 +
2917 + return user_read(key, buffer, buflen);
2918 +}
2919 +
2920 struct key_type key_type_dns_resolver = {
2921 .name = "dns_resolver",
2922 .instantiate = dns_resolver_instantiate,
2923 @@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
2924 .revoke = user_revoke,
2925 .destroy = user_destroy,
2926 .describe = dns_resolver_describe,
2927 - .read = user_read,
2928 + .read = dns_resolver_read,
2929 };
2930
2931 static int __init init_dns_resolver(void)
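The dns_resolver_read() hook added above lets the payload of dns_resolver keys be read back from user space. The sketch below uses libkeyutils and assumes the library is installed, a dns_resolver upcall handler is configured, and the program is linked with -lkeyutils; the hostname is only an example.

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	key_serial_t key;
	void *payload = NULL;
	long len;

	key = request_key("dns_resolver", "example.com", NULL,
			  KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("request_key");
		return 1;
	}

	/* Per the hook above, the read returns the stored payload, or the
	 * saved error for keys instantiated with an error option. */
	len = keyctl_read_alloc(key, &payload);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}
	printf("%.*s\n", (int)len, (char *)payload);
	free(payload);
	return 0;
}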
2932 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
2933 index dc94b03..ab8523b 100644
2934 --- a/net/ipv4/devinet.c
2935 +++ b/net/ipv4/devinet.c
2936 @@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
2937 return mtu >= 68;
2938 }
2939
2940 +static void inetdev_send_gratuitous_arp(struct net_device *dev,
2941 + struct in_device *in_dev)
2942 +
2943 +{
2944 + struct in_ifaddr *ifa = in_dev->ifa_list;
2945 +
2946 + if (!ifa)
2947 + return;
2948 +
2949 + arp_send(ARPOP_REQUEST, ETH_P_ARP,
2950 + ifa->ifa_address, dev,
2951 + ifa->ifa_address, NULL,
2952 + dev->dev_addr, NULL);
2953 +}
2954 +
2955 /* Called only under RTNL semaphore */
2956
2957 static int inetdev_event(struct notifier_block *this, unsigned long event,
2958 @@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
2959 }
2960 ip_mc_up(in_dev);
2961 /* fall through */
2962 - case NETDEV_NOTIFY_PEERS:
2963 case NETDEV_CHANGEADDR:
2964 + if (!IN_DEV_ARP_NOTIFY(in_dev))
2965 + break;
2966 + /* fall through */
2967 + case NETDEV_NOTIFY_PEERS:
2968 /* Send gratuitous ARP to notify of link change */
2969 - if (IN_DEV_ARP_NOTIFY(in_dev)) {
2970 - struct in_ifaddr *ifa = in_dev->ifa_list;
2971 -
2972 - if (ifa)
2973 - arp_send(ARPOP_REQUEST, ETH_P_ARP,
2974 - ifa->ifa_address, dev,
2975 - ifa->ifa_address, NULL,
2976 - dev->dev_addr, NULL);
2977 - }
2978 + inetdev_send_gratuitous_arp(dev, in_dev);
2979 break;
2980 case NETDEV_DOWN:
2981 ip_mc_down(in_dev);
2982 diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
2983 index c5af909..3c8dfa1 100644
2984 --- a/net/ipv4/inet_timewait_sock.c
2985 +++ b/net/ipv4/inet_timewait_sock.c
2986 @@ -505,7 +505,9 @@ restart:
2987 }
2988
2989 rcu_read_unlock();
2990 + local_bh_disable();
2991 inet_twsk_deschedule(tw, twdr);
2992 + local_bh_enable();
2993 inet_twsk_put(tw);
2994 goto restart_rcu;
2995 }
2996 diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
2997 index 12222ee..8d26d55 100644
2998 --- a/net/wireless/wext-compat.c
2999 +++ b/net/wireless/wext-compat.c
3000 @@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
3001 return freq;
3002 if (freq == 0)
3003 return -EINVAL;
3004 - wdev_lock(wdev);
3005 mutex_lock(&rdev->devlist_mtx);
3006 + wdev_lock(wdev);
3007 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
3008 - mutex_unlock(&rdev->devlist_mtx);
3009 wdev_unlock(wdev);
3010 + mutex_unlock(&rdev->devlist_mtx);
3011 return err;
3012 default:
3013 return -EOPNOTSUPP;
3014 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3015 index fc5e027..4d964da 100644
3016 --- a/sound/pci/hda/patch_conexant.c
3017 +++ b/sound/pci/hda/patch_conexant.c
3018 @@ -3106,6 +3106,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3019 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
3020 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
3021 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3022 + SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
3023 + SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
3024 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3025 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
3026 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3027 @@ -3890,6 +3892,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
3028 .patch = patch_cxt5066 },
3029 { .id = 0x14f15069, .name = "CX20585",
3030 .patch = patch_cxt5066 },
3031 + { .id = 0x14f1506e, .name = "CX20590",
3032 + .patch = patch_cxt5066 },
3033 { .id = 0x14f15097, .name = "CX20631",
3034 .patch = patch_conexant_auto },
3035 { .id = 0x14f15098, .name = "CX20632",
3036 @@ -3916,6 +3920,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
3037 MODULE_ALIAS("snd-hda-codec-id:14f15067");
3038 MODULE_ALIAS("snd-hda-codec-id:14f15068");
3039 MODULE_ALIAS("snd-hda-codec-id:14f15069");
3040 +MODULE_ALIAS("snd-hda-codec-id:14f1506e");
3041 MODULE_ALIAS("snd-hda-codec-id:14f15097");
3042 MODULE_ALIAS("snd-hda-codec-id:14f15098");
3043 MODULE_ALIAS("snd-hda-codec-id:14f150a1");
3044 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
3045 index d1c3f8d..e1b775d 100644
3046 --- a/sound/pci/hda/patch_via.c
3047 +++ b/sound/pci/hda/patch_via.c
3048 @@ -568,7 +568,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
3049 hda_nid_t nid = cfg->inputs[i].pin;
3050 if (spec->smart51_enabled && is_smart51_pins(spec, nid))
3051 ctl = PIN_OUT;
3052 - else if (i == AUTO_PIN_MIC)
3053 + else if (cfg->inputs[i].type == AUTO_PIN_MIC)
3054 ctl = PIN_VREF50;
3055 else
3056 ctl = PIN_IN;
3057 diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
3058 index dd4fffd..85934ee 100644
3059 --- a/sound/soc/imx/eukrea-tlv320.c
3060 +++ b/sound/soc/imx/eukrea-tlv320.c
3061 @@ -80,7 +80,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
3062 .name = "tlv320aic23",
3063 .stream_name = "TLV320AIC23",
3064 .codec_dai_name = "tlv320aic23-hifi",
3065 - .platform_name = "imx-pcm-audio.0",
3066 + .platform_name = "imx-fiq-pcm-audio.0",
3067 .codec_name = "tlv320aic23-codec.0-001a",
3068 .cpu_dai_name = "imx-ssi.0",
3069 .ops = &eukrea_tlv320_snd_ops,
3070 diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
3071 index c82cedb..3b721a2 100644
3072 --- a/sound/soc/pxa/e740_wm9705.c
3073 +++ b/sound/soc/pxa/e740_wm9705.c
3074 @@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
3075 {
3076 .name = "AC97",
3077 .stream_name = "AC97 HiFi",
3078 - .cpu_dai_name = "pxa-ac97.0",
3079 + .cpu_dai_name = "pxa2xx-ac97",
3080 .codec_dai_name = "wm9705-hifi",
3081 .platform_name = "pxa-pcm-audio",
3082 .codec_name = "wm9705-codec",
3083 @@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
3084 {
3085 .name = "AC97 Aux",
3086 .stream_name = "AC97 Aux",
3087 - .cpu_dai_name = "pxa-ac97.1",
3088 + .cpu_dai_name = "pxa2xx-ac97-aux",
3089 .codec_dai_name = "wm9705-aux",
3090 .platform_name = "pxa-pcm-audio",
3091 .codec_name = "wm9705-codec",
3092 diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
3093 index 4c14380..226fae4 100644
3094 --- a/sound/soc/pxa/e750_wm9705.c
3095 +++ b/sound/soc/pxa/e750_wm9705.c
3096 @@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
3097 {
3098 .name = "AC97",
3099 .stream_name = "AC97 HiFi",
3100 - .cpu_dai_name = "pxa-ac97.0",
3101 + .cpu_dai_name = "pxa2xx-ac97",
3102 .codec_dai_name = "wm9705-hifi",
3103 .platform_name = "pxa-pcm-audio",
3104 .codec_name = "wm9705-codec",
3105 @@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
3106 {
3107 .name = "AC97 Aux",
3108 .stream_name = "AC97 Aux",
3109 - .cpu_dai_name = "pxa-ac97.1",
3110 + .cpu_dai_name = "pxa2xx-ac97-aux",
3111 .codec_dai_name ="wm9705-aux",
3112 .platform_name = "pxa-pcm-audio",
3113 .codec_name = "wm9705-codec",
3114 diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
3115 index d42e5fe..00252f6 100644
3116 --- a/sound/soc/pxa/e800_wm9712.c
3117 +++ b/sound/soc/pxa/e800_wm9712.c
3118 @@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
3119 {
3120 .name = "AC97",
3121 .stream_name = "AC97 HiFi",
3122 - .cpu_dai_name = "pxa-ac97.0",
3123 + .cpu_dai_name = "pxa2xx-ac97",
3124 .codec_dai_name = "wm9712-hifi",
3125 .platform_name = "pxa-pcm-audio",
3126 .codec_name = "wm9712-codec",
3127 @@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
3128 {
3129 .name = "AC97 Aux",
3130 .stream_name = "AC97 Aux",
3131 - .cpu_dai_name = "pxa-ac97.1",
3132 + .cpu_dai_name = "pxa2xx-ac97-aux",
3133 .codec_dai_name ="wm9712-aux",
3134 .platform_name = "pxa-pcm-audio",
3135 .codec_name = "wm9712-codec",
3136 diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
3137 index eadf9d3..e0a4850 100644
3138 --- a/sound/soc/pxa/em-x270.c
3139 +++ b/sound/soc/pxa/em-x270.c
3140 @@ -38,7 +38,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
3141 {
3142 .name = "AC97",
3143 .stream_name = "AC97 HiFi",
3144 - .cpu_dai_name = "pxa-ac97.0",
3145 + .cpu_dai_name = "pxa2xx-ac97",
3146 .codec_dai_name = "wm9712-hifi",
3147 .platform_name = "pxa-pcm-audio",
3148 .codec_name = "wm9712-codec",
3149 @@ -46,7 +46,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
3150 {
3151 .name = "AC97 Aux",
3152 .stream_name = "AC97 Aux",
3153 - .cpu_dai_name = "pxa-ac97.1",
3154 + .cpu_dai_name = "pxa2xx-ac97-aux",
3155 .codec_dai_name ="wm9712-aux",
3156 .platform_name = "pxa-pcm-audio",
3157 .codec_name = "wm9712-codec",
3158 diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
3159 index f284cc5..2026b68 100644
3160 --- a/sound/soc/pxa/mioa701_wm9713.c
3161 +++ b/sound/soc/pxa/mioa701_wm9713.c
3162 @@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
3163 {
3164 .name = "AC97",
3165 .stream_name = "AC97 HiFi",
3166 - .cpu_dai_name = "pxa-ac97.0",
3167 + .cpu_dai_name = "pxa2xx-ac97",
3168 .codec_dai_name = "wm9713-hifi",
3169 .codec_name = "wm9713-codec",
3170 .init = mioa701_wm9713_init,
3171 @@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
3172 {
3173 .name = "AC97 Aux",
3174 .stream_name = "AC97 Aux",
3175 - .cpu_dai_name = "pxa-ac97.1",
3176 + .cpu_dai_name = "pxa2xx-ac97-aux",
3177 .codec_dai_name ="wm9713-aux",
3178 .codec_name = "wm9713-codec",
3179 .platform_name = "pxa-pcm-audio",
3180 diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
3181 index 13f6d48..500d878 100644
3182 --- a/sound/soc/pxa/palm27x.c
3183 +++ b/sound/soc/pxa/palm27x.c
3184 @@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
3185 {
3186 .name = "AC97 HiFi",
3187 .stream_name = "AC97 HiFi",
3188 - .cpu_dai_name = "pxa-ac97.0",
3189 + .cpu_dai_name = "pxa2xx-ac97",
3190 .codec_dai_name = "wm9712-hifi",
3191 .codec_name = "wm9712-codec",
3192 .platform_name = "pxa-pcm-audio",
3193 @@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
3194 {
3195 .name = "AC97 Aux",
3196 .stream_name = "AC97 Aux",
3197 - .cpu_dai_name = "pxa-ac97.1",
3198 + .cpu_dai_name = "pxa2xx-ac97-aux",
3199 .codec_dai_name = "wm9712-aux",
3200 .codec_name = "wm9712-codec",
3201 .platform_name = "pxa-pcm-audio",
3202 diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
3203 index 7b983f9..9c89d40 100644
3204 --- a/sound/soc/pxa/tosa.c
3205 +++ b/sound/soc/pxa/tosa.c
3206 @@ -217,7 +217,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
3207 {
3208 .name = "AC97",
3209 .stream_name = "AC97 HiFi",
3210 - .cpu_dai_name = "pxa-ac97.0",
3211 + .cpu_dai_name = "pxa2xx-ac97",
3212 .codec_dai_name = "wm9712-hifi",
3213 .platform_name = "pxa-pcm-audio",
3214 .codec_name = "wm9712-codec",
3215 @@ -227,7 +227,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
3216 {
3217 .name = "AC97 Aux",
3218 .stream_name = "AC97 Aux",
3219 - .cpu_dai_name = "pxa-ac97.1",
3220 + .cpu_dai_name = "pxa2xx-ac97-aux",
3221 .codec_dai_name = "wm9712-aux",
3222 .platform_name = "pxa-pcm-audio",
3223 .codec_name = "wm9712-codec",
3224 diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
3225 index d27e05a..324a86d 100644
3226 --- a/sound/soc/pxa/zylonite.c
3227 +++ b/sound/soc/pxa/zylonite.c
3228 @@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
3229 .stream_name = "AC97 HiFi",
3230 .codec_name = "wm9713-codec",
3231 .platform_name = "pxa-pcm-audio",
3232 - .cpu_dai_name = "pxa-ac97.0",
3233 + .cpu_dai_name = "pxa2xx-ac97",
3234 .codec_name = "wm9713-hifi",
3235 .init = zylonite_wm9713_init,
3236 },
3237 @@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
3238 .stream_name = "AC97 Aux",
3239 .codec_name = "wm9713-codec",
3240 .platform_name = "pxa-pcm-audio",
3241 - .cpu_dai_name = "pxa-ac97.1",
3242 + .cpu_dai_name = "pxa2xx-ac97-aux",
3243 .codec_name = "wm9713-aux",
3244 },
3245 {
3246 diff --git a/sound/usb/card.c b/sound/usb/card.c
3247 index 800f7cb..c0f8270 100644
3248 --- a/sound/usb/card.c
3249 +++ b/sound/usb/card.c
3250 @@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
3251 return -ENOMEM;
3252 }
3253
3254 + mutex_init(&chip->shutdown_mutex);
3255 chip->index = idx;
3256 chip->dev = dev;
3257 chip->card = card;
3258 @@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
3259 chip = ptr;
3260 card = chip->card;
3261 mutex_lock(&register_mutex);
3262 + mutex_lock(&chip->shutdown_mutex);
3263 chip->shutdown = 1;
3264 chip->num_interfaces--;
3265 if (chip->num_interfaces <= 0) {
3266 @@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
3267 snd_usb_mixer_disconnect(p);
3268 }
3269 usb_chip[chip->index] = NULL;
3270 + mutex_unlock(&chip->shutdown_mutex);
3271 mutex_unlock(&register_mutex);
3272 snd_card_free_when_closed(card);
3273 } else {
3274 + mutex_unlock(&chip->shutdown_mutex);
3275 mutex_unlock(&register_mutex);
3276 }
3277 }
3278 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
3279 index 4132522..e3f6805 100644
3280 --- a/sound/usb/pcm.c
3281 +++ b/sound/usb/pcm.c
3282 @@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
3283 }
3284
3285 if (changed) {
3286 + mutex_lock(&subs->stream->chip->shutdown_mutex);
3287 /* format changed */
3288 snd_usb_release_substream_urbs(subs, 0);
3289 /* influenced: period_bytes, channels, rate, format, */
3290 @@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
3291 params_rate(hw_params),
3292 snd_pcm_format_physical_width(params_format(hw_params)) *
3293 params_channels(hw_params));
3294 + mutex_unlock(&subs->stream->chip->shutdown_mutex);
3295 }
3296
3297 return ret;
3298 @@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
3299 subs->cur_audiofmt = NULL;
3300 subs->cur_rate = 0;
3301 subs->period_bytes = 0;
3302 - if (!subs->stream->chip->shutdown)
3303 - snd_usb_release_substream_urbs(subs, 0);
3304 + mutex_lock(&subs->stream->chip->shutdown_mutex);
3305 + snd_usb_release_substream_urbs(subs, 0);
3306 + mutex_unlock(&subs->stream->chip->shutdown_mutex);
3307 return snd_pcm_lib_free_vmalloc_buffer(substream);
3308 }
3309
3310 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
3311 index db3eb21..6e66fff 100644
3312 --- a/sound/usb/usbaudio.h
3313 +++ b/sound/usb/usbaudio.h
3314 @@ -36,6 +36,7 @@ struct snd_usb_audio {
3315 struct snd_card *card;
3316 u32 usb_id;
3317 int shutdown;
3318 + struct mutex shutdown_mutex;
3319 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
3320 int num_interfaces;
3321 int num_suspended_intf;