Magellan Linux

Contents of /trunk/kernel26-alx/patches-3.10/0145-3.10.46-all-fixes.patch

Parent Directory | Revision Log


Revision 2672 - (show annotations) (download)
Tue Jul 21 16:46:35 2015 UTC (8 years, 9 months ago) by niro
File size: 84402 byte(s)
-3.10.84-alx-r1
1 diff --git a/Makefile b/Makefile
2 index 8a63ca1db77a..c226f110181d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 45
9 +SUBLEVEL = 46
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
14 index af4e8c8a5422..6582c4adc182 100644
15 --- a/arch/arm/kernel/stacktrace.c
16 +++ b/arch/arm/kernel/stacktrace.c
17 @@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
18 return trace->nr_entries >= trace->max_entries;
19 }
20
21 -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
22 +/* This must be noinline to so that our skip calculation works correctly */
23 +static noinline void __save_stack_trace(struct task_struct *tsk,
24 + struct stack_trace *trace, unsigned int nosched)
25 {
26 struct stack_trace_data data;
27 struct stackframe frame;
28
29 data.trace = trace;
30 data.skip = trace->skip;
31 + data.no_sched_functions = nosched;
32
33 if (tsk != current) {
34 #ifdef CONFIG_SMP
35 @@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
36 trace->entries[trace->nr_entries++] = ULONG_MAX;
37 return;
38 #else
39 - data.no_sched_functions = 1;
40 frame.fp = thread_saved_fp(tsk);
41 frame.sp = thread_saved_sp(tsk);
42 frame.lr = 0; /* recovered from the stack */
43 @@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
44 } else {
45 register unsigned long current_sp asm ("sp");
46
47 - data.no_sched_functions = 0;
48 + /* We don't want this function nor the caller */
49 + data.skip += 2;
50 frame.fp = (unsigned long)__builtin_frame_address(0);
51 frame.sp = current_sp;
52 frame.lr = (unsigned long)__builtin_return_address(0);
53 - frame.pc = (unsigned long)save_stack_trace_tsk;
54 + frame.pc = (unsigned long)__save_stack_trace;
55 }
56
57 walk_stackframe(&frame, save_trace, &data);
58 @@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
59 trace->entries[trace->nr_entries++] = ULONG_MAX;
60 }
61
62 +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
63 +{
64 + __save_stack_trace(tsk, trace, 1);
65 +}
66 +
67 void save_stack_trace(struct stack_trace *trace)
68 {
69 - save_stack_trace_tsk(current, trace);
70 + __save_stack_trace(current, trace, 0);
71 }
72 EXPORT_SYMBOL_GPL(save_stack_trace);
73 #endif
74 diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
75 index 0dac3d239e32..d712c5172237 100644
76 --- a/arch/arm/mach-omap1/board-h2.c
77 +++ b/arch/arm/mach-omap1/board-h2.c
78 @@ -379,7 +379,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
79 /* usb1 has a Mini-AB port and external isp1301 transceiver */
80 .otg = 2,
81
82 -#ifdef CONFIG_USB_GADGET_OMAP
83 +#if IS_ENABLED(CONFIG_USB_OMAP)
84 .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
85 /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
86 #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
87 diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
88 index 816ecd13f81e..bfed4f928663 100644
89 --- a/arch/arm/mach-omap1/board-h3.c
90 +++ b/arch/arm/mach-omap1/board-h3.c
91 @@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
92 /* usb1 has a Mini-AB port and external isp1301 transceiver */
93 .otg = 2,
94
95 -#ifdef CONFIG_USB_GADGET_OMAP
96 +#if IS_ENABLED(CONFIG_USB_OMAP)
97 .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
98 #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
99 /* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
100 diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
101 index bd5f02e9c354..c49ce83cc1eb 100644
102 --- a/arch/arm/mach-omap1/board-innovator.c
103 +++ b/arch/arm/mach-omap1/board-innovator.c
104 @@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
105 /* usb1 has a Mini-AB port and external isp1301 transceiver */
106 .otg = 2,
107
108 -#ifdef CONFIG_USB_GADGET_OMAP
109 +#if IS_ENABLED(CONFIG_USB_OMAP)
110 .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
111 /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
112 #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
113 diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
114 index a7ce69286688..006fbb5f9654 100644
115 --- a/arch/arm/mach-omap1/board-osk.c
116 +++ b/arch/arm/mach-omap1/board-osk.c
117 @@ -280,7 +280,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
118 * be used, with a NONSTANDARD gender-bending cable/dongle, as
119 * a peripheral.
120 */
121 -#ifdef CONFIG_USB_GADGET_OMAP
122 +#if IS_ENABLED(CONFIG_USB_OMAP)
123 .register_dev = 1,
124 .hmc_mode = 0,
125 #else
126 diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
127 index 6ba4bd9118f2..6f3b0476b729 100644
128 --- a/arch/arm/mm/proc-v7-3level.S
129 +++ b/arch/arm/mm/proc-v7-3level.S
130 @@ -56,6 +56,14 @@ ENTRY(cpu_v7_switch_mm)
131 mov pc, lr
132 ENDPROC(cpu_v7_switch_mm)
133
134 +#ifdef __ARMEB__
135 +#define rl r3
136 +#define rh r2
137 +#else
138 +#define rl r2
139 +#define rh r3
140 +#endif
141 +
142 /*
143 * cpu_v7_set_pte_ext(ptep, pte)
144 *
145 @@ -65,13 +73,13 @@ ENDPROC(cpu_v7_switch_mm)
146 */
147 ENTRY(cpu_v7_set_pte_ext)
148 #ifdef CONFIG_MMU
149 - tst r2, #L_PTE_VALID
150 + tst rl, #L_PTE_VALID
151 beq 1f
152 - tst r3, #1 << (57 - 32) @ L_PTE_NONE
153 - bicne r2, #L_PTE_VALID
154 + tst rh, #1 << (57 - 32) @ L_PTE_NONE
155 + bicne rl, #L_PTE_VALID
156 bne 1f
157 - tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
158 - orreq r2, #L_PTE_RDONLY
159 + tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
160 + orreq rl, #L_PTE_RDONLY
161 1: strd r2, r3, [r0]
162 ALT_SMP(W(nop))
163 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
164 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
165 index 5341534b6d04..85536688f753 100644
166 --- a/arch/arm64/kernel/ptrace.c
167 +++ b/arch/arm64/kernel/ptrace.c
168 @@ -872,6 +872,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
169 compat_ulong_t val)
170 {
171 int ret;
172 + mm_segment_t old_fs = get_fs();
173
174 if (off & 3 || off >= COMPAT_USER_SZ)
175 return -EIO;
176 @@ -879,10 +880,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
177 if (off >= sizeof(compat_elf_gregset_t))
178 return 0;
179
180 + set_fs(KERNEL_DS);
181 ret = copy_regset_from_user(tsk, &user_aarch32_view,
182 REGSET_COMPAT_GPR, off,
183 sizeof(compat_ulong_t),
184 &val);
185 + set_fs(old_fs);
186 +
187 return ret;
188 }
189
190 diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
191 index bbf8141408cd..2bed4f02a558 100644
192 --- a/arch/s390/include/asm/lowcore.h
193 +++ b/arch/s390/include/asm/lowcore.h
194 @@ -142,9 +142,9 @@ struct _lowcore {
195 __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
196
197 /* Interrupt response block */
198 - __u8 irb[64]; /* 0x0300 */
199 + __u8 irb[96]; /* 0x0300 */
200
201 - __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
202 + __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
203
204 /*
205 * 0xe00 contains the address of the IPL Parameter Information
206 @@ -288,12 +288,13 @@ struct _lowcore {
207 __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
208
209 /* Interrupt response block. */
210 - __u8 irb[64]; /* 0x0400 */
211 + __u8 irb[96]; /* 0x0400 */
212 + __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
213
214 /* Per cpu primary space access list */
215 - __u32 paste[16]; /* 0x0440 */
216 + __u32 paste[16]; /* 0x0480 */
217
218 - __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
219 + __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
220
221 /*
222 * 0xe00 contains the address of the IPL Parameter Information
223 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
224 index 94e52cf064b0..ac6328176097 100644
225 --- a/arch/x86/kernel/entry_32.S
226 +++ b/arch/x86/kernel/entry_32.S
227 @@ -434,9 +434,10 @@ sysenter_past_esp:
228 jnz sysenter_audit
229 sysenter_do_call:
230 cmpl $(NR_syscalls), %eax
231 - jae syscall_badsys
232 + jae sysenter_badsys
233 call *sys_call_table(,%eax,4)
234 movl %eax,PT_EAX(%esp)
235 +sysenter_after_call:
236 LOCKDEP_SYS_EXIT
237 DISABLE_INTERRUPTS(CLBR_ANY)
238 TRACE_IRQS_OFF
239 @@ -554,11 +555,6 @@ ENTRY(iret_exc)
240
241 CFI_RESTORE_STATE
242 ldt_ss:
243 - larl PT_OLDSS(%esp), %eax
244 - jnz restore_nocheck
245 - testl $0x00400000, %eax # returning to 32bit stack?
246 - jnz restore_nocheck # allright, normal return
247 -
248 #ifdef CONFIG_PARAVIRT
249 /*
250 * The kernel can't run on a non-flat stack if paravirt mode
251 @@ -691,7 +687,12 @@ END(syscall_fault)
252
253 syscall_badsys:
254 movl $-ENOSYS,PT_EAX(%esp)
255 - jmp resume_userspace
256 + jmp syscall_exit
257 +END(syscall_badsys)
258 +
259 +sysenter_badsys:
260 + movl $-ENOSYS,PT_EAX(%esp)
261 + jmp sysenter_after_call
262 END(syscall_badsys)
263 CFI_ENDPROC
264 /*
265 diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
266 index 38ae65dfd14f..63a899304d27 100644
267 --- a/arch/x86/syscalls/syscall_64.tbl
268 +++ b/arch/x86/syscalls/syscall_64.tbl
269 @@ -212,10 +212,10 @@
270 203 common sched_setaffinity sys_sched_setaffinity
271 204 common sched_getaffinity sys_sched_getaffinity
272 205 64 set_thread_area
273 -206 common io_setup sys_io_setup
274 +206 64 io_setup sys_io_setup
275 207 common io_destroy sys_io_destroy
276 208 common io_getevents sys_io_getevents
277 -209 common io_submit sys_io_submit
278 +209 64 io_submit sys_io_submit
279 210 common io_cancel sys_io_cancel
280 211 64 get_thread_area
281 212 common lookup_dcookie sys_lookup_dcookie
282 @@ -356,3 +356,5 @@
283 540 x32 process_vm_writev compat_sys_process_vm_writev
284 541 x32 setsockopt compat_sys_setsockopt
285 542 x32 getsockopt compat_sys_getsockopt
286 +543 x32 io_setup compat_sys_io_setup
287 +544 x32 io_submit compat_sys_io_submit
288 diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
289 index b3e36a81aa4d..ca6d2acafa66 100644
290 --- a/drivers/acpi/acpica/utstring.c
291 +++ b/drivers/acpi/acpica/utstring.c
292 @@ -349,7 +349,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
293 }
294
295 acpi_os_printf("\"");
296 - for (i = 0; string[i] && (i < max_length); i++) {
297 + for (i = 0; (i < max_length) && string[i]; i++) {
298
299 /* Escape sequences */
300
301 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
302 index ccba6e46cfb3..b62207a87430 100644
303 --- a/drivers/acpi/bus.c
304 +++ b/drivers/acpi/bus.c
305 @@ -57,6 +57,12 @@ EXPORT_SYMBOL(acpi_root_dir);
306
307
308 #ifdef CONFIG_X86
309 +#ifdef CONFIG_ACPI_CUSTOM_DSDT
310 +static inline int set_copy_dsdt(const struct dmi_system_id *id)
311 +{
312 + return 0;
313 +}
314 +#else
315 static int set_copy_dsdt(const struct dmi_system_id *id)
316 {
317 printk(KERN_NOTICE "%s detected - "
318 @@ -64,6 +70,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
319 acpi_gbl_copy_dsdt_locally = 1;
320 return 0;
321 }
322 +#endif
323
324 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
325 /*
326 diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
327 index bc68a440d432..c4d2f0e48685 100644
328 --- a/drivers/bluetooth/hci_ldisc.c
329 +++ b/drivers/bluetooth/hci_ldisc.c
330 @@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
331
332 int hci_uart_tx_wakeup(struct hci_uart *hu)
333 {
334 - struct tty_struct *tty = hu->tty;
335 - struct hci_dev *hdev = hu->hdev;
336 - struct sk_buff *skb;
337 -
338 if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
339 set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
340 return 0;
341 @@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
342
343 BT_DBG("");
344
345 + schedule_work(&hu->write_work);
346 +
347 + return 0;
348 +}
349 +
350 +static void hci_uart_write_work(struct work_struct *work)
351 +{
352 + struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
353 + struct tty_struct *tty = hu->tty;
354 + struct hci_dev *hdev = hu->hdev;
355 + struct sk_buff *skb;
356 +
357 + /* REVISIT: should we cope with bad skbs or ->write() returning
358 + * and error value ?
359 + */
360 +
361 restart:
362 clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
363
364 @@ -153,7 +165,6 @@ restart:
365 goto restart;
366
367 clear_bit(HCI_UART_SENDING, &hu->tx_state);
368 - return 0;
369 }
370
371 static void hci_uart_init_work(struct work_struct *work)
372 @@ -289,6 +300,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
373 tty->receive_room = 65536;
374
375 INIT_WORK(&hu->init_ready, hci_uart_init_work);
376 + INIT_WORK(&hu->write_work, hci_uart_write_work);
377
378 spin_lock_init(&hu->rx_lock);
379
380 @@ -326,6 +338,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
381 if (hdev)
382 hci_uart_close(hdev);
383
384 + cancel_work_sync(&hu->write_work);
385 +
386 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
387 if (hdev) {
388 if (test_bit(HCI_UART_REGISTERED, &hu->flags))
389 diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
390 index fffa61ff5cb1..12df101ca942 100644
391 --- a/drivers/bluetooth/hci_uart.h
392 +++ b/drivers/bluetooth/hci_uart.h
393 @@ -68,6 +68,7 @@ struct hci_uart {
394 unsigned long hdev_flags;
395
396 struct work_struct init_ready;
397 + struct work_struct write_work;
398
399 struct hci_uart_proto *proto;
400 void *priv;
401 diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
402 index 974321a2508d..14790304b84b 100644
403 --- a/drivers/char/applicom.c
404 +++ b/drivers/char/applicom.c
405 @@ -345,7 +345,6 @@ out:
406 free_irq(apbs[i].irq, &dummy);
407 iounmap(apbs[i].RamIO);
408 }
409 - pci_disable_device(dev);
410 return ret;
411 }
412
413 diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
414 index 67d6738d85a0..09f4a9374cf5 100644
415 --- a/drivers/extcon/extcon-max8997.c
416 +++ b/drivers/extcon/extcon-max8997.c
417 @@ -712,7 +712,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
418 goto err_irq;
419 }
420
421 - if (pdata->muic_pdata) {
422 + if (pdata && pdata->muic_pdata) {
423 struct max8997_muic_platform_data *muic_pdata
424 = pdata->muic_pdata;
425
426 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
427 index c4e5cdfa5d24..81d0e6e1f754 100644
428 --- a/drivers/hid/hid-core.c
429 +++ b/drivers/hid/hid-core.c
430 @@ -796,7 +796,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
431 * ->numbered being checked, which may not always be the case when
432 * drivers go to access report values.
433 */
434 - report = hid->report_enum[type].report_id_hash[id];
435 + if (id == 0) {
436 + /*
437 + * Validating on id 0 means we should examine the first
438 + * report in the list.
439 + */
440 + report = list_entry(
441 + hid->report_enum[type].report_list.next,
442 + struct hid_report, list);
443 + } else {
444 + report = hid->report_enum[type].report_id_hash[id];
445 + }
446 if (!report) {
447 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
448 return NULL;
449 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
450 index 144999918022..0e93152384f0 100644
451 --- a/drivers/infiniband/ulp/isert/ib_isert.c
452 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
453 @@ -572,14 +572,12 @@ isert_disconnect_work(struct work_struct *work)
454 isert_put_conn(isert_conn);
455 return;
456 }
457 - if (!isert_conn->logout_posted) {
458 - pr_debug("Calling rdma_disconnect for !logout_posted from"
459 - " isert_disconnect_work\n");
460 +
461 + if (isert_conn->disconnect) {
462 + /* Send DREQ/DREP towards our initiator */
463 rdma_disconnect(isert_conn->conn_cm_id);
464 - mutex_unlock(&isert_conn->conn_mutex);
465 - iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
466 - goto wake_up;
467 }
468 +
469 mutex_unlock(&isert_conn->conn_mutex);
470
471 wake_up:
472 @@ -588,10 +586,11 @@ wake_up:
473 }
474
475 static void
476 -isert_disconnected_handler(struct rdma_cm_id *cma_id)
477 +isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
478 {
479 struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
480
481 + isert_conn->disconnect = disconnect;
482 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
483 schedule_work(&isert_conn->conn_logout_work);
484 }
485 @@ -600,29 +599,28 @@ static int
486 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
487 {
488 int ret = 0;
489 + bool disconnect = false;
490
491 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
492 event->event, event->status, cma_id->context, cma_id);
493
494 switch (event->event) {
495 case RDMA_CM_EVENT_CONNECT_REQUEST:
496 - pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
497 ret = isert_connect_request(cma_id, event);
498 break;
499 case RDMA_CM_EVENT_ESTABLISHED:
500 - pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
501 isert_connected_handler(cma_id);
502 break;
503 - case RDMA_CM_EVENT_DISCONNECTED:
504 - pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
505 - isert_disconnected_handler(cma_id);
506 - break;
507 - case RDMA_CM_EVENT_DEVICE_REMOVAL:
508 - case RDMA_CM_EVENT_ADDR_CHANGE:
509 + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
510 + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
511 + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
512 + disconnect = true;
513 + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
514 + isert_disconnected_handler(cma_id, disconnect);
515 break;
516 case RDMA_CM_EVENT_CONNECT_ERROR:
517 default:
518 - pr_err("Unknown RDMA CMA event: %d\n", event->event);
519 + pr_err("Unhandled RDMA CMA event: %d\n", event->event);
520 break;
521 }
522
523 @@ -1371,11 +1369,8 @@ isert_do_control_comp(struct work_struct *work)
524 break;
525 case ISTATE_SEND_LOGOUTRSP:
526 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
527 - /*
528 - * Call atomic_dec(&isert_conn->post_send_buf_count)
529 - * from isert_wait_conn()
530 - */
531 - isert_conn->logout_posted = true;
532 +
533 + atomic_dec(&isert_conn->post_send_buf_count);
534 iscsit_logout_post_handler(cmd, cmd->conn);
535 break;
536 default:
537 @@ -1483,6 +1478,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
538 isert_conn->state = ISER_CONN_DOWN;
539 mutex_unlock(&isert_conn->conn_mutex);
540
541 + iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
542 +
543 complete(&isert_conn->conn_wait_comp_err);
544 }
545
546 @@ -2190,9 +2187,14 @@ accept_wait:
547 return -ENODEV;
548
549 spin_lock_bh(&np->np_thread_lock);
550 - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
551 + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
552 spin_unlock_bh(&np->np_thread_lock);
553 - pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
554 + pr_debug("np_thread_state %d for isert_accept_np\n",
555 + np->np_thread_state);
556 + /**
557 + * No point in stalling here when np_thread
558 + * is in state RESET/SHUTDOWN/EXIT - bail
559 + **/
560 return -ENODEV;
561 }
562 spin_unlock_bh(&np->np_thread_lock);
563 @@ -2242,15 +2244,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
564 struct isert_conn *isert_conn = conn->context;
565
566 pr_debug("isert_wait_conn: Starting \n");
567 - /*
568 - * Decrement post_send_buf_count for special case when called
569 - * from isert_do_control_comp() -> iscsit_logout_post_handler()
570 - */
571 - mutex_lock(&isert_conn->conn_mutex);
572 - if (isert_conn->logout_posted)
573 - atomic_dec(&isert_conn->post_send_buf_count);
574
575 - if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
576 + mutex_lock(&isert_conn->conn_mutex);
577 + if (isert_conn->conn_cm_id) {
578 pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
579 rdma_disconnect(isert_conn->conn_cm_id);
580 }
581 @@ -2336,6 +2332,7 @@ destroy_rx_wq:
582
583 static void __exit isert_exit(void)
584 {
585 + flush_scheduled_work();
586 kmem_cache_destroy(isert_cmd_cache);
587 destroy_workqueue(isert_comp_wq);
588 destroy_workqueue(isert_rx_wq);
589 diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
590 index dfe4a2ebef0d..032f65abee36 100644
591 --- a/drivers/infiniband/ulp/isert/ib_isert.h
592 +++ b/drivers/infiniband/ulp/isert/ib_isert.h
593 @@ -78,7 +78,6 @@ struct isert_device;
594
595 struct isert_conn {
596 enum iser_conn_state state;
597 - bool logout_posted;
598 int post_recv_buf_count;
599 atomic_t post_send_buf_count;
600 u32 responder_resources;
601 @@ -106,6 +105,7 @@ struct isert_conn {
602 struct completion conn_wait;
603 struct completion conn_wait_comp_err;
604 struct kref conn_kref;
605 + bool disconnect;
606 };
607
608 #define ISERT_MAX_CQ 64
609 diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
610 index e1863dbf4edc..7a9b98bc208b 100644
611 --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
612 +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
613 @@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
614
615 /* Instruct the CX2341[56] to start sending packets */
616 snd_ivtv_lock(itvsc);
617 +
618 + if (ivtv_init_on_first_open(itv)) {
619 + snd_ivtv_unlock(itvsc);
620 + return -ENXIO;
621 + }
622 +
623 s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
624
625 v4l2_fh_init(&item.fh, s->vdev);
626 diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
627 index 34a26e0cfe77..03504dcf3c52 100644
628 --- a/drivers/media/usb/stk1160/stk1160-core.c
629 +++ b/drivers/media/usb/stk1160/stk1160-core.c
630 @@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
631 {
632 int ret;
633 int pipe = usb_rcvctrlpipe(dev->udev, 0);
634 + u8 *buf;
635
636 *value = 0;
637 +
638 + buf = kmalloc(sizeof(u8), GFP_KERNEL);
639 + if (!buf)
640 + return -ENOMEM;
641 ret = usb_control_msg(dev->udev, pipe, 0x00,
642 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
643 - 0x00, reg, value, sizeof(u8), HZ);
644 + 0x00, reg, buf, sizeof(u8), HZ);
645 if (ret < 0) {
646 stk1160_err("read failed on reg 0x%x (%d)\n",
647 reg, ret);
648 + kfree(buf);
649 return ret;
650 }
651
652 + *value = *buf;
653 + kfree(buf);
654 return 0;
655 }
656
657 diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
658 index 05b05b160e1e..abdea484c998 100644
659 --- a/drivers/media/usb/stk1160/stk1160.h
660 +++ b/drivers/media/usb/stk1160/stk1160.h
661 @@ -143,7 +143,6 @@ struct stk1160 {
662 int num_alt;
663
664 struct stk1160_isoc_ctl isoc_ctl;
665 - char urb_buf[255]; /* urb control msg buffer */
666
667 /* frame properties */
668 int width; /* current frame width */
669 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
670 index e1c5bf3ea112..c081812ac5c0 100644
671 --- a/drivers/media/usb/uvc/uvc_video.c
672 +++ b/drivers/media/usb/uvc/uvc_video.c
673 @@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream,
674 * Clocks and timestamps
675 */
676
677 +static inline void uvc_video_get_ts(struct timespec *ts)
678 +{
679 + if (uvc_clock_param == CLOCK_MONOTONIC)
680 + ktime_get_ts(ts);
681 + else
682 + ktime_get_real_ts(ts);
683 +}
684 +
685 static void
686 uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
687 const __u8 *data, int len)
688 @@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
689 stream->clock.last_sof = dev_sof;
690
691 host_sof = usb_get_current_frame_number(stream->dev->udev);
692 - ktime_get_ts(&ts);
693 + uvc_video_get_ts(&ts);
694
695 /* The UVC specification allows device implementations that can't obtain
696 * the USB frame number to keep their own frame counters as long as they
697 @@ -1010,10 +1018,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
698 return -ENODATA;
699 }
700
701 - if (uvc_clock_param == CLOCK_MONOTONIC)
702 - ktime_get_ts(&ts);
703 - else
704 - ktime_get_real_ts(&ts);
705 + uvc_video_get_ts(&ts);
706
707 buf->buf.v4l2_buf.sequence = stream->sequence;
708 buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
709 diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
710 index 6b6f0ad75090..7042f5faddd7 100644
711 --- a/drivers/net/can/sja1000/peak_pci.c
712 +++ b/drivers/net/can/sja1000/peak_pci.c
713 @@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
714 {
715 struct sja1000_priv *priv;
716 struct peak_pci_chan *chan;
717 - struct net_device *dev;
718 + struct net_device *dev, *prev_dev;
719 void __iomem *cfg_base, *reg_base;
720 u16 sub_sys_id, icr;
721 int i, err, channels;
722 @@ -687,11 +687,13 @@ failure_remove_channels:
723 writew(0x0, cfg_base + PITA_ICR + 2);
724
725 chan = NULL;
726 - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
727 - unregister_sja1000dev(dev);
728 - free_sja1000dev(dev);
729 + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
730 priv = netdev_priv(dev);
731 chan = priv->priv;
732 + prev_dev = chan->prev_dev;
733 +
734 + unregister_sja1000dev(dev);
735 + free_sja1000dev(dev);
736 }
737
738 /* free any PCIeC resources too */
739 @@ -725,10 +727,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
740
741 /* Loop over all registered devices */
742 while (1) {
743 + struct net_device *prev_dev = chan->prev_dev;
744 +
745 dev_info(&pdev->dev, "removing device %s\n", dev->name);
746 unregister_sja1000dev(dev);
747 free_sja1000dev(dev);
748 - dev = chan->prev_dev;
749 + dev = prev_dev;
750
751 if (!dev) {
752 /* do that only for first channel */
753 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
754 index d1a769f35f9d..b1ab3a4956a5 100644
755 --- a/drivers/net/ethernet/ti/cpsw.c
756 +++ b/drivers/net/ethernet/ti/cpsw.c
757 @@ -1547,6 +1547,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
758 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
759 phyid = be32_to_cpup(parp+1);
760 mdio = of_find_device_by_node(mdio_node);
761 + if (!mdio) {
762 + pr_err("Missing mdio platform device\n");
763 + return -EINVAL;
764 + }
765 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
766 PHY_ID_FMT, mdio->name, phyid);
767
768 diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
769 index 2f084e181d39..a1aca4416ca7 100644
770 --- a/drivers/staging/tidspbridge/core/dsp-clock.c
771 +++ b/drivers/staging/tidspbridge/core/dsp-clock.c
772 @@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
773 case GPT_CLK:
774 status = omap_dm_timer_start(timer[clk_id - 1]);
775 break;
776 -#ifdef CONFIG_OMAP_MCBSP
777 +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
778 case MCBSP_CLK:
779 omap_mcbsp_request(MCBSP_ID(clk_id));
780 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
781 @@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
782 case GPT_CLK:
783 status = omap_dm_timer_stop(timer[clk_id - 1]);
784 break;
785 -#ifdef CONFIG_OMAP_MCBSP
786 +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
787 case MCBSP_CLK:
788 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
789 omap_mcbsp_free(MCBSP_ID(clk_id));
790 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
791 index 68dbd88babbd..72663ba228dc 100644
792 --- a/drivers/target/iscsi/iscsi_target.c
793 +++ b/drivers/target/iscsi/iscsi_target.c
794 @@ -4151,8 +4151,6 @@ int iscsit_close_connection(
795 if (conn->conn_transport->iscsit_wait_conn)
796 conn->conn_transport->iscsit_wait_conn(conn);
797
798 - iscsit_free_queue_reqs_for_conn(conn);
799 -
800 /*
801 * During Connection recovery drop unacknowledged out of order
802 * commands for this connection, and prepare the other commands
803 @@ -4169,6 +4167,7 @@ int iscsit_close_connection(
804 iscsit_clear_ooo_cmdsns_for_conn(conn);
805 iscsit_release_commands_from_conn(conn);
806 }
807 + iscsit_free_queue_reqs_for_conn(conn);
808
809 /*
810 * Handle decrementing session or connection usage count if
811 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
812 index 0d6c3dd25679..e14e105acff8 100644
813 --- a/drivers/target/iscsi/iscsi_target_login.c
814 +++ b/drivers/target/iscsi/iscsi_target_login.c
815 @@ -597,13 +597,8 @@ static int iscsi_login_non_zero_tsih_s2(
816 *
817 * In our case, we have already located the struct iscsi_tiqn at this point.
818 */
819 - memset(buf, 0, 32);
820 - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
821 - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
822 - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
823 - ISCSI_LOGIN_STATUS_NO_RESOURCES);
824 + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
825 return -1;
826 - }
827
828 return iscsi_login_disable_FIM_keys(conn->param_list, conn);
829 }
830 diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
831 index 0921a64b5550..5c3b6778c22a 100644
832 --- a/drivers/target/target_core_rd.c
833 +++ b/drivers/target/target_core_rd.c
834 @@ -174,7 +174,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
835 - 1;
836
837 for (j = 0; j < sg_per_table; j++) {
838 - pg = alloc_pages(GFP_KERNEL, 0);
839 + pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
840 if (!pg) {
841 pr_err("Unable to allocate scatterlist"
842 " pages for struct rd_dev_sg_table\n");
843 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
844 index bbc5b0ee2bdc..0ef75fb0ecba 100644
845 --- a/drivers/target/target_core_sbc.c
846 +++ b/drivers/target/target_core_sbc.c
847 @@ -63,7 +63,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
848 transport_kunmap_data_sg(cmd);
849 }
850
851 - target_complete_cmd(cmd, GOOD);
852 + target_complete_cmd_with_length(cmd, GOOD, 8);
853 return 0;
854 }
855
856 @@ -101,7 +101,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
857 transport_kunmap_data_sg(cmd);
858 }
859
860 - target_complete_cmd(cmd, GOOD);
861 + target_complete_cmd_with_length(cmd, GOOD, 32);
862 return 0;
863 }
864
865 diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
866 index 9fabbf7214cd..34254b2ec466 100644
867 --- a/drivers/target/target_core_spc.c
868 +++ b/drivers/target/target_core_spc.c
869 @@ -628,6 +628,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
870 unsigned char buf[SE_INQUIRY_BUF];
871 sense_reason_t ret;
872 int p;
873 + int len = 0;
874
875 memset(buf, 0, SE_INQUIRY_BUF);
876
877 @@ -645,6 +646,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
878 }
879
880 ret = spc_emulate_inquiry_std(cmd, buf);
881 + len = buf[4] + 5;
882 goto out;
883 }
884
885 @@ -652,6 +654,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
886 if (cdb[2] == evpd_handlers[p].page) {
887 buf[1] = cdb[2];
888 ret = evpd_handlers[p].emulate(cmd, buf);
889 + len = get_unaligned_be16(&buf[2]) + 4;
890 goto out;
891 }
892 }
893 @@ -667,7 +670,7 @@ out:
894 }
895
896 if (!ret)
897 - target_complete_cmd(cmd, GOOD);
898 + target_complete_cmd_with_length(cmd, GOOD, len);
899 return ret;
900 }
901
902 @@ -985,7 +988,7 @@ set_length:
903 transport_kunmap_data_sg(cmd);
904 }
905
906 - target_complete_cmd(cmd, GOOD);
907 + target_complete_cmd_with_length(cmd, GOOD, length);
908 return 0;
909 }
910
911 @@ -1162,7 +1165,7 @@ done:
912 buf[3] = (lun_count & 0xff);
913 transport_kunmap_data_sg(cmd);
914
915 - target_complete_cmd(cmd, GOOD);
916 + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
917 return 0;
918 }
919 EXPORT_SYMBOL(spc_emulate_report_luns);
920 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
921 index 21e315874a54..6866d86e8663 100644
922 --- a/drivers/target/target_core_transport.c
923 +++ b/drivers/target/target_core_transport.c
924 @@ -488,7 +488,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
925
926 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
927
928 - complete(&cmd->t_transport_stop_comp);
929 + complete_all(&cmd->t_transport_stop_comp);
930 return 1;
931 }
932
933 @@ -617,7 +617,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
934 if (cmd->transport_state & CMD_T_ABORTED &&
935 cmd->transport_state & CMD_T_STOP) {
936 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
937 - complete(&cmd->t_transport_stop_comp);
938 + complete_all(&cmd->t_transport_stop_comp);
939 return;
940 } else if (cmd->transport_state & CMD_T_FAILED) {
941 INIT_WORK(&cmd->work, target_complete_failure_work);
942 @@ -633,6 +633,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
943 }
944 EXPORT_SYMBOL(target_complete_cmd);
945
946 +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
947 +{
948 + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
949 + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
950 + cmd->residual_count += cmd->data_length - length;
951 + } else {
952 + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
953 + cmd->residual_count = cmd->data_length - length;
954 + }
955 +
956 + cmd->data_length = length;
957 + }
958 +
959 + target_complete_cmd(cmd, scsi_status);
960 +}
961 +EXPORT_SYMBOL(target_complete_cmd_with_length);
962 +
963 static void target_add_to_state_list(struct se_cmd *cmd)
964 {
965 struct se_device *dev = cmd->se_dev;
966 @@ -1688,7 +1705,7 @@ void target_execute_cmd(struct se_cmd *cmd)
967 cmd->se_tfo->get_task_tag(cmd));
968
969 spin_unlock_irq(&cmd->t_state_lock);
970 - complete(&cmd->t_transport_stop_comp);
971 + complete_all(&cmd->t_transport_stop_comp);
972 return;
973 }
974
975 @@ -2877,6 +2894,12 @@ static void target_tmr_work(struct work_struct *work)
976 int transport_generic_handle_tmr(
977 struct se_cmd *cmd)
978 {
979 + unsigned long flags;
980 +
981 + spin_lock_irqsave(&cmd->t_state_lock, flags);
982 + cmd->transport_state |= CMD_T_ACTIVE;
983 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
984 +
985 INIT_WORK(&cmd->work, target_tmr_work);
986 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
987 return 0;
988 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
989 index 69948ad39837..d868b62c1a16 100644
990 --- a/drivers/usb/dwc3/gadget.c
991 +++ b/drivers/usb/dwc3/gadget.c
992 @@ -604,6 +604,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
993
994 dwc3_remove_requests(dwc, dep);
995
996 + /* make sure HW endpoint isn't stalled */
997 + if (dep->flags & DWC3_EP_STALL)
998 + __dwc3_gadget_ep_set_halt(dep, 0);
999 +
1000 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1001 reg &= ~DWC3_DALEPENA_EP(dep->number);
1002 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1003 diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
1004 index 570c005062ab..42a30903d4fd 100644
1005 --- a/drivers/usb/gadget/inode.c
1006 +++ b/drivers/usb/gadget/inode.c
1007 @@ -1509,7 +1509,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1008 }
1009 break;
1010
1011 -#ifndef CONFIG_USB_GADGET_PXA25X
1012 +#ifndef CONFIG_USB_PXA25X
1013 /* PXA automagically handles this request too */
1014 case USB_REQ_GET_CONFIGURATION:
1015 if (ctrl->bRequestType != 0x80)
1016 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
1017 index 4c338ec03a07..9cfe3af3101a 100644
1018 --- a/drivers/usb/host/pci-quirks.c
1019 +++ b/drivers/usb/host/pci-quirks.c
1020 @@ -555,6 +555,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
1021 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
1022 },
1023 },
1024 + {
1025 + /* HASEE E200 */
1026 + .matches = {
1027 + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
1028 + DMI_MATCH(DMI_BOARD_NAME, "E210"),
1029 + DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
1030 + },
1031 + },
1032 { }
1033 };
1034
1035 @@ -564,9 +572,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
1036 {
1037 int try_handoff = 1, tried_handoff = 0;
1038
1039 - /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
1040 - * the handoff on its unused controller. Skip it. */
1041 - if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
1042 + /*
1043 + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
1044 + * the handoff on its unused controller. Skip it.
1045 + *
1046 + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
1047 + */
1048 + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
1049 + pdev->device == 0x27cc)) {
1050 if (dmi_check_system(ehci_dmi_nohandoff_table))
1051 try_handoff = 0;
1052 }
1053 diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
1054 index 8b4ca1cb450a..98438b90838f 100644
1055 --- a/drivers/usb/misc/usbtest.c
1056 +++ b/drivers/usb/misc/usbtest.c
1057 @@ -7,9 +7,10 @@
1058 #include <linux/moduleparam.h>
1059 #include <linux/scatterlist.h>
1060 #include <linux/mutex.h>
1061 -
1062 +#include <linux/timer.h>
1063 #include <linux/usb.h>
1064
1065 +#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
1066
1067 /*-------------------------------------------------------------------------*/
1068
1069 @@ -366,6 +367,7 @@ static int simple_io(
1070 int max = urb->transfer_buffer_length;
1071 struct completion completion;
1072 int retval = 0;
1073 + unsigned long expire;
1074
1075 urb->context = &completion;
1076 while (retval == 0 && iterations-- > 0) {
1077 @@ -378,9 +380,15 @@ static int simple_io(
1078 if (retval != 0)
1079 break;
1080
1081 - /* NOTE: no timeouts; can't be broken out of by interrupt */
1082 - wait_for_completion(&completion);
1083 - retval = urb->status;
1084 + expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
1085 + if (!wait_for_completion_timeout(&completion, expire)) {
1086 + usb_kill_urb(urb);
1087 + retval = (urb->status == -ENOENT ?
1088 + -ETIMEDOUT : urb->status);
1089 + } else {
1090 + retval = urb->status;
1091 + }
1092 +
1093 urb->dev = udev;
1094 if (retval == 0 && usb_pipein(urb->pipe))
1095 retval = simple_check_buf(tdev, urb);
1096 @@ -476,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
1097 return sg;
1098 }
1099
1100 +static void sg_timeout(unsigned long _req)
1101 +{
1102 + struct usb_sg_request *req = (struct usb_sg_request *) _req;
1103 +
1104 + req->status = -ETIMEDOUT;
1105 + usb_sg_cancel(req);
1106 +}
1107 +
1108 static int perform_sglist(
1109 struct usbtest_dev *tdev,
1110 unsigned iterations,
1111 @@ -487,6 +503,9 @@ static int perform_sglist(
1112 {
1113 struct usb_device *udev = testdev_to_usbdev(tdev);
1114 int retval = 0;
1115 + struct timer_list sg_timer;
1116 +
1117 + setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
1118
1119 while (retval == 0 && iterations-- > 0) {
1120 retval = usb_sg_init(req, udev, pipe,
1121 @@ -497,7 +516,10 @@ static int perform_sglist(
1122
1123 if (retval)
1124 break;
1125 + mod_timer(&sg_timer, jiffies +
1126 + msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
1127 usb_sg_wait(req);
1128 + del_timer_sync(&sg_timer);
1129 retval = req->status;
1130
1131 /* FIXME check resulting data pattern */
1132 @@ -1149,6 +1171,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1133 urb->context = &completion;
1134 urb->complete = unlink1_callback;
1135
1136 + if (usb_pipeout(urb->pipe)) {
1137 + simple_fill_buf(urb);
1138 + urb->transfer_flags |= URB_ZERO_PACKET;
1139 + }
1140 +
1141 /* keep the endpoint busy. there are lots of hc/hcd-internal
1142 * states, and testing should get to all of them over time.
1143 *
1144 @@ -1279,6 +1306,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1145 unlink_queued_callback, &ctx);
1146 ctx.urbs[i]->transfer_dma = buf_dma;
1147 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1148 +
1149 + if (usb_pipeout(ctx.urbs[i]->pipe)) {
1150 + simple_fill_buf(ctx.urbs[i]);
1151 + ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1152 + }
1153 }
1154
1155 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1156 diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
1157 index ae481afcb3ec..9201feb97e9e 100644
1158 --- a/drivers/usb/phy/phy-isp1301-omap.c
1159 +++ b/drivers/usb/phy/phy-isp1301-omap.c
1160 @@ -1299,7 +1299,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
1161 return isp1301_otg_enable(isp);
1162 return 0;
1163
1164 -#elif !defined(CONFIG_USB_GADGET_OMAP)
1165 +#elif !IS_ENABLED(CONFIG_USB_OMAP)
1166 // FIXME update its refcount
1167 otg->host = host;
1168
1169 diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
1170 index 3c4db6d196c6..7229b265870a 100644
1171 --- a/drivers/usb/serial/bus.c
1172 +++ b/drivers/usb/serial/bus.c
1173 @@ -98,13 +98,19 @@ static int usb_serial_device_remove(struct device *dev)
1174 struct usb_serial_port *port;
1175 int retval = 0;
1176 int minor;
1177 + int autopm_err;
1178
1179 port = to_usb_serial_port(dev);
1180 if (!port)
1181 return -ENODEV;
1182
1183 - /* make sure suspend/resume doesn't race against port_remove */
1184 - usb_autopm_get_interface(port->serial->interface);
1185 + /*
1186 + * Make sure suspend/resume doesn't race against port_remove.
1187 + *
1188 + * Note that no further runtime PM callbacks will be made if
1189 + * autopm_get fails.
1190 + */
1191 + autopm_err = usb_autopm_get_interface(port->serial->interface);
1192
1193 minor = port->number;
1194 tty_unregister_device(usb_serial_tty_driver, minor);
1195 @@ -118,7 +124,9 @@ static int usb_serial_device_remove(struct device *dev)
1196 dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
1197 driver->description, minor);
1198
1199 - usb_autopm_put_interface(port->serial->interface);
1200 + if (!autopm_err)
1201 + usb_autopm_put_interface(port->serial->interface);
1202 +
1203 return retval;
1204 }
1205
1206 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1207 index 948a19f0cdf7..70ede84f4f6b 100644
1208 --- a/drivers/usb/serial/option.c
1209 +++ b/drivers/usb/serial/option.c
1210 @@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port)
1211 struct option_private *priv = intfdata->private;
1212 struct usb_wwan_port_private *portdata;
1213 int val = 0;
1214 + int res;
1215
1216 portdata = usb_get_serial_port_data(port);
1217
1218 @@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port)
1219 if (portdata->rts_state)
1220 val |= 0x02;
1221
1222 - return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
1223 + res = usb_autopm_get_interface(serial->interface);
1224 + if (res)
1225 + return res;
1226 +
1227 + res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
1228 0x22, 0x21, val, priv->bInterfaceNumber, NULL,
1229 0, USB_CTRL_SET_TIMEOUT);
1230 +
1231 + usb_autopm_put_interface(serial->interface);
1232 +
1233 + return res;
1234 }
1235
1236 MODULE_AUTHOR(DRIVER_AUTHOR);
1237 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1238 index 6c0a542e8ec1..43d93dbf7d71 100644
1239 --- a/drivers/usb/serial/qcserial.c
1240 +++ b/drivers/usb/serial/qcserial.c
1241 @@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = {
1242 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
1243 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
1244 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
1245 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */
1246 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */
1247 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */
1248 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
1249 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
1250 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
1251 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
1252 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
1253 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
1254 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */
1255 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */
1256 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */
1257 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */
1258 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */
1259 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */
1260 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */
1261 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */
1262 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */
1263 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */
1264 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */
1265 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */
1266 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */
1267 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */
1268 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */
1269 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */
1270 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */
1271 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */
1272 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
1273 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
1274 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
1275 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1276 index 2df566c0e9e8..4e4590854123 100644
1277 --- a/drivers/usb/serial/sierra.c
1278 +++ b/drivers/usb/serial/sierra.c
1279 @@ -58,6 +58,7 @@ struct sierra_intf_private {
1280 spinlock_t susp_lock;
1281 unsigned int suspended:1;
1282 int in_flight;
1283 + unsigned int open_ports;
1284 };
1285
1286 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
1287 @@ -767,6 +768,7 @@ static void sierra_close(struct usb_serial_port *port)
1288 struct usb_serial *serial = port->serial;
1289 struct sierra_port_private *portdata;
1290 struct sierra_intf_private *intfdata = port->serial->private;
1291 + struct urb *urb;
1292
1293 portdata = usb_get_serial_port_data(port);
1294
1295 @@ -775,7 +777,6 @@ static void sierra_close(struct usb_serial_port *port)
1296
1297 mutex_lock(&serial->disc_mutex);
1298 if (!serial->disconnected) {
1299 - serial->interface->needs_remote_wakeup = 0;
1300 /* odd error handling due to pm counters */
1301 if (!usb_autopm_get_interface(serial->interface))
1302 sierra_send_setup(port);
1303 @@ -786,8 +787,22 @@ static void sierra_close(struct usb_serial_port *port)
1304 mutex_unlock(&serial->disc_mutex);
1305 spin_lock_irq(&intfdata->susp_lock);
1306 portdata->opened = 0;
1307 + if (--intfdata->open_ports == 0)
1308 + serial->interface->needs_remote_wakeup = 0;
1309 spin_unlock_irq(&intfdata->susp_lock);
1310
1311 + for (;;) {
1312 + urb = usb_get_from_anchor(&portdata->delayed);
1313 + if (!urb)
1314 + break;
1315 + kfree(urb->transfer_buffer);
1316 + usb_free_urb(urb);
1317 + usb_autopm_put_interface_async(serial->interface);
1318 + spin_lock(&portdata->lock);
1319 + portdata->outstanding_urbs--;
1320 + spin_unlock(&portdata->lock);
1321 + }
1322 +
1323 sierra_stop_rx_urbs(port);
1324 for (i = 0; i < portdata->num_in_urbs; i++) {
1325 sierra_release_urb(portdata->in_urbs[i]);
1326 @@ -824,23 +839,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
1327 usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
1328
1329 err = sierra_submit_rx_urbs(port, GFP_KERNEL);
1330 - if (err) {
1331 - /* get rid of everything as in close */
1332 - sierra_close(port);
1333 - /* restore balance for autopm */
1334 - if (!serial->disconnected)
1335 - usb_autopm_put_interface(serial->interface);
1336 - return err;
1337 - }
1338 + if (err)
1339 + goto err_submit;
1340 +
1341 sierra_send_setup(port);
1342
1343 - serial->interface->needs_remote_wakeup = 1;
1344 spin_lock_irq(&intfdata->susp_lock);
1345 portdata->opened = 1;
1346 + if (++intfdata->open_ports == 1)
1347 + serial->interface->needs_remote_wakeup = 1;
1348 spin_unlock_irq(&intfdata->susp_lock);
1349 usb_autopm_put_interface(serial->interface);
1350
1351 return 0;
1352 +
1353 +err_submit:
1354 + sierra_stop_rx_urbs(port);
1355 +
1356 + for (i = 0; i < portdata->num_in_urbs; i++) {
1357 + sierra_release_urb(portdata->in_urbs[i]);
1358 + portdata->in_urbs[i] = NULL;
1359 + }
1360 +
1361 + return err;
1362 }
1363
1364
1365 @@ -936,6 +957,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
1366 struct sierra_port_private *portdata;
1367
1368 portdata = usb_get_serial_port_data(port);
1369 + usb_set_serial_port_data(port, NULL);
1370 kfree(portdata);
1371
1372 return 0;
1373 @@ -952,6 +974,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
1374 for (i = 0; i < serial->num_ports; ++i) {
1375 port = serial->port[i];
1376 portdata = usb_get_serial_port_data(port);
1377 + if (!portdata)
1378 + continue;
1379 sierra_stop_rx_urbs(port);
1380 usb_kill_anchored_urbs(&portdata->active);
1381 }
1382 @@ -994,6 +1018,9 @@ static int sierra_resume(struct usb_serial *serial)
1383 port = serial->port[i];
1384 portdata = usb_get_serial_port_data(port);
1385
1386 + if (!portdata)
1387 + continue;
1388 +
1389 while ((urb = usb_get_from_anchor(&portdata->delayed))) {
1390 usb_anchor_urb(urb, &portdata->active);
1391 intfdata->in_flight++;
1392 @@ -1001,8 +1028,12 @@ static int sierra_resume(struct usb_serial *serial)
1393 if (err < 0) {
1394 intfdata->in_flight--;
1395 usb_unanchor_urb(urb);
1396 - usb_scuttle_anchored_urbs(&portdata->delayed);
1397 - break;
1398 + kfree(urb->transfer_buffer);
1399 + usb_free_urb(urb);
1400 + spin_lock(&portdata->lock);
1401 + portdata->outstanding_urbs--;
1402 + spin_unlock(&portdata->lock);
1403 + continue;
1404 }
1405 }
1406
1407 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
1408 index 11952b6dc224..36f6b6a56907 100644
1409 --- a/drivers/usb/serial/usb_wwan.c
1410 +++ b/drivers/usb/serial/usb_wwan.c
1411 @@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
1412 usb_pipeendpoint(this_urb->pipe), i);
1413
1414 err = usb_autopm_get_interface_async(port->serial->interface);
1415 - if (err < 0)
1416 + if (err < 0) {
1417 + clear_bit(i, &portdata->out_busy);
1418 break;
1419 + }
1420
1421 /* send the data */
1422 memcpy(this_urb->transfer_buffer, buf, todo);
1423 @@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
1424 portdata = usb_get_serial_port_data(port);
1425 intfdata = serial->private;
1426
1427 + if (port->interrupt_in_urb) {
1428 + err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
1429 + if (err) {
1430 + dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
1431 + __func__, err);
1432 + }
1433 + }
1434 +
1435 /* Start reading from the IN endpoint */
1436 for (i = 0; i < N_IN_URB; i++) {
1437 urb = portdata->in_urbs[i];
1438 @@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
1439 }
1440 EXPORT_SYMBOL(usb_wwan_open);
1441
1442 +static void unbusy_queued_urb(struct urb *urb,
1443 + struct usb_wwan_port_private *portdata)
1444 +{
1445 + int i;
1446 +
1447 + for (i = 0; i < N_OUT_URB; i++) {
1448 + if (urb == portdata->out_urbs[i]) {
1449 + clear_bit(i, &portdata->out_busy);
1450 + break;
1451 + }
1452 + }
1453 +}
1454 +
1455 void usb_wwan_close(struct usb_serial_port *port)
1456 {
1457 int i;
1458 struct usb_serial *serial = port->serial;
1459 struct usb_wwan_port_private *portdata;
1460 struct usb_wwan_intf_private *intfdata = port->serial->private;
1461 + struct urb *urb;
1462
1463 portdata = usb_get_serial_port_data(port);
1464
1465 @@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
1466 portdata->opened = 0;
1467 spin_unlock_irq(&intfdata->susp_lock);
1468
1469 + for (;;) {
1470 + urb = usb_get_from_anchor(&portdata->delayed);
1471 + if (!urb)
1472 + break;
1473 + unbusy_queued_urb(urb, portdata);
1474 + usb_autopm_put_interface_async(serial->interface);
1475 + }
1476 +
1477 for (i = 0; i < N_IN_URB; i++)
1478 usb_kill_urb(portdata->in_urbs[i]);
1479 for (i = 0; i < N_OUT_URB; i++)
1480 usb_kill_urb(portdata->out_urbs[i]);
1481 + usb_kill_urb(port->interrupt_in_urb);
1482
1483 /* balancing - important as an error cannot be handled*/
1484 usb_autopm_get_interface_no_resume(serial->interface);
1485 @@ -467,7 +500,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1486 struct usb_wwan_port_private *portdata;
1487 struct urb *urb;
1488 u8 *buffer;
1489 - int err;
1490 int i;
1491
1492 if (!port->bulk_in_size || !port->bulk_out_size)
1493 @@ -507,13 +539,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
1494
1495 usb_set_serial_port_data(port, portdata);
1496
1497 - if (port->interrupt_in_urb) {
1498 - err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
1499 - if (err)
1500 - dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
1501 - __func__, err);
1502 - }
1503 -
1504 return 0;
1505
1506 bail_out_error2:
1507 @@ -581,44 +606,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
1508 int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
1509 {
1510 struct usb_wwan_intf_private *intfdata = serial->private;
1511 - int b;
1512
1513 + spin_lock_irq(&intfdata->susp_lock);
1514 if (PMSG_IS_AUTO(message)) {
1515 - spin_lock_irq(&intfdata->susp_lock);
1516 - b = intfdata->in_flight;
1517 - spin_unlock_irq(&intfdata->susp_lock);
1518 -
1519 - if (b)
1520 + if (intfdata->in_flight) {
1521 + spin_unlock_irq(&intfdata->susp_lock);
1522 return -EBUSY;
1523 + }
1524 }
1525 -
1526 - spin_lock_irq(&intfdata->susp_lock);
1527 intfdata->suspended = 1;
1528 spin_unlock_irq(&intfdata->susp_lock);
1529 +
1530 stop_read_write_urbs(serial);
1531
1532 return 0;
1533 }
1534 EXPORT_SYMBOL(usb_wwan_suspend);
1535
1536 -static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
1537 -{
1538 - int i;
1539 -
1540 - for (i = 0; i < N_OUT_URB; i++) {
1541 - if (urb == portdata->out_urbs[i]) {
1542 - clear_bit(i, &portdata->out_busy);
1543 - break;
1544 - }
1545 - }
1546 -}
1547 -
1548 -static void play_delayed(struct usb_serial_port *port)
1549 +static int play_delayed(struct usb_serial_port *port)
1550 {
1551 struct usb_wwan_intf_private *data;
1552 struct usb_wwan_port_private *portdata;
1553 struct urb *urb;
1554 - int err;
1555 + int err = 0;
1556
1557 portdata = usb_get_serial_port_data(port);
1558 data = port->serial->private;
1559 @@ -635,6 +645,8 @@ static void play_delayed(struct usb_serial_port *port)
1560 break;
1561 }
1562 }
1563 +
1564 + return err;
1565 }
1566
1567 int usb_wwan_resume(struct usb_serial *serial)
1568 @@ -644,54 +656,51 @@ int usb_wwan_resume(struct usb_serial *serial)
1569 struct usb_wwan_intf_private *intfdata = serial->private;
1570 struct usb_wwan_port_private *portdata;
1571 struct urb *urb;
1572 - int err = 0;
1573 -
1574 - /* get the interrupt URBs resubmitted unconditionally */
1575 - for (i = 0; i < serial->num_ports; i++) {
1576 - port = serial->port[i];
1577 - if (!port->interrupt_in_urb) {
1578 - dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
1579 - continue;
1580 - }
1581 - err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
1582 - dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
1583 - if (err < 0) {
1584 - dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
1585 - __func__, err);
1586 - goto err_out;
1587 - }
1588 - }
1589 + int err;
1590 + int err_count = 0;
1591
1592 + spin_lock_irq(&intfdata->susp_lock);
1593 for (i = 0; i < serial->num_ports; i++) {
1594 /* walk all ports */
1595 port = serial->port[i];
1596 portdata = usb_get_serial_port_data(port);
1597
1598 /* skip closed ports */
1599 - spin_lock_irq(&intfdata->susp_lock);
1600 - if (!portdata || !portdata->opened) {
1601 - spin_unlock_irq(&intfdata->susp_lock);
1602 + if (!portdata || !portdata->opened)
1603 continue;
1604 +
1605 + if (port->interrupt_in_urb) {
1606 + err = usb_submit_urb(port->interrupt_in_urb,
1607 + GFP_ATOMIC);
1608 + if (err) {
1609 + dev_err(&port->dev,
1610 + "%s: submit int urb failed: %d\n",
1611 + __func__, err);
1612 + err_count++;
1613 + }
1614 }
1615
1616 + err = play_delayed(port);
1617 + if (err)
1618 + err_count++;
1619 +
1620 for (j = 0; j < N_IN_URB; j++) {
1621 urb = portdata->in_urbs[j];
1622 err = usb_submit_urb(urb, GFP_ATOMIC);
1623 if (err < 0) {
1624 dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
1625 __func__, err, i);
1626 - spin_unlock_irq(&intfdata->susp_lock);
1627 - goto err_out;
1628 + err_count++;
1629 }
1630 }
1631 - play_delayed(port);
1632 - spin_unlock_irq(&intfdata->susp_lock);
1633 }
1634 - spin_lock_irq(&intfdata->susp_lock);
1635 intfdata->suspended = 0;
1636 spin_unlock_irq(&intfdata->susp_lock);
1637 -err_out:
1638 - return err;
1639 +
1640 + if (err_count)
1641 + return -EIO;
1642 +
1643 + return 0;
1644 }
1645 EXPORT_SYMBOL(usb_wwan_resume);
1646 #endif
1647 diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
1648 index 556d96ce40bf..89a8a89a5eb2 100644
1649 --- a/drivers/video/matrox/matroxfb_base.h
1650 +++ b/drivers/video/matrox/matroxfb_base.h
1651 @@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
1652
1653 #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
1654
1655 -#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
1656 +#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
1657
1658 /* code speedup */
1659 #ifdef CONFIG_FB_MATROX_MILLENIUM
1660 diff --git a/fs/aio.c b/fs/aio.c
1661 index ebd06fd0de89..ded94c4fa30d 100644
1662 --- a/fs/aio.c
1663 +++ b/fs/aio.c
1664 @@ -310,7 +310,6 @@ static void free_ioctx(struct kioctx *ctx)
1665
1666 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
1667
1668 - atomic_sub(avail, &ctx->reqs_active);
1669 head += avail;
1670 head %= ctx->nr_events;
1671 }
1672 @@ -678,6 +677,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
1673 put_rq:
1674 /* everything turned out well, dispose of the aiocb. */
1675 aio_put_req(iocb);
1676 + atomic_dec(&ctx->reqs_active);
1677
1678 /*
1679 * We have to order our ring_info tail store above and test
1680 @@ -717,6 +717,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
1681 if (head == ctx->tail)
1682 goto out;
1683
1684 + head %= ctx->nr_events;
1685 +
1686 while (ret < nr) {
1687 long avail;
1688 struct io_event *ev;
1689 @@ -755,8 +757,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
1690 flush_dcache_page(ctx->ring_pages[0]);
1691
1692 pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
1693 -
1694 - atomic_sub(ret, &ctx->reqs_active);
1695 out:
1696 mutex_unlock(&ctx->ring_lock);
1697
1698 diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
1699 index 290e347b6db3..d85f90c92bb4 100644
1700 --- a/fs/btrfs/backref.c
1701 +++ b/fs/btrfs/backref.c
1702 @@ -1347,9 +1347,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1703 * returns <0 on error
1704 */
1705 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1706 - struct btrfs_extent_item *ei, u32 item_size,
1707 - struct btrfs_extent_inline_ref **out_eiref,
1708 - int *out_type)
1709 + struct btrfs_key *key,
1710 + struct btrfs_extent_item *ei, u32 item_size,
1711 + struct btrfs_extent_inline_ref **out_eiref,
1712 + int *out_type)
1713 {
1714 unsigned long end;
1715 u64 flags;
1716 @@ -1359,19 +1360,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1717 /* first call */
1718 flags = btrfs_extent_flags(eb, ei);
1719 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1720 - info = (struct btrfs_tree_block_info *)(ei + 1);
1721 - *out_eiref =
1722 - (struct btrfs_extent_inline_ref *)(info + 1);
1723 + if (key->type == BTRFS_METADATA_ITEM_KEY) {
1724 + /* a skinny metadata extent */
1725 + *out_eiref =
1726 + (struct btrfs_extent_inline_ref *)(ei + 1);
1727 + } else {
1728 + WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1729 + info = (struct btrfs_tree_block_info *)(ei + 1);
1730 + *out_eiref =
1731 + (struct btrfs_extent_inline_ref *)(info + 1);
1732 + }
1733 } else {
1734 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1735 }
1736 *ptr = (unsigned long)*out_eiref;
1737 - if ((void *)*ptr >= (void *)ei + item_size)
1738 + if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1739 return -ENOENT;
1740 }
1741
1742 end = (unsigned long)ei + item_size;
1743 - *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
1744 + *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1745 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1746
1747 *ptr += btrfs_extent_inline_ref_size(*out_type);
1748 @@ -1390,8 +1398,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1749 * <0 on error.
1750 */
1751 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1752 - struct btrfs_extent_item *ei, u32 item_size,
1753 - u64 *out_root, u8 *out_level)
1754 + struct btrfs_key *key, struct btrfs_extent_item *ei,
1755 + u32 item_size, u64 *out_root, u8 *out_level)
1756 {
1757 int ret;
1758 int type;
1759 @@ -1402,8 +1410,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1760 return 1;
1761
1762 while (1) {
1763 - ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
1764 - &eiref, &type);
1765 + ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
1766 + &eiref, &type);
1767 if (ret < 0)
1768 return ret;
1769
1770 diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
1771 index 0f446d7ca2c0..526d09e70c93 100644
1772 --- a/fs/btrfs/backref.h
1773 +++ b/fs/btrfs/backref.h
1774 @@ -42,8 +42,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1775 u64 *flags);
1776
1777 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1778 - struct btrfs_extent_item *ei, u32 item_size,
1779 - u64 *out_root, u8 *out_level);
1780 + struct btrfs_key *key, struct btrfs_extent_item *ei,
1781 + u32 item_size, u64 *out_root, u8 *out_level);
1782
1783 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1784 u64 extent_item_objectid,
1785 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1786 index 4354b9127713..abecce399354 100644
1787 --- a/fs/btrfs/disk-io.c
1788 +++ b/fs/btrfs/disk-io.c
1789 @@ -3518,6 +3518,11 @@ int close_ctree(struct btrfs_root *root)
1790
1791 btrfs_free_block_groups(fs_info);
1792
1793 + /*
1794 + * we must make sure there is not any read request to
1795 + * submit after we stopping all workers.
1796 + */
1797 + invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1798 btrfs_stop_all_workers(fs_info);
1799
1800 del_fs_roots(fs_info);
1801 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
1802 index e7e7afb4a872..84ceff6abbc1 100644
1803 --- a/fs/btrfs/extent_io.c
1804 +++ b/fs/btrfs/extent_io.c
1805 @@ -1624,6 +1624,7 @@ again:
1806 * shortening the size of the delalloc range we're searching
1807 */
1808 free_extent_state(cached_state);
1809 + cached_state = NULL;
1810 if (!loops) {
1811 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1812 max_bytes = PAGE_CACHE_SIZE - offset;
1813 @@ -2356,7 +2357,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
1814 {
1815 int uptodate = (err == 0);
1816 struct extent_io_tree *tree;
1817 - int ret;
1818 + int ret = 0;
1819
1820 tree = &BTRFS_I(page->mapping->host)->io_tree;
1821
1822 @@ -2370,6 +2371,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
1823 if (!uptodate) {
1824 ClearPageUptodate(page);
1825 SetPageError(page);
1826 + ret = ret < 0 ? ret : -EIO;
1827 + mapping_set_error(page->mapping, ret);
1828 }
1829 return 0;
1830 }
1831 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
1832 index e53009657f0e..0cbe95dc8113 100644
1833 --- a/fs/btrfs/free-space-cache.c
1834 +++ b/fs/btrfs/free-space-cache.c
1835 @@ -835,7 +835,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
1836
1837 if (!matched) {
1838 __btrfs_remove_free_space_cache(ctl);
1839 - btrfs_err(fs_info, "block group %llu has wrong amount of free space",
1840 + btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
1841 block_group->key.objectid);
1842 ret = -1;
1843 }
1844 @@ -847,7 +847,7 @@ out:
1845 spin_unlock(&block_group->lock);
1846 ret = 0;
1847
1848 - btrfs_err(fs_info, "failed to load free space cache for block group %llu",
1849 + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
1850 block_group->key.objectid);
1851 }
1852
1853 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
1854 index eb84c2db1aca..e4f69e3b78b9 100644
1855 --- a/fs/btrfs/scrub.c
1856 +++ b/fs/btrfs/scrub.c
1857 @@ -545,8 +545,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
1858
1859 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1860 do {
1861 - ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
1862 - &ref_root, &ref_level);
1863 + ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
1864 + item_size, &ref_root,
1865 + &ref_level);
1866 printk_in_rcu(KERN_WARNING
1867 "btrfs: %s at logical %llu on dev %s, "
1868 "sector %llu: metadata %s (level %d) in tree "
1869 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
1870 index 256a9a46d544..414c1b9eb896 100644
1871 --- a/fs/btrfs/send.c
1872 +++ b/fs/btrfs/send.c
1873 @@ -1550,6 +1550,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
1874 goto out;
1875 }
1876 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1877 + if (key.type == BTRFS_ROOT_ITEM_KEY) {
1878 + ret = -ENOENT;
1879 + goto out;
1880 + }
1881 *found_inode = key.objectid;
1882 *found_type = btrfs_dir_type(path->nodes[0], di);
1883
1884 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
1885 index b6c23c4abae2..7fc774639a78 100644
1886 --- a/fs/btrfs/volumes.c
1887 +++ b/fs/btrfs/volumes.c
1888 @@ -1384,6 +1384,22 @@ out:
1889 return ret;
1890 }
1891
1892 +/*
1893 + * Function to update ctime/mtime for a given device path.
1894 + * Mainly used for ctime/mtime based probe like libblkid.
1895 + */
1896 +static void update_dev_time(char *path_name)
1897 +{
1898 + struct file *filp;
1899 +
1900 + filp = filp_open(path_name, O_RDWR, 0);
1901 + if (!filp)
1902 + return;
1903 + file_update_time(filp);
1904 + filp_close(filp, NULL);
1905 + return;
1906 +}
1907 +
1908 static int btrfs_rm_dev_item(struct btrfs_root *root,
1909 struct btrfs_device *device)
1910 {
1911 @@ -1612,11 +1628,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1912 struct btrfs_fs_devices *fs_devices;
1913 fs_devices = root->fs_info->fs_devices;
1914 while (fs_devices) {
1915 - if (fs_devices->seed == cur_devices)
1916 + if (fs_devices->seed == cur_devices) {
1917 + fs_devices->seed = cur_devices->seed;
1918 break;
1919 + }
1920 fs_devices = fs_devices->seed;
1921 }
1922 - fs_devices->seed = cur_devices->seed;
1923 cur_devices->seed = NULL;
1924 lock_chunks(root);
1925 __btrfs_close_devices(cur_devices);
1926 @@ -1642,10 +1659,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1927
1928 ret = 0;
1929
1930 - /* Notify udev that device has changed */
1931 - if (bdev)
1932 + if (bdev) {
1933 + /* Notify udev that device has changed */
1934 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1935
1936 + /* Update ctime/mtime for device path for libblkid */
1937 + update_dev_time(device_path);
1938 + }
1939 +
1940 error_brelse:
1941 brelse(bh);
1942 if (bdev)
1943 @@ -1817,7 +1838,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
1944 fs_devices->seeding = 0;
1945 fs_devices->num_devices = 0;
1946 fs_devices->open_devices = 0;
1947 - fs_devices->total_devices = 0;
1948 fs_devices->seed = seed_devices;
1949
1950 generate_random_uuid(fs_devices->fsid);
1951 @@ -2089,6 +2109,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1952 ret = btrfs_commit_transaction(trans, root);
1953 }
1954
1955 + /* Update ctime/mtime for libblkid */
1956 + update_dev_time(device_path);
1957 return ret;
1958
1959 error_trans:
1960 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1961 index fba960ee26de..16bb6591561b 100644
1962 --- a/fs/ext4/mballoc.c
1963 +++ b/fs/ext4/mballoc.c
1964 @@ -3116,7 +3116,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
1965 }
1966 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
1967 start > ac->ac_o_ex.fe_logical);
1968 - BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1969 + BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1970
1971 /* now prepare goal request */
1972
1973 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1974 index 4acf1f78881b..b12a4427aedc 100644
1975 --- a/fs/ext4/page-io.c
1976 +++ b/fs/ext4/page-io.c
1977 @@ -384,6 +384,17 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
1978 ClearPageError(page);
1979
1980 /*
1981 + * Comments copied from block_write_full_page_endio:
1982 + *
1983 + * The page straddles i_size. It must be zeroed out on each and every
1984 + * writepage invocation because it may be mmapped. "A file is mapped
1985 + * in multiples of the page size. For a file that is not a multiple of
1986 + * the page size, the remaining memory is zeroed when mapped, and
1987 + * writes to that region are not written out to the file."
1988 + */
1989 + if (len < PAGE_CACHE_SIZE)
1990 + zero_user_segment(page, len, PAGE_CACHE_SIZE);
1991 + /*
1992 * In the first loop we prepare and mark buffers to submit. We have to
1993 * mark all buffers in the page before submitting so that
1994 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
1995 @@ -394,19 +405,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
1996 do {
1997 block_start = bh_offset(bh);
1998 if (block_start >= len) {
1999 - /*
2000 - * Comments copied from block_write_full_page_endio:
2001 - *
2002 - * The page straddles i_size. It must be zeroed out on
2003 - * each and every writepage invocation because it may
2004 - * be mmapped. "A file is mapped in multiples of the
2005 - * page size. For a file that is not a multiple of
2006 - * the page size, the remaining memory is zeroed when
2007 - * mapped, and writes to that region are not written
2008 - * out to the file."
2009 - */
2010 - zero_user_segment(page, block_start,
2011 - block_start + blocksize);
2012 clear_buffer_dirty(bh);
2013 set_buffer_uptodate(bh);
2014 continue;
2015 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
2016 index 623325e2ff97..078bc2fc74ff 100644
2017 --- a/include/linux/irqdesc.h
2018 +++ b/include/linux/irqdesc.h
2019 @@ -27,6 +27,8 @@ struct irq_desc;
2020 * @irq_count: stats field to detect stalled irqs
2021 * @last_unhandled: aging timer for unhandled count
2022 * @irqs_unhandled: stats field for spurious unhandled interrupts
2023 + * @threads_handled: stats field for deferred spurious detection of threaded handlers
2024 + * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
2025 * @lock: locking for SMP
2026 * @affinity_hint: hint to user space for preferred irq affinity
2027 * @affinity_notify: context for notification of affinity changes
2028 @@ -52,6 +54,8 @@ struct irq_desc {
2029 unsigned int irq_count; /* For detecting broken IRQs */
2030 unsigned long last_unhandled; /* Aging timer for unhandled count */
2031 unsigned int irqs_unhandled;
2032 + atomic_t threads_handled;
2033 + int threads_handled_last;
2034 raw_spinlock_t lock;
2035 struct cpumask *percpu_enabled;
2036 #ifdef CONFIG_SMP
2037 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
2038 index 89573a33ab3c..2e99b8e08770 100644
2039 --- a/include/linux/ptrace.h
2040 +++ b/include/linux/ptrace.h
2041 @@ -5,6 +5,7 @@
2042 #include <linux/sched.h> /* For struct task_struct. */
2043 #include <linux/err.h> /* for IS_ERR_VALUE */
2044 #include <linux/bug.h> /* For BUG_ON. */
2045 +#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
2046 #include <uapi/linux/ptrace.h>
2047
2048 /*
2049 @@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message)
2050 }
2051
2052 /**
2053 + * ptrace_event_pid - possibly stop for a ptrace event notification
2054 + * @event: %PTRACE_EVENT_* value to report
2055 + * @pid: process identifier for %PTRACE_GETEVENTMSG to return
2056 + *
2057 + * Check whether @event is enabled and, if so, report @event and @pid
2058 + * to the ptrace parent. @pid is reported as the pid_t seen from the
2059 + * the ptrace parent's pid namespace.
2060 + *
2061 + * Called without locks.
2062 + */
2063 +static inline void ptrace_event_pid(int event, struct pid *pid)
2064 +{
2065 + /*
2066 + * FIXME: There's a potential race if a ptracer in a different pid
2067 + * namespace than parent attaches between computing message below and
2068 + * when we acquire tasklist_lock in ptrace_stop(). If this happens,
2069 + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
2070 + */
2071 + unsigned long message = 0;
2072 + struct pid_namespace *ns;
2073 +
2074 + rcu_read_lock();
2075 + ns = task_active_pid_ns(rcu_dereference(current->parent));
2076 + if (ns)
2077 + message = pid_nr_ns(pid, ns);
2078 + rcu_read_unlock();
2079 +
2080 + ptrace_event(event, message);
2081 +}
2082 +
2083 +/**
2084 * ptrace_init_task - initialize ptrace state for a new child
2085 * @child: new child task
2086 * @ptrace: true if child should be ptrace'd by parent's tracer
2087 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
2088 index ffa2696d64dc..a63529ab9fd7 100644
2089 --- a/include/target/target_core_backend.h
2090 +++ b/include/target/target_core_backend.h
2091 @@ -50,6 +50,7 @@ int transport_subsystem_register(struct se_subsystem_api *);
2092 void transport_subsystem_release(struct se_subsystem_api *);
2093
2094 void target_complete_cmd(struct se_cmd *, u8);
2095 +void target_complete_cmd_with_length(struct se_cmd *, u8, int);
2096
2097 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
2098 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
2099 diff --git a/kernel/fork.c b/kernel/fork.c
2100 index ff7be9dac4c1..270c1dab674a 100644
2101 --- a/kernel/fork.c
2102 +++ b/kernel/fork.c
2103 @@ -1607,10 +1607,12 @@ long do_fork(unsigned long clone_flags,
2104 */
2105 if (!IS_ERR(p)) {
2106 struct completion vfork;
2107 + struct pid *pid;
2108
2109 trace_sched_process_fork(current, p);
2110
2111 - nr = task_pid_vnr(p);
2112 + pid = get_task_pid(p, PIDTYPE_PID);
2113 + nr = pid_vnr(pid);
2114
2115 if (clone_flags & CLONE_PARENT_SETTID)
2116 put_user(nr, parent_tidptr);
2117 @@ -1625,12 +1627,14 @@ long do_fork(unsigned long clone_flags,
2118
2119 /* forking complete and child started to run, tell ptracer */
2120 if (unlikely(trace))
2121 - ptrace_event(trace, nr);
2122 + ptrace_event_pid(trace, pid);
2123
2124 if (clone_flags & CLONE_VFORK) {
2125 if (!wait_for_vfork_done(p, &vfork))
2126 - ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
2127 + ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2128 }
2129 +
2130 + put_pid(pid);
2131 } else {
2132 nr = PTR_ERR(p);
2133 }
2134 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2135 index 8815abfdf2cb..a79d267b64ec 100644
2136 --- a/kernel/irq/manage.c
2137 +++ b/kernel/irq/manage.c
2138 @@ -861,8 +861,8 @@ static int irq_thread(void *data)
2139 irq_thread_check_affinity(desc, action);
2140
2141 action_ret = handler_fn(desc, action);
2142 - if (!noirqdebug)
2143 - note_interrupt(action->irq, desc, action_ret);
2144 + if (action_ret == IRQ_HANDLED)
2145 + atomic_inc(&desc->threads_handled);
2146
2147 wake_threads_waitq(desc);
2148 }
2149 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
2150 index 7b5f012bde9d..febcee3c2aa9 100644
2151 --- a/kernel/irq/spurious.c
2152 +++ b/kernel/irq/spurious.c
2153 @@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
2154 return action && (action->flags & IRQF_IRQPOLL);
2155 }
2156
2157 +#define SPURIOUS_DEFERRED 0x80000000
2158 +
2159 void note_interrupt(unsigned int irq, struct irq_desc *desc,
2160 irqreturn_t action_ret)
2161 {
2162 if (desc->istate & IRQS_POLL_INPROGRESS)
2163 return;
2164
2165 - /* we get here again via the threaded handler */
2166 - if (action_ret == IRQ_WAKE_THREAD)
2167 - return;
2168 -
2169 if (bad_action_ret(action_ret)) {
2170 report_bad_irq(irq, desc, action_ret);
2171 return;
2172 }
2173
2174 + /*
2175 + * We cannot call note_interrupt from the threaded handler
2176 + * because we need to look at the compound of all handlers
2177 + * (primary and threaded). Aside of that in the threaded
2178 + * shared case we have no serialization against an incoming
2179 + * hardware interrupt while we are dealing with a threaded
2180 + * result.
2181 + *
2182 + * So in case a thread is woken, we just note the fact and
2183 + * defer the analysis to the next hardware interrupt.
2184 + *
2185 + * The threaded handlers store whether they sucessfully
2186 + * handled an interrupt and we check whether that number
2187 + * changed versus the last invocation.
2188 + *
2189 + * We could handle all interrupts with the delayed by one
2190 + * mechanism, but for the non forced threaded case we'd just
2191 + * add pointless overhead to the straight hardirq interrupts
2192 + * for the sake of a few lines less code.
2193 + */
2194 + if (action_ret & IRQ_WAKE_THREAD) {
2195 + /*
2196 + * There is a thread woken. Check whether one of the
2197 + * shared primary handlers returned IRQ_HANDLED. If
2198 + * not we defer the spurious detection to the next
2199 + * interrupt.
2200 + */
2201 + if (action_ret == IRQ_WAKE_THREAD) {
2202 + int handled;
2203 + /*
2204 + * We use bit 31 of thread_handled_last to
2205 + * denote the deferred spurious detection
2206 + * active. No locking necessary as
2207 + * thread_handled_last is only accessed here
2208 + * and we have the guarantee that hard
2209 + * interrupts are not reentrant.
2210 + */
2211 + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
2212 + desc->threads_handled_last |= SPURIOUS_DEFERRED;
2213 + return;
2214 + }
2215 + /*
2216 + * Check whether one of the threaded handlers
2217 + * returned IRQ_HANDLED since the last
2218 + * interrupt happened.
2219 + *
2220 + * For simplicity we just set bit 31, as it is
2221 + * set in threads_handled_last as well. So we
2222 + * avoid extra masking. And we really do not
2223 + * care about the high bits of the handled
2224 + * count. We just care about the count being
2225 + * different than the one we saw before.
2226 + */
2227 + handled = atomic_read(&desc->threads_handled);
2228 + handled |= SPURIOUS_DEFERRED;
2229 + if (handled != desc->threads_handled_last) {
2230 + action_ret = IRQ_HANDLED;
2231 + /*
2232 + * Note: We keep the SPURIOUS_DEFERRED
2233 + * bit set. We are handling the
2234 + * previous invocation right now.
2235 + * Keep it for the current one, so the
2236 + * next hardware interrupt will
2237 + * account for it.
2238 + */
2239 + desc->threads_handled_last = handled;
2240 + } else {
2241 + /*
2242 + * None of the threaded handlers felt
2243 + * responsible for the last interrupt
2244 + *
2245 + * We keep the SPURIOUS_DEFERRED bit
2246 + * set in threads_handled_last as we
2247 + * need to account for the current
2248 + * interrupt as well.
2249 + */
2250 + action_ret = IRQ_NONE;
2251 + }
2252 + } else {
2253 + /*
2254 + * One of the primary handlers returned
2255 + * IRQ_HANDLED. So we don't care about the
2256 + * threaded handlers on the same line. Clear
2257 + * the deferred detection bit.
2258 + *
2259 + * In theory we could/should check whether the
2260 + * deferred bit is set and take the result of
2261 + * the previous run into account here as
2262 + * well. But it's really not worth the
2263 + * trouble. If every other interrupt is
2264 + * handled we never trigger the spurious
2265 + * detector. And if this is just the one out
2266 + * of 100k unhandled ones which is handled
2267 + * then we merily delay the spurious detection
2268 + * by one hard interrupt. Not a real problem.
2269 + */
2270 + desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
2271 + }
2272 + }
2273 +
2274 if (unlikely(action_ret == IRQ_NONE)) {
2275 /*
2276 * If we are seeing only the odd spurious IRQ caused by
2277 diff --git a/lib/idr.c b/lib/idr.c
2278 index cca4b9302a71..a3bfde8ad60e 100644
2279 --- a/lib/idr.c
2280 +++ b/lib/idr.c
2281 @@ -250,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
2282 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
2283
2284 /* if already at the top layer, we need to grow */
2285 - if (id >= 1 << (idp->layers * IDR_BITS)) {
2286 + if (id > idr_max(idp->layers)) {
2287 *starting_id = id;
2288 return -EAGAIN;
2289 }
2290 @@ -829,12 +829,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
2291 if (!p)
2292 return ERR_PTR(-EINVAL);
2293
2294 - n = (p->layer+1) * IDR_BITS;
2295 -
2296 - if (id >= (1 << n))
2297 + if (id > idr_max(p->layer + 1))
2298 return ERR_PTR(-EINVAL);
2299
2300 - n -= IDR_BITS;
2301 + n = p->layer * IDR_BITS;
2302 while ((n > 0) && p) {
2303 p = p->ary[(id >> n) & IDR_MASK];
2304 n -= IDR_BITS;
2305 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2306 index 4f8548abd6ee..603f1fa1b7a3 100644
2307 --- a/mm/memory-failure.c
2308 +++ b/mm/memory-failure.c
2309 @@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
2310 #endif
2311 si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
2312
2313 - if ((flags & MF_ACTION_REQUIRED) && t == current) {
2314 + if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
2315 si.si_code = BUS_MCEERR_AR;
2316 - ret = force_sig_info(SIGBUS, &si, t);
2317 + ret = force_sig_info(SIGBUS, &si, current);
2318 } else {
2319 /*
2320 * Don't use force here, it's convenient if the signal
2321 @@ -382,10 +382,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
2322 }
2323 }
2324
2325 -static int task_early_kill(struct task_struct *tsk)
2326 +static int task_early_kill(struct task_struct *tsk, int force_early)
2327 {
2328 if (!tsk->mm)
2329 return 0;
2330 + if (force_early)
2331 + return 1;
2332 if (tsk->flags & PF_MCE_PROCESS)
2333 return !!(tsk->flags & PF_MCE_EARLY);
2334 return sysctl_memory_failure_early_kill;
2335 @@ -395,7 +397,7 @@ static int task_early_kill(struct task_struct *tsk)
2336 * Collect processes when the error hit an anonymous page.
2337 */
2338 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
2339 - struct to_kill **tkc)
2340 + struct to_kill **tkc, int force_early)
2341 {
2342 struct vm_area_struct *vma;
2343 struct task_struct *tsk;
2344 @@ -411,7 +413,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
2345 for_each_process (tsk) {
2346 struct anon_vma_chain *vmac;
2347
2348 - if (!task_early_kill(tsk))
2349 + if (!task_early_kill(tsk, force_early))
2350 continue;
2351 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
2352 pgoff, pgoff) {
2353 @@ -430,7 +432,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
2354 * Collect processes when the error hit a file mapped page.
2355 */
2356 static void collect_procs_file(struct page *page, struct list_head *to_kill,
2357 - struct to_kill **tkc)
2358 + struct to_kill **tkc, int force_early)
2359 {
2360 struct vm_area_struct *vma;
2361 struct task_struct *tsk;
2362 @@ -441,7 +443,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
2363 for_each_process(tsk) {
2364 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
2365
2366 - if (!task_early_kill(tsk))
2367 + if (!task_early_kill(tsk, force_early))
2368 continue;
2369
2370 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
2371 @@ -467,7 +469,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
2372 * First preallocate one tokill structure outside the spin locks,
2373 * so that we can kill at least one process reasonably reliable.
2374 */
2375 -static void collect_procs(struct page *page, struct list_head *tokill)
2376 +static void collect_procs(struct page *page, struct list_head *tokill,
2377 + int force_early)
2378 {
2379 struct to_kill *tk;
2380
2381 @@ -478,9 +481,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
2382 if (!tk)
2383 return;
2384 if (PageAnon(page))
2385 - collect_procs_anon(page, tokill, &tk);
2386 + collect_procs_anon(page, tokill, &tk, force_early);
2387 else
2388 - collect_procs_file(page, tokill, &tk);
2389 + collect_procs_file(page, tokill, &tk, force_early);
2390 kfree(tk);
2391 }
2392
2393 @@ -965,7 +968,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
2394 * there's nothing that can be done.
2395 */
2396 if (kill)
2397 - collect_procs(ppage, &tokill);
2398 + collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
2399
2400 ret = try_to_unmap(ppage, ttu);
2401 if (ret != SWAP_SUCCESS)
2402 diff --git a/mm/rmap.c b/mm/rmap.c
2403 index b730a4409be6..705bfc8e6fcd 100644
2404 --- a/mm/rmap.c
2405 +++ b/mm/rmap.c
2406 @@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
2407 * LOCK should suffice since the actual taking of the lock must
2408 * happen _before_ what follows.
2409 */
2410 + might_sleep();
2411 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
2412 anon_vma_lock_write(anon_vma);
2413 anon_vma_unlock_write(anon_vma);
2414 @@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
2415 * above cannot corrupt).
2416 */
2417 if (!page_mapped(page)) {
2418 + rcu_read_unlock();
2419 put_anon_vma(anon_vma);
2420 - anon_vma = NULL;
2421 + return NULL;
2422 }
2423 out:
2424 rcu_read_unlock();
2425 @@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
2426 }
2427
2428 if (!page_mapped(page)) {
2429 + rcu_read_unlock();
2430 put_anon_vma(anon_vma);
2431 - anon_vma = NULL;
2432 - goto out;
2433 + return NULL;
2434 }
2435
2436 /* we pinned the anon_vma, its safe to sleep */
2437 diff --git a/mm/vmscan.c b/mm/vmscan.c
2438 index 43ddef3cf44f..4e89500391dc 100644
2439 --- a/mm/vmscan.c
2440 +++ b/mm/vmscan.c
2441 @@ -2286,10 +2286,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2442
2443 for (i = 0; i <= ZONE_NORMAL; i++) {
2444 zone = &pgdat->node_zones[i];
2445 + if (!populated_zone(zone))
2446 + continue;
2447 +
2448 pfmemalloc_reserve += min_wmark_pages(zone);
2449 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2450 }
2451
2452 + /* If there are no reserves (unexpected config) then do not throttle */
2453 + if (!pfmemalloc_reserve)
2454 + return true;
2455 +
2456 wmark_ok = free_pages > pfmemalloc_reserve / 2;
2457
2458 /* kswapd must be awake if processes are being throttled */
2459 @@ -2314,9 +2321,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2460 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2461 nodemask_t *nodemask)
2462 {
2463 + struct zoneref *z;
2464 struct zone *zone;
2465 - int high_zoneidx = gfp_zone(gfp_mask);
2466 - pg_data_t *pgdat;
2467 + pg_data_t *pgdat = NULL;
2468
2469 /*
2470 * Kernel threads should not be throttled as they may be indirectly
2471 @@ -2335,10 +2342,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2472 if (fatal_signal_pending(current))
2473 goto out;
2474
2475 - /* Check if the pfmemalloc reserves are ok */
2476 - first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2477 - pgdat = zone->zone_pgdat;
2478 - if (pfmemalloc_watermark_ok(pgdat))
2479 + /*
2480 + * Check if the pfmemalloc reserves are ok by finding the first node
2481 + * with a usable ZONE_NORMAL or lower zone. The expectation is that
2482 + * GFP_KERNEL will be required for allocating network buffers when
2483 + * swapping over the network so ZONE_HIGHMEM is unusable.
2484 + *
2485 + * Throttling is based on the first usable node and throttled processes
2486 + * wait on a queue until kswapd makes progress and wakes them. There
2487 + * is an affinity then between processes waking up and where reclaim
2488 + * progress has been made assuming the process wakes on the same node.
2489 + * More importantly, processes running on remote nodes will not compete
2490 + * for remote pfmemalloc reserves and processes on different nodes
2491 + * should make reasonable progress.
2492 + */
2493 + for_each_zone_zonelist_nodemask(zone, z, zonelist,
2494 + gfp_mask, nodemask) {
2495 + if (zone_idx(zone) > ZONE_NORMAL)
2496 + continue;
2497 +
2498 + /* Throttle based on the first usable node */
2499 + pgdat = zone->zone_pgdat;
2500 + if (pfmemalloc_watermark_ok(pgdat))
2501 + goto out;
2502 + break;
2503 + }
2504 +
2505 + /* If no zone was usable by the allocation flags then do not throttle */
2506 + if (!pgdat)
2507 goto out;
2508
2509 /* Account for the throttling */
2510 @@ -3059,7 +3090,10 @@ static int kswapd(void *p)
2511 }
2512 }
2513
2514 + tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
2515 current->reclaim_state = NULL;
2516 + lockdep_clear_current_reclaim_state();
2517 +
2518 return 0;
2519 }
2520
2521 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
2522 index 36fed40c162c..302d29b3744d 100644
2523 --- a/net/bluetooth/l2cap_sock.c
2524 +++ b/net/bluetooth/l2cap_sock.c
2525 @@ -949,13 +949,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
2526 /* Check for backlog size */
2527 if (sk_acceptq_is_full(parent)) {
2528 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2529 + release_sock(parent);
2530 return NULL;
2531 }
2532
2533 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
2534 GFP_ATOMIC);
2535 - if (!sk)
2536 + if (!sk) {
2537 + release_sock(parent);
2538 return NULL;
2539 + }
2540
2541 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
2542
2543 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2544 index 66f722b5052a..9f84a5f7404d 100644
2545 --- a/net/core/skbuff.c
2546 +++ b/net/core/skbuff.c
2547 @@ -2844,6 +2844,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2548 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2549
2550 while (pos < offset + len && i < nfrags) {
2551 + if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
2552 + goto err;
2553 *frag = skb_shinfo(skb)->frags[i];
2554 __skb_frag_ref(frag);
2555 size = skb_frag_size(frag);
2556 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
2557 index 276aa86f366b..215e9b008db6 100644
2558 --- a/net/iucv/af_iucv.c
2559 +++ b/net/iucv/af_iucv.c
2560 @@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
2561 spin_lock_irqsave(&list->lock, flags);
2562
2563 while (list_skb != (struct sk_buff *)list) {
2564 - if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
2565 + if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
2566 this = list_skb;
2567 break;
2568 }