Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0133-3.10.34-all-fixes.patch



Revision 2471
Wed Jul 2 10:42:37 2014 UTC by niro
File MIME type: application/x-xz
File size: 99225 bytes
-uncompressed
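
For reference, applying this cumulative 3.10.33 -> 3.10.34 fix-up patch to a vanilla kernel tree could look roughly like the sketch below. The local file name, the linux-3.10.33 directory and the use of patch -p1 are assumptions for illustration, not part of this page; the check for the xz magic bytes covers both the compressed (application/x-xz) and the uncompressed form noted in the log message.

    # Minimal sketch: decompress if needed, then apply with patch -p1 from the tree root.
    import lzma, pathlib, subprocess

    raw = pathlib.Path("0133-3.10.34-all-fixes.patch").read_bytes()  # downloaded copy (path is an assumption)
    if raw[:6] == b"\xfd7zXZ\x00":          # xz magic; this revision may also be stored uncompressed
        raw = lzma.decompress(raw)
    subprocess.run(["patch", "-p1"], cwd="linux-3.10.33", input=raw, check=True)
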
1 diff --git a/Makefile b/Makefile
2 index 1e602eb906fa..571a1bf14868 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 33
9 +SUBLEVEL = 34
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
14 index dd64cc6f9cba..b07c09e5a0ac 100644
15 --- a/arch/arm/include/asm/spinlock.h
16 +++ b/arch/arm/include/asm/spinlock.h
17 @@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
18 " subs %1, %0, %0, ror #16\n"
19 " addeq %0, %0, %4\n"
20 " strexeq %2, %0, [%3]"
21 - : "=&r" (slock), "=&r" (contended), "=r" (res)
22 + : "=&r" (slock), "=&r" (contended), "=&r" (res)
23 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
24 : "cc");
25 } while (res);
26 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
27 index b4b1d397592b..eb83bcc70ec8 100644
28 --- a/arch/arm/kernel/setup.c
29 +++ b/arch/arm/kernel/setup.c
30 @@ -530,6 +530,7 @@ void __init dump_machine_table(void)
31 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
32 {
33 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
34 + u64 aligned_start;
35
36 if (meminfo.nr_banks >= NR_BANKS) {
37 printk(KERN_CRIT "NR_BANKS too low, "
38 @@ -542,10 +543,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
39 * Size is appropriately rounded down, start is rounded up.
40 */
41 size -= start & ~PAGE_MASK;
42 - bank->start = PAGE_ALIGN(start);
43 + aligned_start = PAGE_ALIGN(start);
44
45 -#ifndef CONFIG_ARM_LPAE
46 - if (bank->start + size < bank->start) {
47 +#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
48 + if (aligned_start > ULONG_MAX) {
49 + printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
50 + "32-bit physical address space\n", (long long)start);
51 + return -EINVAL;
52 + }
53 +
54 + if (aligned_start + size > ULONG_MAX) {
55 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
56 "32-bit physical address space\n", (long long)start);
57 /*
58 @@ -553,10 +560,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
59 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
60 * This means we lose a page after masking.
61 */
62 - size = ULONG_MAX - bank->start;
63 + size = ULONG_MAX - aligned_start;
64 }
65 #endif
66
67 + if (aligned_start < PHYS_OFFSET) {
68 + if (aligned_start + size <= PHYS_OFFSET) {
69 + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
70 + aligned_start, aligned_start + size);
71 + return -EINVAL;
72 + }
73 +
74 + pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
75 + aligned_start, (u64)PHYS_OFFSET);
76 +
77 + size -= PHYS_OFFSET - aligned_start;
78 + aligned_start = PHYS_OFFSET;
79 + }
80 +
81 + bank->start = aligned_start;
82 bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
83
84 /*
85 diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
86 index f33679d2d3ee..50e1d850ee2e 100644
87 --- a/arch/arm/mach-sa1100/include/mach/collie.h
88 +++ b/arch/arm/mach-sa1100/include/mach/collie.h
89 @@ -13,6 +13,8 @@
90 #ifndef __ASM_ARCH_COLLIE_H
91 #define __ASM_ARCH_COLLIE_H
92
93 +#include "hardware.h" /* Gives GPIO_MAX */
94 +
95 extern void locomolcd_power(int on);
96
97 #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
98 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
99 index 87e6207b05e4..3d0074e10595 100644
100 --- a/arch/mips/include/asm/mipsregs.h
101 +++ b/arch/mips/include/asm/mipsregs.h
102 @@ -14,6 +14,7 @@
103 #define _ASM_MIPSREGS_H
104
105 #include <linux/linkage.h>
106 +#include <linux/types.h>
107 #include <asm/hazards.h>
108 #include <asm/war.h>
109
110 diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
111 index b47a0e1ab001..c712ecec13ba 100644
112 --- a/arch/powerpc/kernel/reloc_64.S
113 +++ b/arch/powerpc/kernel/reloc_64.S
114 @@ -81,6 +81,7 @@ _GLOBAL(relocate)
115
116 6: blr
117
118 +.balign 8
119 p_dyn: .llong __dynamic_start - 0b
120 p_rela: .llong __rela_dyn_start - 0b
121 p_st: .llong _stext - 0b
122 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
123 index 73afd11799ca..df63cae573e0 100644
124 --- a/arch/x86/kernel/head_32.S
125 +++ b/arch/x86/kernel/head_32.S
126 @@ -566,6 +566,10 @@ ENDPROC(early_idt_handlers)
127 /* This is global to keep gas from relaxing the jumps */
128 ENTRY(early_idt_handler)
129 cld
130 +
131 + cmpl $2,(%esp) # X86_TRAP_NMI
132 + je is_nmi # Ignore NMI
133 +
134 cmpl $2,%ss:early_recursion_flag
135 je hlt_loop
136 incl %ss:early_recursion_flag
137 @@ -616,8 +620,9 @@ ex_entry:
138 pop %edx
139 pop %ecx
140 pop %eax
141 - addl $8,%esp /* drop vector number and error code */
142 decl %ss:early_recursion_flag
143 +is_nmi:
144 + addl $8,%esp /* drop vector number and error code */
145 iret
146 ENDPROC(early_idt_handler)
147
148 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
149 index a8368608ab41..f2a9a2aa98f3 100644
150 --- a/arch/x86/kernel/head_64.S
151 +++ b/arch/x86/kernel/head_64.S
152 @@ -343,6 +343,9 @@ early_idt_handlers:
153 ENTRY(early_idt_handler)
154 cld
155
156 + cmpl $2,(%rsp) # X86_TRAP_NMI
157 + je is_nmi # Ignore NMI
158 +
159 cmpl $2,early_recursion_flag(%rip)
160 jz 1f
161 incl early_recursion_flag(%rip)
162 @@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
163 popq %rdx
164 popq %rcx
165 popq %rax
166 - addq $16,%rsp # drop vector number and error code
167 decl early_recursion_flag(%rip)
168 +is_nmi:
169 + addq $16,%rsp # drop vector number and error code
170 INTERRUPT_RETURN
171 ENDPROC(early_idt_handler)
172
173 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
174 index f7ea30dce238..b03ff1842547 100644
175 --- a/arch/x86/kernel/i387.c
176 +++ b/arch/x86/kernel/i387.c
177 @@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
178
179 void __kernel_fpu_end(void)
180 {
181 - if (use_eager_fpu())
182 - math_state_restore();
183 - else
184 + if (use_eager_fpu()) {
185 + /*
186 + * For eager fpu, most the time, tsk_used_math() is true.
187 + * Restore the user math as we are done with the kernel usage.
188 + * At few instances during thread exit, signal handling etc,
189 + * tsk_used_math() is false. Those few places will take proper
190 + * actions, so we don't need to restore the math here.
191 + */
192 + if (likely(tsk_used_math(current)))
193 + math_state_restore();
194 + } else {
195 stts();
196 + }
197 }
198 EXPORT_SYMBOL(__kernel_fpu_end);
199
200 diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
201 index 04ee1e2e4c02..52dbf1e400dc 100644
202 --- a/arch/x86/kernel/quirks.c
203 +++ b/arch/x86/kernel/quirks.c
204 @@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
205 return;
206
207 pci_read_config_dword(nb_ht, 0x60, &val);
208 - node = val & 7;
209 + node = pcibus_to_node(dev->bus) | (val & 7);
210 /*
211 * Some hardware may return an invalid node ID,
212 * so check it first:
213 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
214 index a14a6eaf871d..765210d4d925 100644
215 --- a/arch/x86/kvm/svm.c
216 +++ b/arch/x86/kvm/svm.c
217 @@ -2985,10 +2985,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
218 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
219 /* instruction emulation calls kvm_set_cr8() */
220 r = cr_interception(svm);
221 - if (irqchip_in_kernel(svm->vcpu.kvm)) {
222 - clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
223 + if (irqchip_in_kernel(svm->vcpu.kvm))
224 return r;
225 - }
226 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
227 return r;
228 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
229 @@ -3550,6 +3548,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
230 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
231 return;
232
233 + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
234 +
235 if (irr == -1)
236 return;
237
238 diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
239 index 3322b47ab7ca..c2dd598e25a2 100644
240 --- a/drivers/acpi/resource.c
241 +++ b/drivers/acpi/resource.c
242 @@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
243 switch (ares->type) {
244 case ACPI_RESOURCE_TYPE_MEMORY24:
245 memory24 = &ares->data.memory24;
246 + if (!memory24->address_length)
247 + return false;
248 acpi_dev_get_memresource(res, memory24->minimum,
249 memory24->address_length,
250 memory24->write_protect);
251 break;
252 case ACPI_RESOURCE_TYPE_MEMORY32:
253 memory32 = &ares->data.memory32;
254 + if (!memory32->address_length)
255 + return false;
256 acpi_dev_get_memresource(res, memory32->minimum,
257 memory32->address_length,
258 memory32->write_protect);
259 break;
260 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
261 fixed_memory32 = &ares->data.fixed_memory32;
262 + if (!fixed_memory32->address_length)
263 + return false;
264 acpi_dev_get_memresource(res, fixed_memory32->address,
265 fixed_memory32->address_length,
266 fixed_memory32->write_protect);
267 @@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
268 switch (ares->type) {
269 case ACPI_RESOURCE_TYPE_IO:
270 io = &ares->data.io;
271 + if (!io->address_length)
272 + return false;
273 acpi_dev_get_ioresource(res, io->minimum,
274 io->address_length,
275 io->io_decode);
276 break;
277 case ACPI_RESOURCE_TYPE_FIXED_IO:
278 fixed_io = &ares->data.fixed_io;
279 + if (!fixed_io->address_length)
280 + return false;
281 acpi_dev_get_ioresource(res, fixed_io->address,
282 fixed_io->address_length,
283 ACPI_DECODE_10);
284 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
285 index 9c1a435d10e6..035920f2ab4d 100644
286 --- a/drivers/acpi/sleep.c
287 +++ b/drivers/acpi/sleep.c
288 @@ -78,6 +78,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
289 return 0;
290 }
291
292 +static bool acpi_sleep_state_supported(u8 sleep_state)
293 +{
294 + acpi_status status;
295 + u8 type_a, type_b;
296 +
297 + status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
298 + return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
299 + || (acpi_gbl_FADT.sleep_control.address
300 + && acpi_gbl_FADT.sleep_status.address));
301 +}
302 +
303 #ifdef CONFIG_ACPI_SLEEP
304 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
305
306 @@ -600,15 +611,9 @@ static void acpi_sleep_suspend_setup(void)
307 {
308 int i;
309
310 - for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
311 - acpi_status status;
312 - u8 type_a, type_b;
313 -
314 - status = acpi_get_sleep_type_data(i, &type_a, &type_b);
315 - if (ACPI_SUCCESS(status)) {
316 + for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
317 + if (acpi_sleep_state_supported(i))
318 sleep_states[i] = 1;
319 - }
320 - }
321
322 suspend_set_ops(old_suspend_ordering ?
323 &acpi_suspend_ops_old : &acpi_suspend_ops);
324 @@ -739,11 +744,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
325
326 static void acpi_sleep_hibernate_setup(void)
327 {
328 - acpi_status status;
329 - u8 type_a, type_b;
330 -
331 - status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
332 - if (ACPI_FAILURE(status))
333 + if (!acpi_sleep_state_supported(ACPI_STATE_S4))
334 return;
335
336 hibernation_set_ops(old_suspend_ordering ?
337 @@ -792,8 +793,6 @@ static void acpi_power_off(void)
338
339 int __init acpi_sleep_init(void)
340 {
341 - acpi_status status;
342 - u8 type_a, type_b;
343 char supported[ACPI_S_STATE_COUNT * 3 + 1];
344 char *pos = supported;
345 int i;
346 @@ -808,8 +807,7 @@ int __init acpi_sleep_init(void)
347 acpi_sleep_suspend_setup();
348 acpi_sleep_hibernate_setup();
349
350 - status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
351 - if (ACPI_SUCCESS(status)) {
352 + if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
353 sleep_states[ACPI_STATE_S5] = 1;
354 pm_power_off_prepare = acpi_power_off_prepare;
355 pm_power_off = acpi_power_off;
356 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
357 index 15518fda2d2a..8b8cbe9bcb92 100644
358 --- a/drivers/ata/libata-core.c
359 +++ b/drivers/ata/libata-core.c
360 @@ -4152,6 +4152,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
361
362 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
363 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
364 + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
365
366 /* Blacklist entries taken from Silicon Image 3124/3132
367 Windows driver .inf file - also several Linux problem reports */
368 diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
369 index 664a6ff0a823..392ad513dc04 100644
370 --- a/drivers/firewire/core-device.c
371 +++ b/drivers/firewire/core-device.c
372 @@ -895,7 +895,7 @@ static int lookup_existing_device(struct device *dev, void *data)
373 old->config_rom_retries = 0;
374 fw_notice(card, "rediscovered device %s\n", dev_name(dev));
375
376 - PREPARE_DELAYED_WORK(&old->work, fw_device_update);
377 + old->workfn = fw_device_update;
378 fw_schedule_device_work(old, 0);
379
380 if (current_node == card->root_node)
381 @@ -1054,7 +1054,7 @@ static void fw_device_init(struct work_struct *work)
382 if (atomic_cmpxchg(&device->state,
383 FW_DEVICE_INITIALIZING,
384 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
385 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
386 + device->workfn = fw_device_shutdown;
387 fw_schedule_device_work(device, SHUTDOWN_DELAY);
388 } else {
389 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
390 @@ -1175,13 +1175,20 @@ static void fw_device_refresh(struct work_struct *work)
391 dev_name(&device->device), fw_rcode_string(ret));
392 gone:
393 atomic_set(&device->state, FW_DEVICE_GONE);
394 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
395 + device->workfn = fw_device_shutdown;
396 fw_schedule_device_work(device, SHUTDOWN_DELAY);
397 out:
398 if (node_id == card->root_node->node_id)
399 fw_schedule_bm_work(card, 0);
400 }
401
402 +static void fw_device_workfn(struct work_struct *work)
403 +{
404 + struct fw_device *device = container_of(to_delayed_work(work),
405 + struct fw_device, work);
406 + device->workfn(work);
407 +}
408 +
409 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
410 {
411 struct fw_device *device;
412 @@ -1231,7 +1238,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
413 * power-up after getting plugged in. We schedule the
414 * first config rom scan half a second after bus reset.
415 */
416 - INIT_DELAYED_WORK(&device->work, fw_device_init);
417 + device->workfn = fw_device_init;
418 + INIT_DELAYED_WORK(&device->work, fw_device_workfn);
419 fw_schedule_device_work(device, INITIAL_DELAY);
420 break;
421
422 @@ -1247,7 +1255,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
423 if (atomic_cmpxchg(&device->state,
424 FW_DEVICE_RUNNING,
425 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
426 - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
427 + device->workfn = fw_device_refresh;
428 fw_schedule_device_work(device,
429 device->is_local ? 0 : INITIAL_DELAY);
430 }
431 @@ -1262,7 +1270,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
432 smp_wmb(); /* update node_id before generation */
433 device->generation = card->generation;
434 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
435 - PREPARE_DELAYED_WORK(&device->work, fw_device_update);
436 + device->workfn = fw_device_update;
437 fw_schedule_device_work(device, 0);
438 }
439 break;
440 @@ -1287,7 +1295,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
441 device = node->data;
442 if (atomic_xchg(&device->state,
443 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
444 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
445 + device->workfn = fw_device_shutdown;
446 fw_schedule_device_work(device,
447 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
448 }
449 diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
450 index 815b0fcbe918..7bdb6fe63236 100644
451 --- a/drivers/firewire/net.c
452 +++ b/drivers/firewire/net.c
453 @@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
454 if (rcode == RCODE_COMPLETE) {
455 fwnet_transmit_packet_done(ptask);
456 } else {
457 - fwnet_transmit_packet_failed(ptask);
458 -
459 if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
460 dev_err(&ptask->dev->netdev->dev,
461 "fwnet_write_complete failed: %x (skipped %d)\n",
462 @@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
463
464 errors_skipped = 0;
465 last_rcode = rcode;
466 - } else
467 + } else {
468 errors_skipped++;
469 + }
470 + fwnet_transmit_packet_failed(ptask);
471 }
472 }
473
474 diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
475 index 47674b913843..1b1c37dd830b 100644
476 --- a/drivers/firewire/sbp2.c
477 +++ b/drivers/firewire/sbp2.c
478 @@ -146,6 +146,7 @@ struct sbp2_logical_unit {
479 */
480 int generation;
481 int retries;
482 + work_func_t workfn;
483 struct delayed_work work;
484 bool has_sdev;
485 bool blocked;
486 @@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
487 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
488 sbp2_set_busy_timeout(lu);
489
490 - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
491 + lu->workfn = sbp2_reconnect;
492 sbp2_agent_reset(lu);
493
494 /* This was a re-login. */
495 @@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
496 * If a bus reset happened, sbp2_update will have requeued
497 * lu->work already. Reset the work from reconnect to login.
498 */
499 - PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
500 + lu->workfn = sbp2_login;
501 }
502
503 static void sbp2_reconnect(struct work_struct *work)
504 @@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
505 lu->retries++ >= 5) {
506 dev_err(tgt_dev(tgt), "failed to reconnect\n");
507 lu->retries = 0;
508 - PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
509 + lu->workfn = sbp2_login;
510 }
511 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
512
513 @@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
514 sbp2_conditionally_unblock(lu);
515 }
516
517 +static void sbp2_lu_workfn(struct work_struct *work)
518 +{
519 + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
520 + struct sbp2_logical_unit, work);
521 + lu->workfn(work);
522 +}
523 +
524 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
525 {
526 struct sbp2_logical_unit *lu;
527 @@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
528 lu->blocked = false;
529 ++tgt->dont_block;
530 INIT_LIST_HEAD(&lu->orb_list);
531 - INIT_DELAYED_WORK(&lu->work, sbp2_login);
532 + lu->workfn = sbp2_login;
533 + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
534
535 list_add_tail(&lu->link, &tgt->lu_list);
536 return 0;
537 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
538 index 4c81e9faa635..1f7f3ce875c8 100644
539 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
540 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
541 @@ -1281,7 +1281,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
542 }
543 if (is_dp)
544 args.v5.ucLaneNum = dp_lane_count;
545 - else if (radeon_encoder->pixel_clock > 165000)
546 + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
547 args.v5.ucLaneNum = 8;
548 else
549 args.v5.ucLaneNum = 4;
550 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
551 index 8697abd7b173..0ac0a88860a4 100644
552 --- a/drivers/gpu/drm/ttm/ttm_bo.c
553 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
554 @@ -498,9 +498,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
555
556 moved:
557 if (bo->evicted) {
558 - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
559 - if (ret)
560 - pr_err("Can not flush read caches\n");
561 + if (bdev->driver->invalidate_caches) {
562 + ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
563 + if (ret)
564 + pr_err("Can not flush read caches\n");
565 + }
566 bo->evicted = false;
567 }
568
569 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
570 index 6fc283a041d6..588a5eca63d8 100644
571 --- a/drivers/infiniband/ulp/isert/ib_isert.c
572 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
573 @@ -392,8 +392,8 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
574 isert_conn->state = ISER_CONN_INIT;
575 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
576 init_completion(&isert_conn->conn_login_comp);
577 - init_waitqueue_head(&isert_conn->conn_wait);
578 - init_waitqueue_head(&isert_conn->conn_wait_comp_err);
579 + init_completion(&isert_conn->conn_wait);
580 + init_completion(&isert_conn->conn_wait_comp_err);
581 kref_init(&isert_conn->conn_kref);
582 kref_get(&isert_conn->conn_kref);
583 mutex_init(&isert_conn->conn_mutex);
584 @@ -550,11 +550,11 @@ isert_disconnect_work(struct work_struct *work)
585
586 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
587 mutex_lock(&isert_conn->conn_mutex);
588 - isert_conn->state = ISER_CONN_DOWN;
589 + if (isert_conn->state == ISER_CONN_UP)
590 + isert_conn->state = ISER_CONN_TERMINATING;
591
592 if (isert_conn->post_recv_buf_count == 0 &&
593 atomic_read(&isert_conn->post_send_buf_count) == 0) {
594 - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
595 mutex_unlock(&isert_conn->conn_mutex);
596 goto wake_up;
597 }
598 @@ -574,7 +574,7 @@ isert_disconnect_work(struct work_struct *work)
599 mutex_unlock(&isert_conn->conn_mutex);
600
601 wake_up:
602 - wake_up(&isert_conn->conn_wait);
603 + complete(&isert_conn->conn_wait);
604 isert_put_conn(isert_conn);
605 }
606
607 @@ -1213,7 +1213,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
608 case ISCSI_OP_SCSI_CMD:
609 spin_lock_bh(&conn->cmd_lock);
610 if (!list_empty(&cmd->i_conn_node))
611 - list_del(&cmd->i_conn_node);
612 + list_del_init(&cmd->i_conn_node);
613 spin_unlock_bh(&conn->cmd_lock);
614
615 if (cmd->data_direction == DMA_TO_DEVICE)
616 @@ -1225,7 +1225,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
617 case ISCSI_OP_SCSI_TMFUNC:
618 spin_lock_bh(&conn->cmd_lock);
619 if (!list_empty(&cmd->i_conn_node))
620 - list_del(&cmd->i_conn_node);
621 + list_del_init(&cmd->i_conn_node);
622 spin_unlock_bh(&conn->cmd_lock);
623
624 transport_generic_free_cmd(&cmd->se_cmd, 0);
625 @@ -1234,7 +1234,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
626 case ISCSI_OP_NOOP_OUT:
627 spin_lock_bh(&conn->cmd_lock);
628 if (!list_empty(&cmd->i_conn_node))
629 - list_del(&cmd->i_conn_node);
630 + list_del_init(&cmd->i_conn_node);
631 spin_unlock_bh(&conn->cmd_lock);
632
633 /*
634 @@ -1308,6 +1308,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
635 }
636
637 cmd->write_data_done = se_cmd->data_length;
638 + wr->send_wr_num = 0;
639
640 pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
641 spin_lock_bh(&cmd->istate_lock);
642 @@ -1348,7 +1349,7 @@ isert_do_control_comp(struct work_struct *work)
643 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
644 /*
645 * Call atomic_dec(&isert_conn->post_send_buf_count)
646 - * from isert_free_conn()
647 + * from isert_wait_conn()
648 */
649 isert_conn->logout_posted = true;
650 iscsit_logout_post_handler(cmd, cmd->conn);
651 @@ -1367,6 +1368,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
652 struct ib_device *ib_dev)
653 {
654 struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
655 + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
656
657 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
658 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
659 @@ -1377,7 +1379,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
660 queue_work(isert_comp_wq, &isert_cmd->comp_work);
661 return;
662 }
663 - atomic_dec(&isert_conn->post_send_buf_count);
664 + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
665
666 cmd->i_state = ISTATE_SENT_STATUS;
667 isert_completion_put(tx_desc, isert_cmd, ib_dev);
668 @@ -1415,7 +1417,7 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
669 case ISER_IB_RDMA_READ:
670 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
671
672 - atomic_dec(&isert_conn->post_send_buf_count);
673 + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
674 isert_completion_rdma_read(tx_desc, isert_cmd);
675 break;
676 default:
677 @@ -1426,31 +1428,38 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
678 }
679
680 static void
681 -isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
682 +isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
683 {
684 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
685 + struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
686
687 - if (tx_desc) {
688 - struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
689 + if (!isert_cmd)
690 + isert_unmap_tx_desc(tx_desc, ib_dev);
691 + else
692 + isert_completion_put(tx_desc, isert_cmd, ib_dev);
693 +}
694
695 - if (!isert_cmd)
696 - isert_unmap_tx_desc(tx_desc, ib_dev);
697 - else
698 - isert_completion_put(tx_desc, isert_cmd, ib_dev);
699 +static void
700 +isert_cq_rx_comp_err(struct isert_conn *isert_conn)
701 +{
702 + struct iscsi_conn *conn = isert_conn->conn;
703 +
704 + if (isert_conn->post_recv_buf_count)
705 + return;
706 +
707 + if (conn->sess) {
708 + target_sess_cmd_list_set_waiting(conn->sess->se_sess);
709 + target_wait_for_sess_cmds(conn->sess->se_sess);
710 }
711
712 - if (isert_conn->post_recv_buf_count == 0 &&
713 - atomic_read(&isert_conn->post_send_buf_count) == 0) {
714 - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
715 - pr_debug("Calling wake_up from isert_cq_comp_err\n");
716 + while (atomic_read(&isert_conn->post_send_buf_count))
717 + msleep(3000);
718
719 - mutex_lock(&isert_conn->conn_mutex);
720 - if (isert_conn->state != ISER_CONN_DOWN)
721 - isert_conn->state = ISER_CONN_TERMINATING;
722 - mutex_unlock(&isert_conn->conn_mutex);
723 + mutex_lock(&isert_conn->conn_mutex);
724 + isert_conn->state = ISER_CONN_DOWN;
725 + mutex_unlock(&isert_conn->conn_mutex);
726
727 - wake_up(&isert_conn->conn_wait_comp_err);
728 - }
729 + complete(&isert_conn->conn_wait_comp_err);
730 }
731
732 static void
733 @@ -1475,7 +1484,7 @@ isert_cq_tx_work(struct work_struct *work)
734 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
735 pr_debug("TX wc.status: 0x%08x\n", wc.status);
736 atomic_dec(&isert_conn->post_send_buf_count);
737 - isert_cq_comp_err(tx_desc, isert_conn);
738 + isert_cq_tx_comp_err(tx_desc, isert_conn);
739 }
740 }
741
742 @@ -1517,7 +1526,7 @@ isert_cq_rx_work(struct work_struct *work)
743 pr_debug("RX wc.status: 0x%08x\n", wc.status);
744
745 isert_conn->post_recv_buf_count--;
746 - isert_cq_comp_err(NULL, isert_conn);
747 + isert_cq_rx_comp_err(isert_conn);
748 }
749 }
750
751 @@ -1827,12 +1836,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
752 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
753 isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
754
755 - atomic_inc(&isert_conn->post_send_buf_count);
756 + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
757
758 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
759 if (rc) {
760 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
761 - atomic_dec(&isert_conn->post_send_buf_count);
762 + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
763 }
764 pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
765 return 1;
766 @@ -1935,12 +1944,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
767 data_left -= data_len;
768 }
769
770 - atomic_inc(&isert_conn->post_send_buf_count);
771 + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
772
773 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
774 if (rc) {
775 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
776 - atomic_dec(&isert_conn->post_send_buf_count);
777 + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
778 }
779 pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
780 return 0;
781 @@ -2218,22 +2227,11 @@ isert_free_np(struct iscsi_np *np)
782 kfree(isert_np);
783 }
784
785 -static int isert_check_state(struct isert_conn *isert_conn, int state)
786 -{
787 - int ret;
788 -
789 - mutex_lock(&isert_conn->conn_mutex);
790 - ret = (isert_conn->state == state);
791 - mutex_unlock(&isert_conn->conn_mutex);
792 -
793 - return ret;
794 -}
795 -
796 -static void isert_free_conn(struct iscsi_conn *conn)
797 +static void isert_wait_conn(struct iscsi_conn *conn)
798 {
799 struct isert_conn *isert_conn = conn->context;
800
801 - pr_debug("isert_free_conn: Starting \n");
802 + pr_debug("isert_wait_conn: Starting \n");
803 /*
804 * Decrement post_send_buf_count for special case when called
805 * from isert_do_control_comp() -> iscsit_logout_post_handler()
806 @@ -2243,38 +2241,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
807 atomic_dec(&isert_conn->post_send_buf_count);
808
809 if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
810 - pr_debug("Calling rdma_disconnect from isert_free_conn\n");
811 + pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
812 rdma_disconnect(isert_conn->conn_cm_id);
813 }
814 /*
815 * Only wait for conn_wait_comp_err if the isert_conn made it
816 * into full feature phase..
817 */
818 - if (isert_conn->state == ISER_CONN_UP) {
819 - pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
820 - isert_conn->state);
821 - mutex_unlock(&isert_conn->conn_mutex);
822 -
823 - wait_event(isert_conn->conn_wait_comp_err,
824 - (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
825 -
826 - wait_event(isert_conn->conn_wait,
827 - (isert_check_state(isert_conn, ISER_CONN_DOWN)));
828 -
829 - isert_put_conn(isert_conn);
830 - return;
831 - }
832 if (isert_conn->state == ISER_CONN_INIT) {
833 mutex_unlock(&isert_conn->conn_mutex);
834 - isert_put_conn(isert_conn);
835 return;
836 }
837 - pr_debug("isert_free_conn: wait_event conn_wait %d\n",
838 - isert_conn->state);
839 + if (isert_conn->state == ISER_CONN_UP)
840 + isert_conn->state = ISER_CONN_TERMINATING;
841 mutex_unlock(&isert_conn->conn_mutex);
842
843 - wait_event(isert_conn->conn_wait,
844 - (isert_check_state(isert_conn, ISER_CONN_DOWN)));
845 + wait_for_completion(&isert_conn->conn_wait_comp_err);
846 +
847 + wait_for_completion(&isert_conn->conn_wait);
848 +}
849 +
850 +static void isert_free_conn(struct iscsi_conn *conn)
851 +{
852 + struct isert_conn *isert_conn = conn->context;
853
854 isert_put_conn(isert_conn);
855 }
856 @@ -2286,6 +2275,7 @@ static struct iscsit_transport iser_target_transport = {
857 .iscsit_setup_np = isert_setup_np,
858 .iscsit_accept_np = isert_accept_np,
859 .iscsit_free_np = isert_free_np,
860 + .iscsit_wait_conn = isert_wait_conn,
861 .iscsit_free_conn = isert_free_conn,
862 .iscsit_alloc_cmd = isert_alloc_cmd,
863 .iscsit_get_login_rx = isert_get_login_rx,
864 diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
865 index 5795c82a2306..b9d6cc6917cf 100644
866 --- a/drivers/infiniband/ulp/isert/ib_isert.h
867 +++ b/drivers/infiniband/ulp/isert/ib_isert.h
868 @@ -103,8 +103,8 @@ struct isert_conn {
869 struct isert_device *conn_device;
870 struct work_struct conn_logout_work;
871 struct mutex conn_mutex;
872 - wait_queue_head_t conn_wait;
873 - wait_queue_head_t conn_wait_comp_err;
874 + struct completion conn_wait;
875 + struct completion conn_wait_comp_err;
876 struct kref conn_kref;
877 };
878
879 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
880 index 516f9c922bb2..1a75869d3a82 100644
881 --- a/drivers/md/dm-cache-target.c
882 +++ b/drivers/md/dm-cache-target.c
883 @@ -861,12 +861,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
884 int r;
885 struct dm_io_region o_region, c_region;
886 struct cache *cache = mg->cache;
887 + sector_t cblock = from_cblock(mg->cblock);
888
889 o_region.bdev = cache->origin_dev->bdev;
890 o_region.count = cache->sectors_per_block;
891
892 c_region.bdev = cache->cache_dev->bdev;
893 - c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
894 + c_region.sector = cblock * cache->sectors_per_block;
895 c_region.count = cache->sectors_per_block;
896
897 if (mg->writeback || mg->demote) {
898 @@ -2174,20 +2175,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
899 bool discarded_block;
900 struct dm_bio_prison_cell *cell;
901 struct policy_result lookup_result;
902 - struct per_bio_data *pb;
903 + struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
904
905 - if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
906 + if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
907 /*
908 * This can only occur if the io goes to a partial block at
909 * the end of the origin device. We don't cache these.
910 * Just remap to the origin and carry on.
911 */
912 - remap_to_origin_clear_discard(cache, bio, block);
913 + remap_to_origin(cache, bio);
914 return DM_MAPIO_REMAPPED;
915 }
916
917 - pb = init_per_bio_data(bio, pb_data_size);
918 -
919 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
920 defer_bio(cache, bio);
921 return DM_MAPIO_SUBMITTED;
922 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
923 index f63169d6af26..6d388cff8455 100644
924 --- a/drivers/net/can/flexcan.c
925 +++ b/drivers/net/can/flexcan.c
926 @@ -862,7 +862,7 @@ static int flexcan_open(struct net_device *dev)
927 /* start chip and queuing */
928 err = flexcan_chip_start(dev);
929 if (err)
930 - goto out_close;
931 + goto out_free_irq;
932
933 can_led_event(dev, CAN_LED_EVENT_OPEN);
934
935 @@ -871,6 +871,8 @@ static int flexcan_open(struct net_device *dev)
936
937 return 0;
938
939 + out_free_irq:
940 + free_irq(dev->irq, dev);
941 out_close:
942 close_candev(dev);
943 out:
944 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
945 index 68e9dc453e11..e27d5c839be5 100644
946 --- a/drivers/net/ethernet/broadcom/tg3.c
947 +++ b/drivers/net/ethernet/broadcom/tg3.c
948 @@ -6687,8 +6687,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
949
950 work_mask |= opaque_key;
951
952 - if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
953 - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
954 + if (desc->err_vlan & RXD_ERR_MASK) {
955 drop_it:
956 tg3_recycle_rx(tnapi, tpr, opaque_key,
957 desc_idx, *post_ptr);
958 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
959 index ff6e30eeae35..046059c56713 100644
960 --- a/drivers/net/ethernet/broadcom/tg3.h
961 +++ b/drivers/net/ethernet/broadcom/tg3.h
962 @@ -2587,7 +2587,11 @@ struct tg3_rx_buffer_desc {
963 #define RXD_ERR_TOO_SMALL 0x00400000
964 #define RXD_ERR_NO_RESOURCES 0x00800000
965 #define RXD_ERR_HUGE_FRAME 0x01000000
966 -#define RXD_ERR_MASK 0xffff0000
967 +
968 +#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
969 + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
970 + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
971 + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
972
973 u32 reserved;
974 u32 opaque;
975 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
976 index 9a95abf2dedf..540ad16d7807 100644
977 --- a/drivers/net/ethernet/sfc/ptp.c
978 +++ b/drivers/net/ethernet/sfc/ptp.c
979 @@ -1319,6 +1319,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
980 struct efx_ptp_data *ptp = efx->ptp_data;
981 int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
982
983 + if (!ptp) {
984 + if (net_ratelimit())
985 + netif_warn(efx, drv, efx->net_dev,
986 + "Received PTP event but PTP not set up\n");
987 + return;
988 + }
989 +
990 if (!ptp->enabled)
991 return;
992
993 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
994 index a0c05e07feeb..2835bfe151b1 100644
995 --- a/drivers/net/virtio_net.c
996 +++ b/drivers/net/virtio_net.c
997 @@ -1597,7 +1597,8 @@ static int virtnet_probe(struct virtio_device *vdev)
998 /* If we can receive ANY GSO packets, we must allocate large ones. */
999 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1000 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1001 - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
1002 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
1003 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1004 vi->big_packets = true;
1005
1006 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1007 diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
1008 index 55a62cae2cb4..d0815855d877 100644
1009 --- a/drivers/net/vmxnet3/vmxnet3_drv.c
1010 +++ b/drivers/net/vmxnet3/vmxnet3_drv.c
1011 @@ -1741,11 +1741,20 @@ vmxnet3_netpoll(struct net_device *netdev)
1012 {
1013 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1014
1015 - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1016 - vmxnet3_disable_all_intrs(adapter);
1017 -
1018 - vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1019 - vmxnet3_enable_all_intrs(adapter);
1020 + switch (adapter->intr.type) {
1021 +#ifdef CONFIG_PCI_MSI
1022 + case VMXNET3_IT_MSIX: {
1023 + int i;
1024 + for (i = 0; i < adapter->num_rx_queues; i++)
1025 + vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
1026 + break;
1027 + }
1028 +#endif
1029 + case VMXNET3_IT_MSI:
1030 + default:
1031 + vmxnet3_intr(0, adapter->netdev);
1032 + break;
1033 + }
1034
1035 }
1036 #endif /* CONFIG_NET_POLL_CONTROLLER */
1037 diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
1038 index 999ab08c34e6..4ae3cf7283ea 100644
1039 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
1040 +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
1041 @@ -56,7 +56,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
1042 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
1043 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1044 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1045 - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1046 + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
1047 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1048 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
1049 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
1050 @@ -95,7 +95,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
1051 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
1052 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1053 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
1054 - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
1055 + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
1056 {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
1057 };
1058
1059 diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
1060 index e04f3da1ccb3..e9d09f19f856 100644
1061 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
1062 +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
1063 @@ -739,6 +739,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1064 return ret;
1065 }
1066
1067 +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
1068 +{
1069 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
1070 + return false;
1071 + return true;
1072 +}
1073 +
1074 +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
1075 +{
1076 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
1077 + return false;
1078 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
1079 + return true;
1080 +
1081 + /* disabled by default */
1082 + return false;
1083 +}
1084 +
1085 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
1086 struct ieee80211_vif *vif,
1087 enum ieee80211_ampdu_mlme_action action,
1088 @@ -760,7 +778,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
1089
1090 switch (action) {
1091 case IEEE80211_AMPDU_RX_START:
1092 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
1093 + if (!iwl_enable_rx_ampdu(priv->cfg))
1094 break;
1095 IWL_DEBUG_HT(priv, "start Rx\n");
1096 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
1097 @@ -772,7 +790,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
1098 case IEEE80211_AMPDU_TX_START:
1099 if (!priv->trans->ops->txq_enable)
1100 break;
1101 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
1102 + if (!iwl_enable_tx_ampdu(priv->cfg))
1103 break;
1104 IWL_DEBUG_HT(priv, "start Tx\n");
1105 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
1106 diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
1107 index c3c13ce96eb0..e800002d6158 100644
1108 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c
1109 +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
1110 @@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
1111 sizeof(priv->tid_data[sta_id][tid]));
1112
1113 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
1114 + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
1115
1116 priv->num_stations--;
1117
1118 diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
1119 index 20e65d3cc3bd..2b5dbff9eadb 100644
1120 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c
1121 +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
1122 @@ -1322,8 +1322,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1123 struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
1124 struct iwl_ht_agg *agg;
1125 struct sk_buff_head reclaimed_skbs;
1126 - struct ieee80211_tx_info *info;
1127 - struct ieee80211_hdr *hdr;
1128 struct sk_buff *skb;
1129 int sta_id;
1130 int tid;
1131 @@ -1410,22 +1408,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1132 freed = 0;
1133
1134 skb_queue_walk(&reclaimed_skbs, skb) {
1135 - hdr = (struct ieee80211_hdr *)skb->data;
1136 + struct ieee80211_hdr *hdr = (void *)skb->data;
1137 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1138
1139 if (ieee80211_is_data_qos(hdr->frame_control))
1140 freed++;
1141 else
1142 WARN_ON_ONCE(1);
1143
1144 - info = IEEE80211_SKB_CB(skb);
1145 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
1146
1147 + memset(&info->status, 0, sizeof(info->status));
1148 + /* Packet was transmitted successfully, failures come as single
1149 + * frames because before failing a frame the firmware transmits
1150 + * it without aggregation at least once.
1151 + */
1152 + info->flags |= IEEE80211_TX_STAT_ACK;
1153 +
1154 if (freed == 1) {
1155 /* this is the first skb we deliver in this batch */
1156 /* put the rate scaling data there */
1157 info = IEEE80211_SKB_CB(skb);
1158 memset(&info->status, 0, sizeof(info->status));
1159 - info->flags |= IEEE80211_TX_STAT_ACK;
1160 info->flags |= IEEE80211_TX_STAT_AMPDU;
1161 info->status.ampdu_ack_len = ba_resp->txed_2_done;
1162 info->status.ampdu_len = ba_resp->txed;
1163 diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
1164 index 40fed1f511e2..96050e6c3d57 100644
1165 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c
1166 +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
1167 @@ -1211,7 +1211,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
1168 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1169 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
1170 MODULE_PARM_DESC(11n_disable,
1171 - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1172 + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
1173 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1174 int, S_IRUGO);
1175 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1176 diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
1177 index d6f6c37c09fd..e99bc55046e5 100644
1178 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
1179 +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
1180 @@ -79,9 +79,12 @@ enum iwl_power_level {
1181 IWL_POWER_NUM
1182 };
1183
1184 -#define IWL_DISABLE_HT_ALL BIT(0)
1185 -#define IWL_DISABLE_HT_TXAGG BIT(1)
1186 -#define IWL_DISABLE_HT_RXAGG BIT(2)
1187 +enum iwl_disable_11n {
1188 + IWL_DISABLE_HT_ALL = BIT(0),
1189 + IWL_DISABLE_HT_TXAGG = BIT(1),
1190 + IWL_DISABLE_HT_RXAGG = BIT(2),
1191 + IWL_ENABLE_HT_TXAGG = BIT(3),
1192 +};
1193
1194 /**
1195 * struct iwl_mod_params
1196 @@ -90,7 +93,7 @@ enum iwl_power_level {
1197 *
1198 * @sw_crypto: using hardware encryption, default = 0
1199 * @disable_11n: disable 11n capabilities, default = 0,
1200 - * use IWL_DISABLE_HT_* constants
1201 + * use IWL_[DIS,EN]ABLE_HT_* constants
1202 * @amsdu_size_8K: enable 8K amsdu size, default = 0
1203 * @restart_fw: restart firmware, default = 1
1204 * @plcp_check: enable plcp health check, default = true
1205 diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1206 index 810bfa5f6de0..9649f511bd5b 100644
1207 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1208 +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
1209 @@ -523,8 +523,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1210
1211 mutex_lock(&mvm->mutex);
1212
1213 - /* Rssi update while not associated ?! */
1214 - if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
1215 + /*
1216 + * Rssi update while not associated - can happen since the statistics
1217 + * are handled asynchronously
1218 + */
1219 + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
1220 goto out_unlock;
1221
1222 /* No open connection - reports should be disabled */
1223 diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
1224 index f7545e06ce2a..88b9c0964696 100644
1225 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
1226 +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
1227 @@ -278,6 +278,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
1228 ieee80211_free_txskb(hw, skb);
1229 }
1230
1231 +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
1232 +{
1233 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
1234 + return false;
1235 + return true;
1236 +}
1237 +
1238 +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
1239 +{
1240 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
1241 + return false;
1242 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
1243 + return true;
1244 +
1245 + /* enabled by default */
1246 + return true;
1247 +}
1248 +
1249 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
1250 struct ieee80211_vif *vif,
1251 enum ieee80211_ampdu_mlme_action action,
1252 @@ -297,7 +315,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
1253
1254 switch (action) {
1255 case IEEE80211_AMPDU_RX_START:
1256 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
1257 + if (!iwl_enable_rx_ampdu(mvm->cfg)) {
1258 ret = -EINVAL;
1259 break;
1260 }
1261 @@ -307,7 +325,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
1262 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
1263 break;
1264 case IEEE80211_AMPDU_TX_START:
1265 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
1266 + if (!iwl_enable_tx_ampdu(mvm->cfg)) {
1267 ret = -EINVAL;
1268 break;
1269 }
1270 diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
1271 index a2e6112e91e9..4ec8385e4307 100644
1272 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
1273 +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
1274 @@ -819,16 +819,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
1275 struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
1276 struct sk_buff_head reclaimed_skbs;
1277 struct iwl_mvm_tid_data *tid_data;
1278 - struct ieee80211_tx_info *info;
1279 struct ieee80211_sta *sta;
1280 struct iwl_mvm_sta *mvmsta;
1281 - struct ieee80211_hdr *hdr;
1282 struct sk_buff *skb;
1283 int sta_id, tid, freed;
1284 -
1285 /* "flow" corresponds to Tx queue */
1286 u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
1287 -
1288 /* "ssn" is start of block-ack Tx window, corresponds to index
1289 * (in Tx queue's circular buffer) of first TFD/frame in window */
1290 u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
1291 @@ -885,22 +881,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
1292 freed = 0;
1293
1294 skb_queue_walk(&reclaimed_skbs, skb) {
1295 - hdr = (struct ieee80211_hdr *)skb->data;
1296 + struct ieee80211_hdr *hdr = (void *)skb->data;
1297 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1298
1299 if (ieee80211_is_data_qos(hdr->frame_control))
1300 freed++;
1301 else
1302 WARN_ON_ONCE(1);
1303
1304 - info = IEEE80211_SKB_CB(skb);
1305 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1306
1307 + memset(&info->status, 0, sizeof(info->status));
1308 + /* Packet was transmitted successfully, failures come as single
1309 + * frames because before failing a frame the firmware transmits
1310 + * it without aggregation at least once.
1311 + */
1312 + info->flags |= IEEE80211_TX_STAT_ACK;
1313 +
1314 if (freed == 1) {
1315 /* this is the first skb we deliver in this batch */
1316 /* put the rate scaling data there */
1317 - info = IEEE80211_SKB_CB(skb);
1318 - memset(&info->status, 0, sizeof(info->status));
1319 - info->flags |= IEEE80211_TX_STAT_ACK;
1320 info->flags |= IEEE80211_TX_STAT_AMPDU;
1321 info->status.ampdu_ack_len = ba_notif->txed_2_done;
1322 info->status.ampdu_len = ba_notif->txed;
1323 diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
1324 index 5e0eec4d71c7..5d9a8084665d 100644
1325 --- a/drivers/net/wireless/mwifiex/11ac.c
1326 +++ b/drivers/net/wireless/mwifiex/11ac.c
1327 @@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
1328 vht_cap->header.len =
1329 cpu_to_le16(sizeof(struct ieee80211_vht_cap));
1330 memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
1331 - (u8 *)bss_desc->bcn_vht_cap +
1332 - sizeof(struct ieee_types_header),
1333 + (u8 *)bss_desc->bcn_vht_cap,
1334 le16_to_cpu(vht_cap->header.len));
1335
1336 mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
1337 diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
1338 index 41e9d25a2d8e..2658c8cda443 100644
1339 --- a/drivers/net/wireless/mwifiex/11n.c
1340 +++ b/drivers/net/wireless/mwifiex/11n.c
1341 @@ -307,8 +307,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
1342 ht_cap->header.len =
1343 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
1344 memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
1345 - (u8 *) bss_desc->bcn_ht_cap +
1346 - sizeof(struct ieee_types_header),
1347 + (u8 *)bss_desc->bcn_ht_cap,
1348 le16_to_cpu(ht_cap->header.len));
1349
1350 mwifiex_fill_cap_info(priv, radio_type, ht_cap);
1351 diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
1352 index 20c9c4c7b0b2..801c709656f9 100644
1353 --- a/drivers/net/wireless/mwifiex/pcie.c
1354 +++ b/drivers/net/wireless/mwifiex/pcie.c
1355 @@ -1195,6 +1195,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1356 rd_index = card->rxbd_rdptr & reg->rx_mask;
1357 skb_data = card->rx_buf_list[rd_index];
1358
1359 + /* If skb allocation was failed earlier for Rx packet,
1360 + * rx_buf_list[rd_index] would have been left with a NULL.
1361 + */
1362 + if (!skb_data)
1363 + return -ENOMEM;
1364 +
1365 MWIFIEX_SKB_PACB(skb_data, &buf_pa);
1366 pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
1367 PCI_DMA_FROMDEVICE);
1368 @@ -1509,6 +1515,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
1369 if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
1370 mwifiex_process_sleep_confirm_resp(adapter, skb->data,
1371 skb->len);
1372 + mwifiex_pcie_enable_host_int(adapter);
1373 + if (mwifiex_write_reg(adapter,
1374 + PCIE_CPU_INT_EVENT,
1375 + CPU_INTR_SLEEP_CFM_DONE)) {
1376 + dev_warn(adapter->dev,
1377 + "Write register failed\n");
1378 + return -1;
1379 + }
1380 while (reg->sleep_cookie && (count++ < 10) &&
1381 mwifiex_pcie_ok_to_access_hw(adapter))
1382 usleep_range(50, 60);
1383 @@ -1979,23 +1993,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
1384 adapter->int_status |= pcie_ireg;
1385 spin_unlock_irqrestore(&adapter->int_lock, flags);
1386
1387 - if (pcie_ireg & HOST_INTR_CMD_DONE) {
1388 - if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
1389 - (adapter->ps_state == PS_STATE_SLEEP)) {
1390 - mwifiex_pcie_enable_host_int(adapter);
1391 - if (mwifiex_write_reg(adapter,
1392 - PCIE_CPU_INT_EVENT,
1393 - CPU_INTR_SLEEP_CFM_DONE)
1394 - ) {
1395 - dev_warn(adapter->dev,
1396 - "Write register failed\n");
1397 - return;
1398 -
1399 - }
1400 - }
1401 - } else if (!adapter->pps_uapsd_mode &&
1402 - adapter->ps_state == PS_STATE_SLEEP &&
1403 - mwifiex_pcie_ok_to_access_hw(adapter)) {
1404 + if (!adapter->pps_uapsd_mode &&
1405 + adapter->ps_state == PS_STATE_SLEEP &&
1406 + mwifiex_pcie_ok_to_access_hw(adapter)) {
1407 /* Potentially for PCIe we could get other
1408 * interrupts like shared. Don't change power
1409 * state until cookie is set */
1410 diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
1411 index 50b2fe53219d..470347a0a729 100644
1412 --- a/drivers/net/wireless/mwifiex/scan.c
1413 +++ b/drivers/net/wireless/mwifiex/scan.c
1414 @@ -2040,12 +2040,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
1415 curr_bss->ht_info_offset);
1416
1417 if (curr_bss->bcn_vht_cap)
1418 - curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
1419 - curr_bss->vht_cap_offset);
1420 + curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
1421 + curr_bss->vht_cap_offset);
1422
1423 if (curr_bss->bcn_vht_oper)
1424 - curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
1425 - curr_bss->vht_info_offset);
1426 + curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
1427 + curr_bss->vht_info_offset);
1428
1429 if (curr_bss->bcn_bss_co_2040)
1430 curr_bss->bcn_bss_co_2040 =
1431 diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
1432 index b7adf3d46463..923e348dda70 100644
1433 --- a/drivers/net/wireless/mwifiex/usb.c
1434 +++ b/drivers/net/wireless/mwifiex/usb.c
1435 @@ -511,13 +511,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
1436 MWIFIEX_BSS_ROLE_ANY),
1437 MWIFIEX_ASYNC_CMD);
1438
1439 -#ifdef CONFIG_PM
1440 - /* Resume handler may be called due to remote wakeup,
1441 - * force to exit suspend anyway
1442 - */
1443 - usb_disable_autosuspend(card->udev);
1444 -#endif /* CONFIG_PM */
1445 -
1446 return 0;
1447 }
1448
1449 @@ -576,7 +569,6 @@ static struct usb_driver mwifiex_usb_driver = {
1450 .id_table = mwifiex_usb_table,
1451 .suspend = mwifiex_usb_suspend,
1452 .resume = mwifiex_usb_resume,
1453 - .supports_autosuspend = 1,
1454 };
1455
1456 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
1457 diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
1458 index ae31e8df44d7..80f72f6b6d56 100644
1459 --- a/drivers/net/wireless/mwifiex/wmm.c
1460 +++ b/drivers/net/wireless/mwifiex/wmm.c
1461 @@ -556,7 +556,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
1462 mwifiex_wmm_delete_all_ralist(priv);
1463 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
1464
1465 - if (priv->adapter->if_ops.clean_pcie_ring)
1466 + if (priv->adapter->if_ops.clean_pcie_ring &&
1467 + !priv->adapter->surprise_removed)
1468 priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
1469 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1470 }
1471 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1472 index 0bb7bfd49bf6..f30acaa84037 100644
1473 --- a/drivers/pci/pci.c
1474 +++ b/drivers/pci/pci.c
1475 @@ -1130,6 +1130,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
1476 return err;
1477 pci_fixup_device(pci_fixup_enable, dev);
1478
1479 + if (dev->msi_enabled || dev->msix_enabled)
1480 + return 0;
1481 +
1482 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1483 if (pin) {
1484 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1485 diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
1486 index 9847ab163829..a8b7466196ee 100644
1487 --- a/drivers/pnp/pnpacpi/rsparser.c
1488 +++ b/drivers/pnp/pnpacpi/rsparser.c
1489 @@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
1490 struct resource r;
1491 int i, flags;
1492
1493 - if (acpi_dev_resource_memory(res, &r)
1494 - || acpi_dev_resource_io(res, &r)
1495 - || acpi_dev_resource_address_space(res, &r)
1496 + if (acpi_dev_resource_address_space(res, &r)
1497 || acpi_dev_resource_ext_address_space(res, &r)) {
1498 pnp_add_resource(dev, &r);
1499 return AE_OK;
1500 @@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
1501 }
1502
1503 switch (res->type) {
1504 + case ACPI_RESOURCE_TYPE_MEMORY24:
1505 + case ACPI_RESOURCE_TYPE_MEMORY32:
1506 + case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
1507 + if (acpi_dev_resource_memory(res, &r))
1508 + pnp_add_resource(dev, &r);
1509 + break;
1510 + case ACPI_RESOURCE_TYPE_IO:
1511 + case ACPI_RESOURCE_TYPE_FIXED_IO:
1512 + if (acpi_dev_resource_io(res, &r))
1513 + pnp_add_resource(dev, &r);
1514 + break;
1515 case ACPI_RESOURCE_TYPE_DMA:
1516 dma = &res->data.dma;
1517 if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
1518 diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
1519 index b4b0d83f9ef6..7061ac0ad428 100644
1520 --- a/drivers/rapidio/devices/tsi721.h
1521 +++ b/drivers/rapidio/devices/tsi721.h
1522 @@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
1523 struct list_head free_list;
1524 dma_cookie_t completed_cookie;
1525 struct tasklet_struct tasklet;
1526 + bool active;
1527 };
1528
1529 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1530 diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
1531 index 502663f5f7c6..91245f5dbe81 100644
1532 --- a/drivers/rapidio/devices/tsi721_dma.c
1533 +++ b/drivers/rapidio/devices/tsi721_dma.c
1534 @@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
1535 {
1536 /* Disable BDMA channel interrupts */
1537 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
1538 -
1539 - tasklet_schedule(&bdma_chan->tasklet);
1540 + if (bdma_chan->active)
1541 + tasklet_schedule(&bdma_chan->tasklet);
1542 }
1543
1544 #ifdef CONFIG_PCI_MSI
1545 @@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
1546 }
1547 #endif /* CONFIG_PCI_MSI */
1548
1549 - tasklet_enable(&bdma_chan->tasklet);
1550 + bdma_chan->active = true;
1551 tsi721_bdma_interrupt_enable(bdma_chan, 1);
1552
1553 return bdma_chan->bd_num - 1;
1554 @@ -576,9 +576,7 @@ err_out:
1555 static void tsi721_free_chan_resources(struct dma_chan *dchan)
1556 {
1557 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
1558 -#ifdef CONFIG_PCI_MSI
1559 struct tsi721_device *priv = to_tsi721(dchan->device);
1560 -#endif
1561 LIST_HEAD(list);
1562
1563 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
1564 @@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
1565 BUG_ON(!list_empty(&bdma_chan->active_list));
1566 BUG_ON(!list_empty(&bdma_chan->queue));
1567
1568 - tasklet_disable(&bdma_chan->tasklet);
1569 + tsi721_bdma_interrupt_enable(bdma_chan, 0);
1570 + bdma_chan->active = false;
1571 +
1572 +#ifdef CONFIG_PCI_MSI
1573 + if (priv->flags & TSI721_USING_MSIX) {
1574 + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
1575 + bdma_chan->id].vector);
1576 + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
1577 + bdma_chan->id].vector);
1578 + } else
1579 +#endif
1580 + synchronize_irq(priv->pdev->irq);
1581 +
1582 + tasklet_kill(&bdma_chan->tasklet);
1583
1584 spin_lock_bh(&bdma_chan->lock);
1585 list_splice_init(&bdma_chan->free_list, &list);
1586 spin_unlock_bh(&bdma_chan->lock);
1587
1588 - tsi721_bdma_interrupt_enable(bdma_chan, 0);
1589 -
1590 #ifdef CONFIG_PCI_MSI
1591 if (priv->flags & TSI721_USING_MSIX) {
1592 free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
1593 @@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
1594 bdma_chan->dchan.cookie = 1;
1595 bdma_chan->dchan.chan_id = i;
1596 bdma_chan->id = i;
1597 + bdma_chan->active = false;
1598
1599 spin_lock_init(&bdma_chan->lock);
1600
1601 @@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
1602
1603 tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
1604 (unsigned long)bdma_chan);
1605 - tasklet_disable(&bdma_chan->tasklet);
1606 list_add_tail(&bdma_chan->dchan.device_node,
1607 &mport->dma.channels);
1608 }
1609 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
1610 index 815d6df8bd5f..c59cc6ed7adb 100644
1611 --- a/drivers/regulator/core.c
1612 +++ b/drivers/regulator/core.c
1613 @@ -919,6 +919,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
1614 return 0;
1615 }
1616
1617 +static int _regulator_do_enable(struct regulator_dev *rdev);
1618 +
1619 /**
1620 * set_machine_constraints - sets regulator constraints
1621 * @rdev: regulator source
1622 @@ -975,10 +977,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
1623 /* If the constraints say the regulator should be on at this point
1624 * and we have control then make sure it is enabled.
1625 */
1626 - if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
1627 - ops->enable) {
1628 - ret = ops->enable(rdev);
1629 - if (ret < 0) {
1630 + if (rdev->constraints->always_on || rdev->constraints->boot_on) {
1631 + ret = _regulator_do_enable(rdev);
1632 + if (ret < 0 && ret != -EINVAL) {
1633 rdev_err(rdev, "failed to enable\n");
1634 goto out;
1635 }
1636 @@ -3790,9 +3791,8 @@ int regulator_suspend_finish(void)
1637 struct regulator_ops *ops = rdev->desc->ops;
1638
1639 mutex_lock(&rdev->mutex);
1640 - if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
1641 - ops->enable) {
1642 - error = ops->enable(rdev);
1643 + if (rdev->use_count > 0 || rdev->constraints->always_on) {
1644 + error = _regulator_do_enable(rdev);
1645 if (error)
1646 ret = error;
1647 } else {
1648 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1649 index d72a9216ee2e..e91ec8cd9b09 100644
1650 --- a/drivers/s390/block/dasd.c
1651 +++ b/drivers/s390/block/dasd.c
1652 @@ -2879,12 +2879,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
1653
1654 elevator_exit(block->request_queue->elevator);
1655 block->request_queue->elevator = NULL;
1656 + mutex_lock(&block->request_queue->sysfs_lock);
1657 rc = elevator_init(block->request_queue, "deadline");
1658 - if (rc) {
1659 + if (rc)
1660 blk_cleanup_queue(block->request_queue);
1661 - return rc;
1662 - }
1663 - return 0;
1664 + mutex_unlock(&block->request_queue->sysfs_lock);
1665 + return rc;
1666 }
1667
1668 /*
1669 diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
1670 index 4911310a38f5..22a9bb1abae1 100644
1671 --- a/drivers/scsi/isci/host.h
1672 +++ b/drivers/scsi/isci/host.h
1673 @@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
1674 }
1675
1676 #define for_each_isci_host(id, ihost, pdev) \
1677 - for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
1678 - id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
1679 - ihost = to_pci_info(pdev)->hosts[++id])
1680 + for (id = 0; id < SCI_MAX_CONTROLLERS && \
1681 + (ihost = to_pci_info(pdev)->hosts[id]); id++)
1682
1683 static inline void wait_for_start(struct isci_host *ihost)
1684 {
1685 diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
1686 index cd962da4a57a..5017bde3b366 100644
1687 --- a/drivers/scsi/isci/port_config.c
1688 +++ b/drivers/scsi/isci/port_config.c
1689 @@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
1690 SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
1691 } else {
1692 /* the phy is already the part of the port */
1693 - u32 port_state = iport->sm.current_state_id;
1694 -
1695 - /* if the PORT'S state is resetting then the link up is from
1696 - * port hard reset in this case, we need to tell the port
1697 - * that link up is recieved
1698 - */
1699 - BUG_ON(port_state != SCI_PORT_RESETTING);
1700 port_agent->phy_ready_mask |= 1 << phy_index;
1701 sci_port_link_up(iport, iphy);
1702 }
1703 diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
1704 index 0d30ca849e8f..5d6fda72d659 100644
1705 --- a/drivers/scsi/isci/task.c
1706 +++ b/drivers/scsi/isci/task.c
1707 @@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
1708 /* XXX: need to cleanup any ireqs targeting this
1709 * domain_device
1710 */
1711 - ret = TMF_RESP_FUNC_COMPLETE;
1712 + ret = -ENODEV;
1713 goto out;
1714 }
1715
1716 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
1717 index c32efc753229..799c266b0bb5 100644
1718 --- a/drivers/scsi/qla2xxx/qla_def.h
1719 +++ b/drivers/scsi/qla2xxx/qla_def.h
1720 @@ -2980,8 +2980,7 @@ struct qla_hw_data {
1721 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
1722 IS_QLA82XX(ha) || IS_QLA83XX(ha))
1723 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
1724 -#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
1725 - IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
1726 +#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
1727 #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
1728 #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
1729 #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
1730 diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
1731 index fb7437dd5b7a..91b76cea3e3c 100644
1732 --- a/drivers/scsi/storvsc_drv.c
1733 +++ b/drivers/scsi/storvsc_drv.c
1734 @@ -1189,6 +1189,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
1735 {
1736 struct stor_mem_pools *memp = sdevice->hostdata;
1737
1738 + if (!memp)
1739 + return;
1740 +
1741 mempool_destroy(memp->request_mempool);
1742 kmem_cache_destroy(memp->request_pool);
1743 kfree(memp);
1744 diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
1745 index e504b7636058..23f1ba6e9ccf 100644
1746 --- a/drivers/spi/spi-ath79.c
1747 +++ b/drivers/spi/spi-ath79.c
1748 @@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
1749
1750 flags = GPIOF_DIR_OUT;
1751 if (spi->mode & SPI_CS_HIGH)
1752 - flags |= GPIOF_INIT_HIGH;
1753 - else
1754 flags |= GPIOF_INIT_LOW;
1755 + else
1756 + flags |= GPIOF_INIT_HIGH;
1757
1758 status = gpio_request_one(cdata->gpio, flags,
1759 dev_name(&spi->dev));
1760 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1761 index 5b07fd156bd7..5232ac7b0745 100644
1762 --- a/drivers/target/iscsi/iscsi_target.c
1763 +++ b/drivers/target/iscsi/iscsi_target.c
1764 @@ -3653,7 +3653,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
1765 break;
1766 case ISTATE_REMOVE:
1767 spin_lock_bh(&conn->cmd_lock);
1768 - list_del(&cmd->i_conn_node);
1769 + list_del_init(&cmd->i_conn_node);
1770 spin_unlock_bh(&conn->cmd_lock);
1771
1772 iscsit_free_cmd(cmd, false);
1773 @@ -4099,7 +4099,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
1774 spin_lock_bh(&conn->cmd_lock);
1775 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
1776
1777 - list_del(&cmd->i_conn_node);
1778 + list_del_init(&cmd->i_conn_node);
1779 spin_unlock_bh(&conn->cmd_lock);
1780
1781 iscsit_increment_maxcmdsn(cmd, sess);
1782 @@ -4144,6 +4144,10 @@ int iscsit_close_connection(
1783 iscsit_stop_timers_for_cmds(conn);
1784 iscsit_stop_nopin_response_timer(conn);
1785 iscsit_stop_nopin_timer(conn);
1786 +
1787 + if (conn->conn_transport->iscsit_wait_conn)
1788 + conn->conn_transport->iscsit_wait_conn(conn);
1789 +
1790 iscsit_free_queue_reqs_for_conn(conn);
1791
1792 /*
1793 diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
1794 index 45a5afd5ea13..0d2d013076c4 100644
1795 --- a/drivers/target/iscsi/iscsi_target_erl2.c
1796 +++ b/drivers/target/iscsi/iscsi_target_erl2.c
1797 @@ -140,7 +140,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
1798 list_for_each_entry_safe(cmd, cmd_tmp,
1799 &cr->conn_recovery_cmd_list, i_conn_node) {
1800
1801 - list_del(&cmd->i_conn_node);
1802 + list_del_init(&cmd->i_conn_node);
1803 cmd->conn = NULL;
1804 spin_unlock(&cr->conn_recovery_cmd_lock);
1805 iscsit_free_cmd(cmd, true);
1806 @@ -162,7 +162,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
1807 list_for_each_entry_safe(cmd, cmd_tmp,
1808 &cr->conn_recovery_cmd_list, i_conn_node) {
1809
1810 - list_del(&cmd->i_conn_node);
1811 + list_del_init(&cmd->i_conn_node);
1812 cmd->conn = NULL;
1813 spin_unlock(&cr->conn_recovery_cmd_lock);
1814 iscsit_free_cmd(cmd, true);
1815 @@ -218,7 +218,7 @@ int iscsit_remove_cmd_from_connection_recovery(
1816 }
1817 cr = cmd->cr;
1818
1819 - list_del(&cmd->i_conn_node);
1820 + list_del_init(&cmd->i_conn_node);
1821 return --cr->cmd_count;
1822 }
1823
1824 @@ -299,7 +299,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
1825 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
1826 continue;
1827
1828 - list_del(&cmd->i_conn_node);
1829 + list_del_init(&cmd->i_conn_node);
1830
1831 spin_unlock_bh(&conn->cmd_lock);
1832 iscsit_free_cmd(cmd, true);
1833 @@ -337,7 +337,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
1834 /*
1835 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
1836 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
1837 - * list_del(&cmd->i_conn_node); to release the command to the
1838 + * list_del_init(&cmd->i_conn_node); to release the command to the
1839 * session pool and remove it from the connection's list.
1840 *
1841 * Also stop the DataOUT timer, which will be restarted after
1842 @@ -353,7 +353,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
1843 " CID: %hu\n", cmd->iscsi_opcode,
1844 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
1845
1846 - list_del(&cmd->i_conn_node);
1847 + list_del_init(&cmd->i_conn_node);
1848 spin_unlock_bh(&conn->cmd_lock);
1849 iscsit_free_cmd(cmd, true);
1850 spin_lock_bh(&conn->cmd_lock);
1851 @@ -373,7 +373,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
1852 */
1853 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
1854 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
1855 - list_del(&cmd->i_conn_node);
1856 + list_del_init(&cmd->i_conn_node);
1857 spin_unlock_bh(&conn->cmd_lock);
1858 iscsit_free_cmd(cmd, true);
1859 spin_lock_bh(&conn->cmd_lock);
1860 @@ -395,7 +395,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
1861
1862 cmd->sess = conn->sess;
1863
1864 - list_del(&cmd->i_conn_node);
1865 + list_del_init(&cmd->i_conn_node);
1866 spin_unlock_bh(&conn->cmd_lock);
1867
1868 iscsit_free_all_datain_reqs(cmd);
1869 diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
1870 index 439260b7d87f..f31b4c5cdf3f 100644
1871 --- a/drivers/target/iscsi/iscsi_target_tpg.c
1872 +++ b/drivers/target/iscsi/iscsi_target_tpg.c
1873 @@ -138,7 +138,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
1874 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
1875
1876 spin_lock(&tpg->tpg_state_lock);
1877 - if (tpg->tpg_state == TPG_STATE_FREE) {
1878 + if (tpg->tpg_state != TPG_STATE_ACTIVE) {
1879 spin_unlock(&tpg->tpg_state_lock);
1880 continue;
1881 }
1882 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1883 index 548d1996590f..652438325197 100644
1884 --- a/drivers/usb/core/config.c
1885 +++ b/drivers/usb/core/config.c
1886 @@ -718,6 +718,10 @@ int usb_get_configuration(struct usb_device *dev)
1887 result = -ENOMEM;
1888 goto err;
1889 }
1890 +
1891 + if (dev->quirks & USB_QUIRK_DELAY_INIT)
1892 + msleep(100);
1893 +
1894 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
1895 bigbuffer, length);
1896 if (result < 0) {
1897 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1898 index 01fe36273f3b..1053eb651b2f 100644
1899 --- a/drivers/usb/core/quirks.c
1900 +++ b/drivers/usb/core/quirks.c
1901 @@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1902 /* Microsoft LifeCam-VX700 v2.0 */
1903 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
1904
1905 + /* Logitech HD Pro Webcams C920 and C930e */
1906 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
1907 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
1908 +
1909 /* Logitech Quickcam Fusion */
1910 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
1911
1912 diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
1913 index 8dccf73025b3..433c3b828e1d 100644
1914 --- a/fs/bio-integrity.c
1915 +++ b/fs/bio-integrity.c
1916 @@ -458,7 +458,7 @@ static int bio_integrity_verify(struct bio *bio)
1917 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
1918 bix.sector_size = bi->sector_size;
1919
1920 - bio_for_each_segment(bv, bio, i) {
1921 + bio_for_each_segment_all(bv, bio, i) {
1922 void *kaddr = kmap_atomic(bv->bv_page);
1923 bix.data_buf = kaddr + bv->bv_offset;
1924 bix.data_size = bv->bv_len;
1925 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
1926 index b189bd1e7a3e..ce7067881d36 100644
1927 --- a/fs/btrfs/compression.c
1928 +++ b/fs/btrfs/compression.c
1929 @@ -1009,6 +1009,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1930 bytes = min(bytes, working_bytes);
1931 kaddr = kmap_atomic(page_out);
1932 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
1933 + if (*pg_index == (vcnt - 1) && *pg_offset == 0)
1934 + memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1935 kunmap_atomic(kaddr);
1936 flush_dcache_page(page_out);
1937
1938 diff --git a/fs/namei.c b/fs/namei.c
1939 index cccaf77e76c5..1211ee5a1cb3 100644
1940 --- a/fs/namei.c
1941 +++ b/fs/namei.c
1942 @@ -3655,6 +3655,7 @@ retry:
1943 out_dput:
1944 done_path_create(&new_path, new_dentry);
1945 if (retry_estale(error, how)) {
1946 + path_put(&old_path);
1947 how |= LOOKUP_REVAL;
1948 goto retry;
1949 }
1950 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
1951 index 57db3244f4d9..4b49a8c6ccad 100644
1952 --- a/fs/nfs/delegation.c
1953 +++ b/fs/nfs/delegation.c
1954 @@ -656,16 +656,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
1955
1956 rcu_read_lock();
1957 delegation = rcu_dereference(NFS_I(inode)->delegation);
1958 + if (delegation == NULL)
1959 + goto out_enoent;
1960
1961 - if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
1962 - rcu_read_unlock();
1963 - return -ENOENT;
1964 - }
1965 + if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
1966 + goto out_enoent;
1967 nfs_mark_return_delegation(server, delegation);
1968 rcu_read_unlock();
1969
1970 nfs_delegation_run_state_manager(clp);
1971 return 0;
1972 +out_enoent:
1973 + rcu_read_unlock();
1974 + return -ENOENT;
1975 }
1976
1977 static struct inode *
1978 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1979 index 26e71bdb5b33..1ae7dd5956c5 100644
1980 --- a/fs/nfs/nfs4proc.c
1981 +++ b/fs/nfs/nfs4proc.c
1982 @@ -3607,8 +3607,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
1983 {
1984 nfs4_stateid current_stateid;
1985
1986 - if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
1987 - return false;
1988 + /* If the current stateid represents a lost lock, then exit */
1989 + if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
1990 + return true;
1991 return nfs4_stateid_match(stateid, &current_stateid);
1992 }
1993
1994 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1995 index ff54014a24ec..46387e49aa46 100644
1996 --- a/fs/ocfs2/file.c
1997 +++ b/fs/ocfs2/file.c
1998 @@ -2374,8 +2374,8 @@ out_dio:
1999
2000 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2001 ((file->f_flags & O_DIRECT) && !direct_io)) {
2002 - ret = filemap_fdatawrite_range(file->f_mapping, pos,
2003 - pos + count - 1);
2004 + ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
2005 + *ppos + count - 1);
2006 if (ret < 0)
2007 written = ret;
2008
2009 @@ -2388,8 +2388,8 @@ out_dio:
2010 }
2011
2012 if (!ret)
2013 - ret = filemap_fdatawait_range(file->f_mapping, pos,
2014 - pos + count - 1);
2015 + ret = filemap_fdatawait_range(file->f_mapping, *ppos,
2016 + *ppos + count - 1);
2017 }
2018
2019 /*
2020 diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
2021 index 332a281f217e..e49b4f1cb26b 100644
2022 --- a/fs/ocfs2/quota_global.c
2023 +++ b/fs/ocfs2/quota_global.c
2024 @@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
2025 */
2026 if (status < 0)
2027 mlog_errno(status);
2028 + /*
2029 + * Clear dq_off so that we search for the structure in quota file next
2030 + * time we acquire it. The structure might be deleted and reallocated
2031 + * elsewhere by another node while our dquot structure is on freelist.
2032 + */
2033 + dquot->dq_off = 0;
2034 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
2035 out_trans:
2036 ocfs2_commit_trans(osb, handle);
2037 @@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
2038 status = ocfs2_lock_global_qf(info, 1);
2039 if (status < 0)
2040 goto out;
2041 - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
2042 - status = ocfs2_qinfo_lock(info, 0);
2043 - if (status < 0)
2044 - goto out_dq;
2045 - status = qtree_read_dquot(&info->dqi_gi, dquot);
2046 - ocfs2_qinfo_unlock(info, 0);
2047 - if (status < 0)
2048 - goto out_dq;
2049 - }
2050 - set_bit(DQ_READ_B, &dquot->dq_flags);
2051 + status = ocfs2_qinfo_lock(info, 0);
2052 + if (status < 0)
2053 + goto out_dq;
2054 + /*
2055 + * We always want to read dquot structure from disk because we don't
2056 + * know what happened with it while it was on freelist.
2057 + */
2058 + status = qtree_read_dquot(&info->dqi_gi, dquot);
2059 + ocfs2_qinfo_unlock(info, 0);
2060 + if (status < 0)
2061 + goto out_dq;
2062
2063 OCFS2_DQUOT(dquot)->dq_use_count++;
2064 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
2065 diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
2066 index 27fe7ee4874c..d0f323da0b5c 100644
2067 --- a/fs/ocfs2/quota_local.c
2068 +++ b/fs/ocfs2/quota_local.c
2069 @@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
2070 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
2071
2072 out:
2073 - /* Clear the read bit so that next time someone uses this
2074 - * dquot he reads fresh info from disk and allocates local
2075 - * dquot structure */
2076 - clear_bit(DQ_READ_B, &dquot->dq_flags);
2077 return status;
2078 }
2079
2080 diff --git a/fs/proc/base.c b/fs/proc/base.c
2081 index c3834dad09b3..de12b8128b95 100644
2082 --- a/fs/proc/base.c
2083 +++ b/fs/proc/base.c
2084 @@ -1825,6 +1825,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
2085 if (rc)
2086 goto out_mmput;
2087
2088 + rc = -ENOENT;
2089 down_read(&mm->mmap_sem);
2090 vma = find_exact_vma(mm, vm_start, vm_end);
2091 if (vma && vma->vm_file) {
2092 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
2093 index 217e4b42b7c8..5d838bf10cbd 100644
2094 --- a/include/linux/firewire.h
2095 +++ b/include/linux/firewire.h
2096 @@ -200,6 +200,7 @@ struct fw_device {
2097 unsigned irmc:1;
2098 unsigned bc_implemented:2;
2099
2100 + work_func_t workfn;
2101 struct delayed_work work;
2102 struct fw_attribute_group attribute_group;
2103 };
2104 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
2105 index 8fb8edf12417..7b5d4a8ab199 100644
2106 --- a/include/linux/jiffies.h
2107 +++ b/include/linux/jiffies.h
2108 @@ -101,13 +101,13 @@ static inline u64 get_jiffies_64(void)
2109 #define time_after(a,b) \
2110 (typecheck(unsigned long, a) && \
2111 typecheck(unsigned long, b) && \
2112 - ((long)(b) - (long)(a) < 0))
2113 + ((long)((b) - (a)) < 0))
2114 #define time_before(a,b) time_after(b,a)
2115
2116 #define time_after_eq(a,b) \
2117 (typecheck(unsigned long, a) && \
2118 typecheck(unsigned long, b) && \
2119 - ((long)(a) - (long)(b) >= 0))
2120 + ((long)((a) - (b)) >= 0))
2121 #define time_before_eq(a,b) time_after_eq(b,a)
2122
2123 /*
2124 @@ -130,13 +130,13 @@ static inline u64 get_jiffies_64(void)
2125 #define time_after64(a,b) \
2126 (typecheck(__u64, a) && \
2127 typecheck(__u64, b) && \
2128 - ((__s64)(b) - (__s64)(a) < 0))
2129 + ((__s64)((b) - (a)) < 0))
2130 #define time_before64(a,b) time_after64(b,a)
2131
2132 #define time_after_eq64(a,b) \
2133 (typecheck(__u64, a) && \
2134 typecheck(__u64, b) && \
2135 - ((__s64)(a) - (__s64)(b) >= 0))
2136 + ((__s64)((a) - (b)) >= 0))
2137 #define time_before_eq64(a,b) time_after_eq64(b,a)
2138
2139 /*
2140 diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
2141 index f8e084d0fc77..ba605015c4d8 100644
2142 --- a/include/linux/tracepoint.h
2143 +++ b/include/linux/tracepoint.h
2144 @@ -60,6 +60,12 @@ struct tp_module {
2145 unsigned int num_tracepoints;
2146 struct tracepoint * const *tracepoints_ptrs;
2147 };
2148 +bool trace_module_has_bad_taint(struct module *mod);
2149 +#else
2150 +static inline bool trace_module_has_bad_taint(struct module *mod)
2151 +{
2152 + return false;
2153 +}
2154 #endif /* CONFIG_MODULES */
2155
2156 struct tracepoint_iter {
2157 diff --git a/include/net/tcp.h b/include/net/tcp.h
2158 index 3fc77e90624a..6f87f0873843 100644
2159 --- a/include/net/tcp.h
2160 +++ b/include/net/tcp.h
2161 @@ -1308,7 +1308,8 @@ struct tcp_fastopen_request {
2162 /* Fast Open cookie. Size 0 means a cookie request */
2163 struct tcp_fastopen_cookie cookie;
2164 struct msghdr *data; /* data in MSG_FASTOPEN */
2165 - u16 copied; /* queued in tcp_connect() */
2166 + size_t size;
2167 + int copied; /* queued in tcp_connect() */
2168 };
2169 void tcp_free_fastopen_req(struct tcp_sock *tp);
2170
2171 diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
2172 index c5aade523863..4a5f00e2e6cd 100644
2173 --- a/include/target/iscsi/iscsi_transport.h
2174 +++ b/include/target/iscsi/iscsi_transport.h
2175 @@ -11,6 +11,7 @@ struct iscsit_transport {
2176 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
2177 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
2178 void (*iscsit_free_np)(struct iscsi_np *);
2179 + void (*iscsit_wait_conn)(struct iscsi_conn *);
2180 void (*iscsit_free_conn)(struct iscsi_conn *);
2181 struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t);
2182 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
2183 diff --git a/ipc/msg.c b/ipc/msg.c
2184 index 558aa91186b6..52770bfde2a5 100644
2185 --- a/ipc/msg.c
2186 +++ b/ipc/msg.c
2187 @@ -885,6 +885,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
2188 return -EINVAL;
2189
2190 if (msgflg & MSG_COPY) {
2191 + if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
2192 + return -EINVAL;
2193 copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
2194 if (IS_ERR(copy))
2195 return PTR_ERR(copy);
2196 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2197 index d313870dcd02..d9dd521ddd6b 100644
2198 --- a/kernel/cpuset.c
2199 +++ b/kernel/cpuset.c
2200 @@ -2422,9 +2422,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2201
2202 task_lock(current);
2203 cs = nearest_hardwall_ancestor(task_cs(current));
2204 + allowed = node_isset(node, cs->mems_allowed);
2205 task_unlock(current);
2206
2207 - allowed = node_isset(node, cs->mems_allowed);
2208 mutex_unlock(&callback_mutex);
2209 return allowed;
2210 }
2211 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2212 index dc4db3228dcd..9bd5c8a6c8ee 100644
2213 --- a/kernel/irq/manage.c
2214 +++ b/kernel/irq/manage.c
2215 @@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
2216
2217 static void wake_threads_waitq(struct irq_desc *desc)
2218 {
2219 - if (atomic_dec_and_test(&desc->threads_active) &&
2220 - waitqueue_active(&desc->wait_for_threads))
2221 + if (atomic_dec_and_test(&desc->threads_active))
2222 wake_up(&desc->wait_for_threads);
2223 }
2224
2225 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2226 index 305ef886219e..c7ab8eab5427 100644
2227 --- a/kernel/sched/fair.c
2228 +++ b/kernel/sched/fair.c
2229 @@ -5862,15 +5862,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
2230 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2231
2232 /*
2233 - * Ensure the task's vruntime is normalized, so that when its
2234 + * Ensure the task's vruntime is normalized, so that when it's
2235 * switched back to the fair class the enqueue_entity(.flags=0) will
2236 * do the right thing.
2237 *
2238 - * If it was on_rq, then the dequeue_entity(.flags=0) will already
2239 - * have normalized the vruntime, if it was !on_rq, then only when
2240 + * If it's on_rq, then the dequeue_entity(.flags=0) will already
2241 + * have normalized the vruntime, if it's !on_rq, then only when
2242 * the task is sleeping will it still have non-normalized vruntime.
2243 */
2244 - if (!se->on_rq && p->state != TASK_RUNNING) {
2245 + if (!p->on_rq && p->state != TASK_RUNNING) {
2246 /*
2247 * Fix up our vruntime so that the current sleep doesn't
2248 * cause 'unlimited' sleep bonus.
2249 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
2250 index f681da32a2ff..19ee339a1d0d 100644
2251 --- a/kernel/time/tick-broadcast.c
2252 +++ b/kernel/time/tick-broadcast.c
2253 @@ -594,6 +594,13 @@ again:
2254 cpumask_clear(tick_broadcast_force_mask);
2255
2256 /*
2257 + * Sanity check. Catch the case where we try to broadcast to
2258 + * offline cpus.
2259 + */
2260 + if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
2261 + cpumask_and(tmpmask, tmpmask, cpu_online_mask);
2262 +
2263 + /*
2264 * Wakeup the cpus which have an expired event.
2265 */
2266 tick_do_broadcast(tmpmask);
2267 @@ -834,10 +841,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
2268 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
2269
2270 /*
2271 - * Clear the broadcast mask flag for the dead cpu, but do not
2272 - * stop the broadcast device!
2273 + * Clear the broadcast masks for the dead cpu, but do not stop
2274 + * the broadcast device!
2275 */
2276 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
2277 + cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
2278 + cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
2279
2280 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
2281 }
2282 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2283 index 3d18aadef493..2f4b185bfc23 100644
2284 --- a/kernel/trace/trace_events.c
2285 +++ b/kernel/trace/trace_events.c
2286 @@ -1860,6 +1860,16 @@ static void trace_module_add_events(struct module *mod)
2287 struct ftrace_module_file_ops *file_ops = NULL;
2288 struct ftrace_event_call **call, **start, **end;
2289
2290 + if (!mod->num_trace_events)
2291 + return;
2292 +
2293 + /* Don't add infrastructure for mods without tracepoints */
2294 + if (trace_module_has_bad_taint(mod)) {
2295 + pr_err("%s: module has bad taint, not creating trace events\n",
2296 + mod->name);
2297 + return;
2298 + }
2299 +
2300 start = mod->trace_events;
2301 end = mod->trace_events + mod->num_trace_events;
2302
2303 diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
2304 index 29f26540e9c9..031cc5655a51 100644
2305 --- a/kernel/tracepoint.c
2306 +++ b/kernel/tracepoint.c
2307 @@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
2308 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
2309
2310 #ifdef CONFIG_MODULES
2311 +bool trace_module_has_bad_taint(struct module *mod)
2312 +{
2313 + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
2314 +}
2315 +
2316 static int tracepoint_module_coming(struct module *mod)
2317 {
2318 struct tp_module *tp_mod, *iter;
2319 @@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
2320 * module headers (for forced load), to make sure we don't cause a crash.
2321 * Staging and out-of-tree GPL modules are fine.
2322 */
2323 - if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
2324 + if (trace_module_has_bad_taint(mod))
2325 return 0;
2326 mutex_lock(&tracepoints_mutex);
2327 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
2328 diff --git a/mm/compaction.c b/mm/compaction.c
2329 index 9a3e351da29b..18a90b4d0bfc 100644
2330 --- a/mm/compaction.c
2331 +++ b/mm/compaction.c
2332 @@ -252,7 +252,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
2333 {
2334 int nr_scanned = 0, total_isolated = 0;
2335 struct page *cursor, *valid_page = NULL;
2336 - unsigned long nr_strict_required = end_pfn - blockpfn;
2337 unsigned long flags;
2338 bool locked = false;
2339
2340 @@ -265,11 +264,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
2341
2342 nr_scanned++;
2343 if (!pfn_valid_within(blockpfn))
2344 - continue;
2345 + goto isolate_fail;
2346 +
2347 if (!valid_page)
2348 valid_page = page;
2349 if (!PageBuddy(page))
2350 - continue;
2351 + goto isolate_fail;
2352
2353 /*
2354 * The zone lock must be held to isolate freepages.
2355 @@ -290,12 +290,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
2356
2357 /* Recheck this is a buddy page under lock */
2358 if (!PageBuddy(page))
2359 - continue;
2360 + goto isolate_fail;
2361
2362 /* Found a free page, break it into order-0 pages */
2363 isolated = split_free_page(page);
2364 - if (!isolated && strict)
2365 - break;
2366 total_isolated += isolated;
2367 for (i = 0; i < isolated; i++) {
2368 list_add(&page->lru, freelist);
2369 @@ -306,7 +304,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
2370 if (isolated) {
2371 blockpfn += isolated - 1;
2372 cursor += isolated - 1;
2373 + continue;
2374 }
2375 +
2376 +isolate_fail:
2377 + if (strict)
2378 + break;
2379 + else
2380 + continue;
2381 +
2382 }
2383
2384 trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
2385 @@ -316,7 +322,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
2386 * pages requested were isolated. If there were any failures, 0 is
2387 * returned and CMA will fail.
2388 */
2389 - if (strict && nr_strict_required > total_isolated)
2390 + if (strict && blockpfn < end_pfn)
2391 total_isolated = 0;
2392
2393 if (locked)
2394 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2395 index 6115b2bbd6ea..f45e21ab9cea 100644
2396 --- a/mm/memcontrol.c
2397 +++ b/mm/memcontrol.c
2398 @@ -6326,9 +6326,23 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
2399 static void mem_cgroup_css_offline(struct cgroup *cont)
2400 {
2401 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2402 + struct cgroup *iter;
2403
2404 mem_cgroup_invalidate_reclaim_iterators(memcg);
2405 +
2406 + /*
2407 + * This requires that offlining is serialized. Right now that is
2408 + * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
2409 + */
2410 + rcu_read_lock();
2411 + cgroup_for_each_descendant_post(iter, cont) {
2412 + rcu_read_unlock();
2413 + mem_cgroup_reparent_charges(mem_cgroup_from_cont(iter));
2414 + rcu_read_lock();
2415 + }
2416 + rcu_read_unlock();
2417 mem_cgroup_reparent_charges(memcg);
2418 +
2419 mem_cgroup_destroy_all_caches(memcg);
2420 }
2421
2422 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2423 index 49aeab86f317..b49e8bafab17 100644
2424 --- a/net/core/neighbour.c
2425 +++ b/net/core/neighbour.c
2426 @@ -764,9 +764,6 @@ static void neigh_periodic_work(struct work_struct *work)
2427 nht = rcu_dereference_protected(tbl->nht,
2428 lockdep_is_held(&tbl->lock));
2429
2430 - if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
2431 - goto out;
2432 -
2433 /*
2434 * periodically recompute ReachableTime from random function
2435 */
2436 @@ -779,6 +776,9 @@ static void neigh_periodic_work(struct work_struct *work)
2437 neigh_rand_reach_time(p->base_reachable_time);
2438 }
2439
2440 + if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
2441 + goto out;
2442 +
2443 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
2444 np = &nht->hash_buckets[i];
2445
2446 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2447 index 1a2e249cef49..39bdb14b3214 100644
2448 --- a/net/ipv4/tcp.c
2449 +++ b/net/ipv4/tcp.c
2450 @@ -1001,7 +1001,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
2451 }
2452 }
2453
2454 -static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
2455 +static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
2456 + int *copied, size_t size)
2457 {
2458 struct tcp_sock *tp = tcp_sk(sk);
2459 int err, flags;
2460 @@ -1016,11 +1017,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
2461 if (unlikely(tp->fastopen_req == NULL))
2462 return -ENOBUFS;
2463 tp->fastopen_req->data = msg;
2464 + tp->fastopen_req->size = size;
2465
2466 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
2467 err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
2468 msg->msg_namelen, flags);
2469 - *size = tp->fastopen_req->copied;
2470 + *copied = tp->fastopen_req->copied;
2471 tcp_free_fastopen_req(tp);
2472 return err;
2473 }
2474 @@ -1040,7 +1042,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2475
2476 flags = msg->msg_flags;
2477 if (flags & MSG_FASTOPEN) {
2478 - err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
2479 + err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
2480 if (err == -EINPROGRESS && copied_syn > 0)
2481 goto out;
2482 else if (err)
2483 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2484 index d2df17940e07..6da3d94a114b 100644
2485 --- a/net/ipv4/tcp_output.c
2486 +++ b/net/ipv4/tcp_output.c
2487 @@ -2892,7 +2892,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2488 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2489 MAX_TCP_OPTION_SPACE;
2490
2491 - syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
2492 + space = min_t(size_t, space, fo->size);
2493 +
2494 + /* limit to order-0 allocations */
2495 + space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
2496 +
2497 + syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
2498 sk->sk_allocation);
2499 if (syn_data == NULL)
2500 goto fallback;
2501 diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
2502 index c5e83fae4df4..51af9d0d019a 100644
2503 --- a/net/ipv6/exthdrs_core.c
2504 +++ b/net/ipv6/exthdrs_core.c
2505 @@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2506 found = (nexthdr == target);
2507
2508 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2509 - if (target < 0)
2510 + if (target < 0 || found)
2511 break;
2512 return -ENOENT;
2513 }
2514 diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
2515 index 3696aa28784a..2f65b022627b 100644
2516 --- a/net/ipv6/udp_offload.c
2517 +++ b/net/ipv6/udp_offload.c
2518 @@ -108,7 +108,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
2519 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
2520 fptr->nexthdr = nexthdr;
2521 fptr->reserved = 0;
2522 - ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
2523 + fptr->identification = skb_shinfo(skb)->ip6_frag_id;
2524
2525 /* Fragment the skb. ipv6 header and the remaining fields of the
2526 * fragment header are updated in ipv6_gso_segment()
2527 diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
2528 index 3b7bfc01ee36..ddda201832b3 100644
2529 --- a/net/mac80211/mesh_ps.c
2530 +++ b/net/mac80211/mesh_ps.c
2531 @@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
2532 sdata->vif.addr);
2533 nullfunc->frame_control = fc;
2534 nullfunc->duration_id = 0;
2535 + nullfunc->seq_ctrl = 0;
2536 /* no address resolution for this frame -> set addr 1 immediately */
2537 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
2538 memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
2539 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2540 index 5b4328dcbe4e..49bc2246bd86 100644
2541 --- a/net/mac80211/mlme.c
2542 +++ b/net/mac80211/mlme.c
2543 @@ -310,6 +310,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
2544 switch (vht_oper->chan_width) {
2545 case IEEE80211_VHT_CHANWIDTH_USE_HT:
2546 vht_chandef.width = chandef->width;
2547 + vht_chandef.center_freq1 = chandef->center_freq1;
2548 break;
2549 case IEEE80211_VHT_CHANWIDTH_80MHZ:
2550 vht_chandef.width = NL80211_CHAN_WIDTH_80;
2551 @@ -359,6 +360,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
2552 ret = 0;
2553
2554 out:
2555 + /*
2556 + * When tracking the current AP, don't do any further checks if the
2557 + * new chandef is identical to the one we're currently using for the
2558 + * connection. This keeps us from playing ping-pong with regulatory,
2559 + * without it the following can happen (for example):
2560 + * - connect to an AP with 80 MHz, world regdom allows 80 MHz
2561 + * - AP advertises regdom US
2562 + * - CRDA loads regdom US with 80 MHz prohibited (old database)
2563 + * - the code below detects an unsupported channel, downgrades, and
2564 + * we disconnect from the AP in the caller
2565 + * - disconnect causes CRDA to reload world regdomain and the game
2566 + * starts anew.
2567 + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
2568 + *
2569 + * It seems possible that there are still scenarios with CSA or real
2570 + * bandwidth changes where a this could happen, but those cases are
2571 + * less common and wouldn't completely prevent using the AP.
2572 + */
2573 + if (tracking &&
2574 + cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
2575 + return ret;
2576 +
2577 /* don't print the message below for VHT mismatch if VHT is disabled */
2578 if (ret & IEEE80211_STA_DISABLE_VHT)
2579 vht_chandef = *chandef;
2580 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
2581 index 11216bc13b27..0418777c361f 100644
2582 --- a/net/mac80211/sta_info.c
2583 +++ b/net/mac80211/sta_info.c
2584 @@ -339,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
2585 return NULL;
2586
2587 spin_lock_init(&sta->lock);
2588 + spin_lock_init(&sta->ps_lock);
2589 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
2590 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
2591 mutex_init(&sta->ampdu_mlme.mtx);
2592 @@ -1045,6 +1046,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
2593
2594 skb_queue_head_init(&pending);
2595
2596 + /* sync with ieee80211_tx_h_unicast_ps_buf */
2597 + spin_lock(&sta->ps_lock);
2598 /* Send all buffered frames to the station */
2599 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
2600 int count = skb_queue_len(&pending), tmp;
2601 @@ -1064,6 +1067,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
2602 }
2603
2604 ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
2605 + spin_unlock(&sta->ps_lock);
2606
2607 local->total_ps_buffered -= buffered;
2608
2609 @@ -1110,6 +1114,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
2610 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
2611 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
2612 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
2613 + nullfunc->seq_ctrl = 0;
2614
2615 skb->priority = tid;
2616 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
2617 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
2618 index adc30045f99e..3184b2b2853c 100644
2619 --- a/net/mac80211/sta_info.h
2620 +++ b/net/mac80211/sta_info.h
2621 @@ -244,6 +244,7 @@ struct sta_ampdu_mlme {
2622 * @drv_unblock_wk: used for driver PS unblocking
2623 * @listen_interval: listen interval of this station, when we're acting as AP
2624 * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
2625 + * @ps_lock: used for powersave (when mac80211 is the AP) related locking
2626 * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
2627 * when it leaves power saving state or polls
2628 * @tx_filtered: buffers (per AC) of frames we already tried to
2629 @@ -324,10 +325,8 @@ struct sta_info {
2630 /* use the accessors defined below */
2631 unsigned long _flags;
2632
2633 - /*
2634 - * STA powersave frame queues, no more than the internal
2635 - * locking required.
2636 - */
2637 + /* STA powersave lock and frame queues */
2638 + spinlock_t ps_lock;
2639 struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
2640 struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
2641 unsigned long driver_buffered_tids;
2642 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
2643 index fe9d6e7b904b..6d5791d735f3 100644
2644 --- a/net/mac80211/tx.c
2645 +++ b/net/mac80211/tx.c
2646 @@ -461,6 +461,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
2647 sta->sta.addr, sta->sta.aid, ac);
2648 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
2649 purge_old_ps_buffers(tx->local);
2650 +
2651 + /* sync with ieee80211_sta_ps_deliver_wakeup */
2652 + spin_lock(&sta->ps_lock);
2653 + /*
2654 + * STA woke up the meantime and all the frames on ps_tx_buf have
2655 + * been queued to pending queue. No reordering can happen, go
2656 + * ahead and Tx the packet.
2657 + */
2658 + if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
2659 + !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
2660 + spin_unlock(&sta->ps_lock);
2661 + return TX_CONTINUE;
2662 + }
2663 +
2664 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
2665 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
2666 ps_dbg(tx->sdata,
2667 @@ -474,6 +488,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
2668 info->control.vif = &tx->sdata->vif;
2669 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2670 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
2671 + spin_unlock(&sta->ps_lock);
2672
2673 if (!timer_pending(&local->sta_cleanup))
2674 mod_timer(&local->sta_cleanup,
2675 diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
2676 index afba19cb6f87..a282fddf8b00 100644
2677 --- a/net/mac80211/wme.c
2678 +++ b/net/mac80211/wme.c
2679 @@ -153,6 +153,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
2680 return IEEE80211_AC_BE;
2681 }
2682
2683 + if (skb->protocol == sdata->control_port_protocol) {
2684 + skb->priority = 7;
2685 + return ieee80211_downgrade_queue(sdata, skb);
2686 + }
2687 +
2688 /* use the data classifier to determine what 802.1d tag the
2689 * data frame has */
2690 skb->priority = cfg80211_classify8021d(skb);
2691 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2692 index de1a0138317f..7ceb25ba85b8 100644
2693 --- a/net/sctp/sm_statefuns.c
2694 +++ b/net/sctp/sm_statefuns.c
2695 @@ -765,6 +765,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
2696 struct sctp_chunk auth;
2697 sctp_ierror_t ret;
2698
2699 + /* Make sure that we and the peer are AUTH capable */
2700 + if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
2701 + kfree_skb(chunk->auth_chunk);
2702 + sctp_association_free(new_asoc);
2703 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2704 + }
2705 +
2706 /* set-up our fake chunk so that we can process it */
2707 auth.skb = chunk->auth_chunk;
2708 auth.asoc = chunk->asoc;
2709 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2710 index 3ca7927520b0..94d334781554 100644
2711 --- a/net/unix/af_unix.c
2712 +++ b/net/unix/af_unix.c
2713 @@ -160,9 +160,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
2714
2715 static inline unsigned int unix_hash_fold(__wsum n)
2716 {
2717 - unsigned int hash = (__force unsigned int)n;
2718 + unsigned int hash = (__force unsigned int)csum_fold(n);
2719
2720 - hash ^= hash>>16;
2721 hash ^= hash>>8;
2722 return hash&(UNIX_HASH_SIZE-1);
2723 }
2724 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
2725 index 5a6527668c07..290e09825b82 100644
2726 --- a/sound/pci/hda/patch_analog.c
2727 +++ b/sound/pci/hda/patch_analog.c
2728 @@ -3667,6 +3667,7 @@ static int ad1884_parse_auto_config(struct hda_codec *codec)
2729 spec = codec->spec;
2730
2731 spec->gen.mixer_nid = 0x20;
2732 + spec->gen.mixer_merge_nid = 0x21;
2733 spec->gen.beep_nid = 0x10;
2734 set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
2735
2736 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2737 index e0bdcb3ecf0e..34548589f419 100644
2738 --- a/sound/pci/hda/patch_realtek.c
2739 +++ b/sound/pci/hda/patch_realtek.c
2740 @@ -3648,6 +3648,7 @@ static const struct hda_fixup alc269_fixups[] = {
2741 };
2742
2743 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2744 + SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
2745 SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
2746 SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
2747 SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2748 diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
2749 index 77acd790ea47..eb7ad7706205 100644
2750 --- a/sound/pci/oxygen/xonar_dg.c
2751 +++ b/sound/pci/oxygen/xonar_dg.c
2752 @@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
2753 oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
2754 data->output_sel == 1 ? GPIO_HP_REAR : 0,
2755 GPIO_HP_REAR);
2756 + oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
2757 + data->output_sel == 0 ?
2758 + OXYGEN_PLAY_MUTE01 :
2759 + OXYGEN_PLAY_MUTE23 |
2760 + OXYGEN_PLAY_MUTE45 |
2761 + OXYGEN_PLAY_MUTE67,
2762 + OXYGEN_PLAY_MUTE01 |
2763 + OXYGEN_PLAY_MUTE23 |
2764 + OXYGEN_PLAY_MUTE45 |
2765 + OXYGEN_PLAY_MUTE67);
2766 }
2767 mutex_unlock(&chip->mutex);
2768 return changed;
2769 @@ -596,7 +606,7 @@ struct oxygen_model model_xonar_dg = {
2770 .model_data_size = sizeof(struct dg),
2771 .device_config = PLAYBACK_0_TO_I2S |
2772 PLAYBACK_1_TO_SPDIF |
2773 - CAPTURE_0_FROM_I2S_2 |
2774 + CAPTURE_0_FROM_I2S_1 |
2775 CAPTURE_1_FROM_SPDIF,
2776 .dac_channels_pcm = 6,
2777 .dac_channels_mixer = 0,
2778 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2779 index 95558ef4a7a0..be4db47cb2d9 100644
2780 --- a/sound/usb/mixer.c
2781 +++ b/sound/usb/mixer.c
2782 @@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
2783 }
2784 break;
2785
2786 + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
2787 case USB_ID(0x046d, 0x0808):
2788 case USB_ID(0x046d, 0x0809):
2789 case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
