Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0109-3.10.10-all-fixes.patch

Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 95711 bytes
-3.10.84-alx-r1
1 niro 2672 diff --git a/Makefile b/Makefile
2     index 4b31d62..b119684 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 9
9     +SUBLEVEL = 10
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
14     index 6179de7..2046a89 100644
15     --- a/arch/arc/include/asm/ptrace.h
16     +++ b/arch/arc/include/asm/ptrace.h
17     @@ -52,12 +52,14 @@ struct pt_regs {
18    
19     /*to distinguish bet excp, syscall, irq */
20     union {
21     + struct {
22     #ifdef CONFIG_CPU_BIG_ENDIAN
23     /* so that assembly code is same for LE/BE */
24     unsigned long orig_r8:16, event:16;
25     #else
26     unsigned long event:16, orig_r8:16;
27     #endif
28     + };
29     long orig_r8_word;
30     };
31     };
32     diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
33     index 33ab304..29de098 100644
34     --- a/arch/arc/include/asm/syscall.h
35     +++ b/arch/arc/include/asm/syscall.h
36     @@ -18,7 +18,7 @@ static inline long
37     syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
38     {
39     if (user_mode(regs) && in_syscall(regs))
40     - return regs->orig_r8;
41     + return regs->r8;
42     else
43     return -1;
44     }
45     @@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
46     static inline void
47     syscall_rollback(struct task_struct *task, struct pt_regs *regs)
48     {
49     - /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
50     - regs->r8 = regs->orig_r8;
51     + regs->r0 = regs->orig_r0;
52     }
53    
54     static inline long
55     diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
56     index 0c6d664..6dbe359 100644
57     --- a/arch/arc/kernel/entry.S
58     +++ b/arch/arc/kernel/entry.S
59     @@ -498,7 +498,7 @@ tracesys_exit:
60     trap_with_param:
61    
62     ; stop_pc info by gdb needs this info
63     - stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
64     + st orig_r8_IS_BRKPT, [sp, PT_orig_r8]
65    
66     mov r0, r12
67     lr r1, [efa]
68     @@ -723,7 +723,7 @@ not_exception:
69     ; things to what they were, before returning from L2 context
70     ;----------------------------------------------------------------
71    
72     - ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
73     + ld r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
74     brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
75    
76     ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
77     diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
78     index 99c1047..9c548c7 100644
79     --- a/arch/arc/lib/strchr-700.S
80     +++ b/arch/arc/lib/strchr-700.S
81     @@ -39,9 +39,18 @@ ARC_ENTRY strchr
82     ld.a r2,[r0,4]
83     sub r12,r6,r7
84     bic r12,r12,r6
85     +#ifdef __LITTLE_ENDIAN__
86     and r7,r12,r4
87     breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
88     b .Lfound_char ; Likewise this one.
89     +#else
90     + and r12,r12,r4
91     + breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
92     + lsr_s r12,r12,7
93     + bic r2,r7,r6
94     + b.d .Lfound_char_b
95     + and_s r2,r2,r12
96     +#endif
97     ; /* We require this code address to be unaligned for speed... */
98     .Laligned:
99     ld_s r2,[r0]
100     @@ -95,6 +104,7 @@ ARC_ENTRY strchr
101     lsr r7,r7,7
102    
103     bic r2,r7,r6
104     +.Lfound_char_b:
105     norm r2,r2
106     sub_s r0,r0,4
107     asr_s r2,r2,3
108     diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
109     index d30e48b..28ba798 100644
110     --- a/arch/arm/boot/dts/at91sam9n12ek.dts
111     +++ b/arch/arm/boot/dts/at91sam9n12ek.dts
112     @@ -14,11 +14,11 @@
113     compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
114    
115     chosen {
116     - bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
117     + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
118     };
119    
120     memory {
121     - reg = <0x20000000 0x10000000>;
122     + reg = <0x20000000 0x8000000>;
123     };
124    
125     clocks {
126     diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
127     index 1145ac3..b5833d1f 100644
128     --- a/arch/arm/boot/dts/at91sam9x5.dtsi
129     +++ b/arch/arm/boot/dts/at91sam9x5.dtsi
130     @@ -643,7 +643,7 @@
131     };
132    
133     rtc@fffffeb0 {
134     - compatible = "atmel,at91rm9200-rtc";
135     + compatible = "atmel,at91sam9x5-rtc";
136     reg = <0xfffffeb0 0x40>;
137     interrupts = <1 4 7>;
138     status = "disabled";
139     diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
140     index dff4ddc..139e42d 100644
141     --- a/arch/arm/mach-davinci/board-dm355-leopard.c
142     +++ b/arch/arm/mach-davinci/board-dm355-leopard.c
143     @@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
144     .parts = davinci_nand_partitions,
145     .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
146     .ecc_mode = NAND_ECC_HW_SYNDROME,
147     + .ecc_bits = 4,
148     .bbt_options = NAND_BBT_USE_FLASH,
149     };
150    
151     diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
152     index a33686a..fa4bfaf 100644
153     --- a/arch/arm/mach-davinci/board-dm644x-evm.c
154     +++ b/arch/arm/mach-davinci/board-dm644x-evm.c
155     @@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
156     .parts = davinci_evm_nandflash_partition,
157     .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
158     .ecc_mode = NAND_ECC_HW,
159     + .ecc_bits = 1,
160     .bbt_options = NAND_BBT_USE_FLASH,
161     .timing = &davinci_evm_nandflash_timing,
162     };
163     diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
164     index fbb8e5a..0c005e8 100644
165     --- a/arch/arm/mach-davinci/board-dm646x-evm.c
166     +++ b/arch/arm/mach-davinci/board-dm646x-evm.c
167     @@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
168     .parts = davinci_nand_partitions,
169     .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
170     .ecc_mode = NAND_ECC_HW,
171     + .ecc_bits = 1,
172     .options = 0,
173     };
174    
175     diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
176     index 2bc112a..808233b 100644
177     --- a/arch/arm/mach-davinci/board-neuros-osd2.c
178     +++ b/arch/arm/mach-davinci/board-neuros-osd2.c
179     @@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
180     .parts = davinci_ntosd2_nandflash_partition,
181     .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
182     .ecc_mode = NAND_ECC_HW,
183     + .ecc_bits = 1,
184     .bbt_options = NAND_BBT_USE_FLASH,
185     };
186    
187     diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
188     index 2950082..08c9fe9 100644
189     --- a/arch/arm/mm/Kconfig
190     +++ b/arch/arm/mm/Kconfig
191     @@ -789,15 +789,18 @@ config KUSER_HELPERS
192     the CPU type fitted to the system. This permits binaries to be
193     run on ARMv4 through to ARMv7 without modification.
194    
195     + See Documentation/arm/kernel_user_helpers.txt for details.
196     +
197     However, the fixed address nature of these helpers can be used
198     by ROP (return orientated programming) authors when creating
199     exploits.
200    
201     If all of the binaries and libraries which run on your platform
202     are built specifically for your platform, and make no use of
203     - these helpers, then you can turn this option off. However,
204     - when such an binary or library is run, it will receive a SIGILL
205     - signal, which will terminate the program.
206     + these helpers, then you can turn this option off to hinder
207     + such exploits. However, in that case, if a binary or library
208     + relying on those helpers is run, it will receive a SIGILL signal,
209     + which will terminate the program.
210    
211     Say N here only if you are absolutely certain that you do not
212     need these helpers; otherwise, the safe option is to say Y.
213     diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
214     index 9ba33c4..12e6ccb 100644
215     --- a/arch/arm64/kernel/perf_event.c
216     +++ b/arch/arm64/kernel/perf_event.c
217     @@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
218     static int
219     armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
220     {
221     - int mapping = (*event_map)[config];
222     + int mapping;
223     +
224     + if (config >= PERF_COUNT_HW_MAX)
225     + return -EINVAL;
226     +
227     + mapping = (*event_map)[config];
228     return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
229     }
230    
231     @@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
232     struct hw_perf_event fake_event = event->hw;
233     struct pmu *leader_pmu = event->group_leader->pmu;
234    
235     + if (is_software_event(event))
236     + return 1;
237     +
238     if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
239     return 1;
240    
241     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
242     index c1c7c68..698fb82 100644
243     --- a/arch/s390/kvm/kvm-s390.c
244     +++ b/arch/s390/kvm/kvm-s390.c
245     @@ -622,14 +622,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
246     kvm_s390_deliver_pending_interrupts(vcpu);
247    
248     vcpu->arch.sie_block->icptcode = 0;
249     - preempt_disable();
250     - kvm_guest_enter();
251     - preempt_enable();
252     VCPU_EVENT(vcpu, 6, "entering sie flags %x",
253     atomic_read(&vcpu->arch.sie_block->cpuflags));
254     trace_kvm_s390_sie_enter(vcpu,
255     atomic_read(&vcpu->arch.sie_block->cpuflags));
256     +
257     + /*
258     + * As PF_VCPU will be used in fault handler, between guest_enter
259     + * and guest_exit should be no uaccess.
260     + */
261     + preempt_disable();
262     + kvm_guest_enter();
263     + preempt_enable();
264     rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
265     + kvm_guest_exit();
266     +
267     + VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
268     + vcpu->arch.sie_block->icptcode);
269     + trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
270     +
271     if (rc) {
272     if (kvm_is_ucontrol(vcpu->kvm)) {
273     rc = SIE_INTERCEPT_UCONTROL;
274     @@ -639,10 +650,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
275     rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
276     }
277     }
278     - VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
279     - vcpu->arch.sie_block->icptcode);
280     - trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
281     - kvm_guest_exit();
282    
283     memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
284     return rc;
285     diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
286     index 653668d..4a8cb8d 100644
287     --- a/arch/x86/include/asm/bootparam_utils.h
288     +++ b/arch/x86/include/asm/bootparam_utils.h
289     @@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
290     */
291     if (boot_params->sentinel) {
292     /* fields in boot_params are left uninitialized, clear them */
293     - memset(&boot_params->olpc_ofw_header, 0,
294     + memset(&boot_params->ext_ramdisk_image, 0,
295     (char *)&boot_params->efi_info -
296     - (char *)&boot_params->olpc_ofw_header);
297     + (char *)&boot_params->ext_ramdisk_image);
298     memset(&boot_params->kbd_status, 0,
299     (char *)&boot_params->hdr -
300     (char *)&boot_params->kbd_status);
301     diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
302     index 48f8375..30277e2 100644
303     --- a/arch/x86/kernel/sys_x86_64.c
304     +++ b/arch/x86/kernel/sys_x86_64.c
305     @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
306     *begin = new_begin;
307     }
308     } else {
309     - *begin = mmap_legacy_base();
310     + *begin = current->mm->mmap_legacy_base;
311     *end = TASK_SIZE;
312     }
313     }
314     diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
315     index c1af323..5c1ae28 100644
316     --- a/arch/x86/mm/mmap.c
317     +++ b/arch/x86/mm/mmap.c
318     @@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
319     * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
320     * does, but not when emulating X86_32
321     */
322     -unsigned long mmap_legacy_base(void)
323     +static unsigned long mmap_legacy_base(void)
324     {
325     if (mmap_is_ia32())
326     return TASK_UNMAPPED_BASE;
327     @@ -112,12 +112,14 @@ unsigned long mmap_legacy_base(void)
328     */
329     void arch_pick_mmap_layout(struct mm_struct *mm)
330     {
331     + mm->mmap_legacy_base = mmap_legacy_base();
332     + mm->mmap_base = mmap_base();
333     +
334     if (mmap_is_legacy()) {
335     - mm->mmap_base = mmap_legacy_base();
336     + mm->mmap_base = mm->mmap_legacy_base;
337     mm->get_unmapped_area = arch_get_unmapped_area;
338     mm->unmap_area = arch_unmap_area;
339     } else {
340     - mm->mmap_base = mmap_base();
341     mm->get_unmapped_area = arch_get_unmapped_area_topdown;
342     mm->unmap_area = arch_unmap_area_topdown;
343     }
344     diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
345     index 94eac5c..0a9fb7a 100644
346     --- a/arch/x86/xen/setup.c
347     +++ b/arch/x86/xen/setup.c
348     @@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
349     e820_add_region(start, end - start, type);
350     }
351    
352     +void xen_ignore_unusable(struct e820entry *list, size_t map_size)
353     +{
354     + struct e820entry *entry;
355     + unsigned int i;
356     +
357     + for (i = 0, entry = list; i < map_size; i++, entry++) {
358     + if (entry->type == E820_UNUSABLE)
359     + entry->type = E820_RAM;
360     + }
361     +}
362     +
363     /**
364     * machine_specific_memory_setup - Hook for machine specific memory setup.
365     **/
366     @@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
367     }
368     BUG_ON(rc);
369    
370     + /*
371     + * Xen won't allow a 1:1 mapping to be created to UNUSABLE
372     + * regions, so if we're using the machine memory map leave the
373     + * region as RAM as it is in the pseudo-physical map.
374     + *
375     + * UNUSABLE regions in domUs are not handled and will need
376     + * a patch in the future.
377     + */
378     + if (xen_initial_domain())
379     + xen_ignore_unusable(map, memmap.nr_entries);
380     +
381     /* Make sure the Xen-supplied memory map is well-ordered. */
382     sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
383    
384     diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
385     index d99cae8..a1e58e1 100644
386     --- a/arch/x86/xen/smp.c
387     +++ b/arch/x86/xen/smp.c
388     @@ -667,8 +667,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
389     static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
390     {
391     int rc;
392     - rc = native_cpu_up(cpu, tidle);
393     - WARN_ON (xen_smp_intr_init(cpu));
394     + /*
395     + * xen_smp_intr_init() needs to run before native_cpu_up()
396     + * so that IPI vectors are set up on the booting CPU before
397     + * it is marked online in native_cpu_up().
398     + */
399     + rc = xen_smp_intr_init(cpu);
400     + WARN_ON(rc);
401     + if (!rc)
402     + rc = native_cpu_up(cpu, tidle);
403     return rc;
404     }
405    
406     diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
407     index 40a84cc..2384120 100644
408     --- a/drivers/acpi/glue.c
409     +++ b/drivers/acpi/glue.c
410     @@ -78,32 +78,99 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
411     return ret;
412     }
413    
414     -static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
415     - void *addr_p, void **ret_p)
416     +static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
417     + void *not_used, void **ret_p)
418     {
419     - unsigned long long addr;
420     - acpi_status status;
421     + struct acpi_device *adev = NULL;
422    
423     - status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
424     - if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
425     + acpi_bus_get_device(handle, &adev);
426     + if (adev) {
427     *ret_p = handle;
428     return AE_CTRL_TERMINATE;
429     }
430     return AE_OK;
431     }
432    
433     -acpi_handle acpi_get_child(acpi_handle parent, u64 address)
434     +static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
435     {
436     - void *ret = NULL;
437     + unsigned long long sta;
438     + acpi_status status;
439    
440     - if (!parent)
441     - return NULL;
442     + status = acpi_bus_get_status_handle(handle, &sta);
443     + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
444     + return false;
445     +
446     + if (is_bridge) {
447     + void *test = NULL;
448     +
449     + /* Check if this object has at least one child device. */
450     + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
451     + acpi_dev_present, NULL, NULL, &test);
452     + return !!test;
453     + }
454     + return true;
455     +}
456     +
457     +struct find_child_context {
458     + u64 addr;
459     + bool is_bridge;
460     + acpi_handle ret;
461     + bool ret_checked;
462     +};
463     +
464     +static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
465     + void *data, void **not_used)
466     +{
467     + struct find_child_context *context = data;
468     + unsigned long long addr;
469     + acpi_status status;
470    
471     - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
472     - do_acpi_find_child, &address, &ret);
473     - return (acpi_handle)ret;
474     + status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
475     + if (ACPI_FAILURE(status) || addr != context->addr)
476     + return AE_OK;
477     +
478     + if (!context->ret) {
479     + /* This is the first matching object. Save its handle. */
480     + context->ret = handle;
481     + return AE_OK;
482     + }
483     + /*
484     + * There is more than one matching object with the same _ADR value.
485     + * That really is unexpected, so we are kind of beyond the scope of the
486     + * spec here. We have to choose which one to return, though.
487     + *
488     + * First, check if the previously found object is good enough and return
489     + * its handle if so. Second, check the same for the object that we've
490     + * just found.
491     + */
492     + if (!context->ret_checked) {
493     + if (acpi_extra_checks_passed(context->ret, context->is_bridge))
494     + return AE_CTRL_TERMINATE;
495     + else
496     + context->ret_checked = true;
497     + }
498     + if (acpi_extra_checks_passed(handle, context->is_bridge)) {
499     + context->ret = handle;
500     + return AE_CTRL_TERMINATE;
501     + }
502     + return AE_OK;
503     +}
504     +
505     +acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
506     +{
507     + if (parent) {
508     + struct find_child_context context = {
509     + .addr = addr,
510     + .is_bridge = is_bridge,
511     + };
512     +
513     + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
514     + NULL, &context, NULL);
515     + return context.ret;
516     + }
517     + return NULL;
518     }
519     -EXPORT_SYMBOL(acpi_get_child);
520     +EXPORT_SYMBOL_GPL(acpi_find_child);
521    
522     static int acpi_bind_one(struct device *dev, acpi_handle handle)
523     {
524     diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
525     index 1c41722..20fd337 100644
526     --- a/drivers/ata/libata-pmp.c
527     +++ b/drivers/ata/libata-pmp.c
528     @@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
529    
530     /* Disable sending Early R_OK.
531     * With "cached read" HDD testing and multiple ports busy on a SATA
532     - * host controller, 3726 PMP will very rarely drop a deferred
533     + * host controller, 3x26 PMP will very rarely drop a deferred
534     * R_OK that was intended for the host. Symptom will be all
535     * 5 drives under test will timeout, get reset, and recover.
536     */
537     - if (vendor == 0x1095 && devid == 0x3726) {
538     + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
539     u32 reg;
540    
541     err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
542     if (err_mask) {
543     rc = -EIO;
544     - reason = "failed to read Sil3726 Private Register";
545     + reason = "failed to read Sil3x26 Private Register";
546     goto fail;
547     }
548     reg &= ~0x1;
549     err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
550     if (err_mask) {
551     rc = -EIO;
552     - reason = "failed to write Sil3726 Private Register";
553     + reason = "failed to write Sil3x26 Private Register";
554     goto fail;
555     }
556     }
557     @@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
558     u16 devid = sata_pmp_gscr_devid(gscr);
559     struct ata_link *link;
560    
561     - if (vendor == 0x1095 && devid == 0x3726) {
562     - /* sil3726 quirks */
563     + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
564     + /* sil3x26 quirks */
565     ata_for_each_link(link, ap, EDGE) {
566     /* link reports offline after LPM */
567     link->flags |= ATA_LFLAG_NO_LPM;
568     diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
569     index d40e403..8401061 100644
570     --- a/drivers/ata/sata_fsl.c
571     +++ b/drivers/ata/sata_fsl.c
572     @@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
573     {
574     struct sata_fsl_host_priv *host_priv = host->private_data;
575     void __iomem *hcr_base = host_priv->hcr_base;
576     + unsigned long flags;
577    
578     if (count > ICC_MAX_INT_COUNT_THRESHOLD)
579     count = ICC_MAX_INT_COUNT_THRESHOLD;
580     @@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
581     (count > ICC_MIN_INT_COUNT_THRESHOLD))
582     ticks = ICC_SAFE_INT_TICKS;
583    
584     - spin_lock(&host->lock);
585     + spin_lock_irqsave(&host->lock, flags);
586     iowrite32((count << 24 | ticks), hcr_base + ICC);
587    
588     intr_coalescing_count = count;
589     intr_coalescing_ticks = ticks;
590     - spin_unlock(&host->lock);
591     + spin_unlock_irqrestore(&host->lock, flags);
592    
593     DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
594     intr_coalescing_count, intr_coalescing_ticks);
595     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
596     index 80b0a66..01f6c2c 100644
597     --- a/drivers/gpu/drm/i915/i915_reg.h
598     +++ b/drivers/gpu/drm/i915/i915_reg.h
599     @@ -617,6 +617,8 @@
600     will not assert AGPBUSY# and will only
601     be delivered when out of C3. */
602     #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
603     +#define INSTPM_TLB_INVALIDATE (1<<9)
604     +#define INSTPM_SYNC_FLUSH (1<<5)
605     #define ACTHD 0x020c8
606     #define FW_BLC 0x020d8
607     #define FW_BLC2 0x020dc
608     diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
609     index 1424f20..48fe23e 100644
610     --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
611     +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
612     @@ -907,6 +907,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
613    
614     I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
615     POSTING_READ(mmio);
616     +
617     + /* Flush the TLB for this page */
618     + if (INTEL_INFO(dev)->gen >= 6) {
619     + u32 reg = RING_INSTPM(ring->mmio_base);
620     + I915_WRITE(reg,
621     + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
622     + INSTPM_SYNC_FLUSH));
623     + if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
624     + 1000))
625     + DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
626     + ring->name);
627     + }
628     }
629    
630     static int
631     diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
632     index bdd9d56..d4ff48c 100644
633     --- a/drivers/gpu/drm/radeon/radeon.h
634     +++ b/drivers/gpu/drm/radeon/radeon.h
635     @@ -1764,7 +1764,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
636     WREG32(reg, tmp_); \
637     } while (0)
638     #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
639     -#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
640     +#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
641     #define WREG32_PLL_P(reg, val, mask) \
642     do { \
643     uint32_t tmp_ = RREG32_PLL(reg); \
644     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
645     index 97002a0..f3ccf6d 100644
646     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
647     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
648     @@ -359,6 +359,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
649     return -EINVAL;
650     }
651    
652     + if (bo->tbo.sync_obj) {
653     + r = radeon_fence_wait(bo->tbo.sync_obj, false);
654     + if (r) {
655     + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
656     + return r;
657     + }
658     + }
659     +
660     r = radeon_bo_kmap(bo, &ptr);
661     if (r)
662     return r;
663     diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
664     index bcc68ec..f5e92cf 100644
665     --- a/drivers/gpu/drm/radeon/rv770.c
666     +++ b/drivers/gpu/drm/radeon/rv770.c
667     @@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
668     (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
669     radeon_program_register_sequence(rdev,
670     rv730_golden_registers,
671     - (const u32)ARRAY_SIZE(rv770_golden_registers));
672     + (const u32)ARRAY_SIZE(rv730_golden_registers));
673     radeon_program_register_sequence(rdev,
674     rv730_mgcg_init,
675     - (const u32)ARRAY_SIZE(rv770_mgcg_init));
676     + (const u32)ARRAY_SIZE(rv730_mgcg_init));
677     break;
678     case CHIP_RV710:
679     radeon_program_register_sequence(rdev,
680     @@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
681     (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
682     radeon_program_register_sequence(rdev,
683     rv710_golden_registers,
684     - (const u32)ARRAY_SIZE(rv770_golden_registers));
685     + (const u32)ARRAY_SIZE(rv710_golden_registers));
686     radeon_program_register_sequence(rdev,
687     rv710_mgcg_init,
688     - (const u32)ARRAY_SIZE(rv770_mgcg_init));
689     + (const u32)ARRAY_SIZE(rv710_mgcg_init));
690     break;
691     case CHIP_RV740:
692     radeon_program_register_sequence(rdev,
693     rv740_golden_registers,
694     - (const u32)ARRAY_SIZE(rv770_golden_registers));
695     + (const u32)ARRAY_SIZE(rv740_golden_registers));
696     radeon_program_register_sequence(rdev,
697     rv740_mgcg_init,
698     - (const u32)ARRAY_SIZE(rv770_mgcg_init));
699     + (const u32)ARRAY_SIZE(rv740_mgcg_init));
700     break;
701     default:
702     break;
703     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
704     index 7b687a6..833c590 100644
705     --- a/drivers/md/bcache/btree.c
706     +++ b/drivers/md/bcache/btree.c
707     @@ -326,10 +326,25 @@ static void do_btree_write(struct btree *b)
708     i->csum = btree_csum_set(b, i);
709    
710     btree_bio_init(b);
711     - b->bio->bi_rw = REQ_META|WRITE_SYNC;
712     + b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
713     b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
714     bch_bio_map(b->bio, i);
715    
716     + /*
717     + * If we're appending to a leaf node, we don't technically need FUA -
718     + * this write just needs to be persisted before the next journal write,
719     + * which will be marked FLUSH|FUA.
720     + *
721     + * Similarly if we're writing a new btree root - the pointer is going to
722     + * be in the next journal entry.
723     + *
724     + * But if we're writing a new btree node (that isn't a root) or
725     + * appending to a non leaf btree node, we need either FUA or a flush
726     + * when we write the parent with the new pointer. FUA is cheaper than a
727     + * flush, and writes appending to leaf nodes aren't blocking anything so
728     + * just make all btree node writes FUA to keep things sane.
729     + */
730     +
731     bkey_copy(&k.key, &b->key);
732     SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
733    
734     @@ -2142,6 +2157,9 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
735     void bch_btree_set_root(struct btree *b)
736     {
737     unsigned i;
738     + struct closure cl;
739     +
740     + closure_init_stack(&cl);
741    
742     BUG_ON(!b->written);
743    
744     @@ -2155,8 +2173,9 @@ void bch_btree_set_root(struct btree *b)
745     b->c->root = b;
746     __bkey_put(b->c, &b->key);
747    
748     - bch_journal_meta(b->c, NULL);
749     + bch_journal_meta(b->c, &cl);
750     pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
751     + closure_sync(&cl);
752     }
753    
754     /* Cache lookup */
755     diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
756     index 48efd4d..d285cd4 100644
757     --- a/drivers/md/bcache/io.c
758     +++ b/drivers/md/bcache/io.c
759     @@ -97,6 +97,8 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
760    
761     if (bio->bi_rw & REQ_DISCARD) {
762     ret = bio_alloc_bioset(gfp, 1, bs);
763     + if (!ret)
764     + return NULL;
765     idx = 0;
766     goto out;
767     }
768     diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
769     index 8a54d3b..b49abb2 100644
770     --- a/drivers/md/bcache/journal.c
771     +++ b/drivers/md/bcache/journal.c
772     @@ -622,7 +622,7 @@ static void journal_write_unlocked(struct closure *cl)
773     bio_reset(bio);
774     bio->bi_sector = PTR_OFFSET(k, i);
775     bio->bi_bdev = ca->bdev;
776     - bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
777     + bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
778     bio->bi_size = sectors << 9;
779    
780     bio->bi_end_io = journal_write_endio;
781     diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
782     index 2f36743..afb9a99 100644
783     --- a/drivers/md/bcache/request.c
784     +++ b/drivers/md/bcache/request.c
785     @@ -1053,9 +1053,20 @@ static void request_write(struct cached_dev *dc, struct search *s)
786     trace_bcache_writethrough(s->orig_bio);
787     closure_bio_submit(bio, cl, s->d);
788     } else {
789     - s->op.cache_bio = bio;
790     trace_bcache_writeback(s->orig_bio);
791     bch_writeback_add(dc, bio_sectors(bio));
792     +
793     + if (s->op.flush_journal) {
794     + /* Also need to send a flush to the backing device */
795     + s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
796     + dc->disk.bio_split);
797     +
798     + bio->bi_size = 0;
799     + bio->bi_vcnt = 0;
800     + closure_bio_submit(bio, cl, s->d);
801     + } else {
802     + s->op.cache_bio = bio;
803     + }
804     }
805     out:
806     closure_call(&s->op.cl, bch_insert_data, NULL, cl);
807     diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
808     index 822170f..700fe55 100644
809     --- a/drivers/misc/mei/hw-me.c
810     +++ b/drivers/misc/mei/hw-me.c
811     @@ -238,14 +238,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
812     if (mei_me_hw_is_ready(dev))
813     return 0;
814    
815     + dev->recvd_hw_ready = false;
816     mutex_unlock(&dev->device_lock);
817     err = wait_event_interruptible_timeout(dev->wait_hw_ready,
818     - dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT);
819     + dev->recvd_hw_ready,
820     + mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
821     mutex_lock(&dev->device_lock);
822     if (!err && !dev->recvd_hw_ready) {
823     + if (!err)
824     + err = -ETIMEDOUT;
825     dev_err(&dev->pdev->dev,
826     - "wait hw ready failed. status = 0x%x\n", err);
827     - return -ETIMEDOUT;
828     + "wait hw ready failed. status = %d\n", err);
829     + return err;
830     }
831    
832     dev->recvd_hw_ready = false;
833     @@ -482,7 +486,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
834     /* check if ME wants a reset */
835     if (!mei_hw_is_ready(dev) &&
836     dev->dev_state != MEI_DEV_RESETTING &&
837     - dev->dev_state != MEI_DEV_INITIALIZING) {
838     + dev->dev_state != MEI_DEV_INITIALIZING &&
839     + dev->dev_state != MEI_DEV_POWER_DOWN &&
840     + dev->dev_state != MEI_DEV_POWER_UP) {
841     dev_dbg(&dev->pdev->dev, "FW not ready.\n");
842     mei_reset(dev, 1);
843     mutex_unlock(&dev->device_lock);
844     diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
845     index f580d30..6eec689 100644
846     --- a/drivers/misc/mei/init.c
847     +++ b/drivers/misc/mei/init.c
848     @@ -143,7 +143,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
849    
850     dev->hbm_state = MEI_HBM_IDLE;
851    
852     - if (dev->dev_state != MEI_DEV_INITIALIZING) {
853     + if (dev->dev_state != MEI_DEV_INITIALIZING &&
854     + dev->dev_state != MEI_DEV_POWER_UP) {
855     if (dev->dev_state != MEI_DEV_DISABLED &&
856     dev->dev_state != MEI_DEV_POWER_DOWN)
857     dev->dev_state = MEI_DEV_RESETTING;
858     diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
859     index ac07473..e509030 100644
860     --- a/drivers/net/wireless/hostap/hostap_ioctl.c
861     +++ b/drivers/net/wireless/hostap/hostap_ioctl.c
862     @@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
863    
864     data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
865    
866     - memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
867     + memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
868     data->flags = 1; /* has quality information */
869     - memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
870     + memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
871     sizeof(struct iw_quality) * data->length);
872    
873     kfree(addr);
874     diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
875     index cab23af..e04f3da 100644
876     --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
877     +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
878     @@ -1059,7 +1059,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
879     if (test_bit(STATUS_EXIT_PENDING, &priv->status))
880     return;
881    
882     - if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
883     + if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
884     + return;
885     +
886     + if (ctx->vif)
887     ieee80211_chswitch_done(ctx->vif, is_success);
888     }
889    
890     diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
891     index 50263e8..dc94d44 100644
892     --- a/drivers/net/wireless/iwlwifi/iwl-7000.c
893     +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
894     @@ -67,16 +67,16 @@
895     #include "iwl-agn-hw.h"
896    
897     /* Highest firmware API version supported */
898     -#define IWL7260_UCODE_API_MAX 6
899     -#define IWL3160_UCODE_API_MAX 6
900     +#define IWL7260_UCODE_API_MAX 7
901     +#define IWL3160_UCODE_API_MAX 7
902    
903     /* Oldest version we won't warn about */
904     -#define IWL7260_UCODE_API_OK 6
905     -#define IWL3160_UCODE_API_OK 6
906     +#define IWL7260_UCODE_API_OK 7
907     +#define IWL3160_UCODE_API_OK 7
908    
909     /* Lowest firmware API version supported */
910     -#define IWL7260_UCODE_API_MIN 6
911     -#define IWL3160_UCODE_API_MIN 6
912     +#define IWL7260_UCODE_API_MIN 7
913     +#define IWL3160_UCODE_API_MIN 7
914    
915     /* NVM versions */
916     #define IWL7260_NVM_VERSION 0x0a1d
917     diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
918     index 51e015d..6f8b2c1 100644
919     --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
920     +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
921     @@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags {
922     * struct iwl_d3_manager_config - D3 manager configuration command
923     * @min_sleep_time: minimum sleep time (in usec)
924     * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
925     + * @wakeup_host_timer: force wakeup after this many seconds
926     *
927     * The structure is used for the D3_CONFIG_CMD command.
928     */
929     struct iwl_d3_manager_config {
930     __le32 min_sleep_time;
931     __le32 wakeup_flags;
932     -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
933     + __le32 wakeup_host_timer;
934     +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
935    
936    
937     /* TODO: OFFLOADS_QUERY_API_S_VER_1 */
938     diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
939     index d68640e..98b1feb 100644
940     --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
941     +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
942     @@ -71,7 +71,13 @@
943     #define MAC_INDEX_MIN_DRIVER 0
944     #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
945    
946     -#define AC_NUM 4 /* Number of access categories */
947     +enum iwl_ac {
948     + AC_BK,
949     + AC_BE,
950     + AC_VI,
951     + AC_VO,
952     + AC_NUM,
953     +};
954    
955     /**
956     * enum iwl_mac_protection_flags - MAC context flags
957     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
958     index b2cc3d9..d8e858c 100644
959     --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
960     +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
961     @@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
962     u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
963     struct ieee80211_vif *vif)
964     {
965     - u32 qmask, ac;
966     + u32 qmask = 0, ac;
967    
968     if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
969     return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
970    
971     - qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
972     - BIT(vif->cab_queue) : 0;
973     -
974     for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
975     if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
976     qmask |= BIT(vif->hw_queue[ac]);
977     @@ -362,7 +359,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
978     break;
979     case NL80211_IFTYPE_AP:
980     iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
981     - IWL_MVM_TX_FIFO_VO);
982     + IWL_MVM_TX_FIFO_MCAST);
983     /* fall through */
984     default:
985     for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
986     @@ -550,6 +547,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
987     cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
988     }
989    
990     + /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
991     + if (vif->type == NL80211_IFTYPE_AP)
992     + cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
993     +
994     if (vif->bss_conf.qos)
995     cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
996    
997     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
998     index b7e95b0..f7545e0 100644
999     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
1000     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
1001     @@ -243,7 +243,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
1002     if (ret)
1003     return ret;
1004    
1005     - return ieee80211_register_hw(mvm->hw);
1006     + ret = ieee80211_register_hw(mvm->hw);
1007     + if (ret)
1008     + iwl_mvm_leds_exit(mvm);
1009     +
1010     + return ret;
1011     }
1012    
1013     static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
1014     diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
1015     index 9f46b23..8086231 100644
1016     --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
1017     +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
1018     @@ -88,6 +88,7 @@ enum iwl_mvm_tx_fifo {
1019     IWL_MVM_TX_FIFO_BE,
1020     IWL_MVM_TX_FIFO_VI,
1021     IWL_MVM_TX_FIFO_VO,
1022     + IWL_MVM_TX_FIFO_MCAST = 5,
1023     };
1024    
1025     extern struct ieee80211_ops iwl_mvm_hw_ops;
1026     diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
1027     index 736b50b..68f0bbe 100644
1028     --- a/drivers/net/wireless/iwlwifi/mvm/sta.c
1029     +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
1030     @@ -226,9 +226,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1031     if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
1032     mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
1033    
1034     - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
1035     - mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
1036     -
1037     /* for HW restart - need to reset the seq_number etc... */
1038     memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
1039    
1040     @@ -1296,17 +1293,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
1041     struct iwl_mvm_add_sta_cmd cmd = {
1042     .add_modify = STA_MODE_MODIFY,
1043     .sta_id = mvmsta->sta_id,
1044     - .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
1045     - .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
1046     + .station_flags_msk = cpu_to_le32(STA_FLG_PS),
1047     .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
1048     };
1049     int ret;
1050    
1051     - /*
1052     - * Same modify mask for sleep_tx_count and sleep_state_flags but this
1053     - * should be fine since if we set the STA as "awake", then
1054     - * sleep_tx_count is not relevant.
1055     - */
1056     ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1057     if (ret)
1058     IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
1059     diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
1060     index 48c1891..a2e6112e 100644
1061     --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
1062     +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
1063     @@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
1064     * table is controlled by LINK_QUALITY commands
1065     */
1066    
1067     - if (ieee80211_is_data(fc)) {
1068     + if (ieee80211_is_data(fc) && sta) {
1069     tx_cmd->initial_rate_index = 0;
1070     tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
1071     return;
1072     @@ -610,8 +610,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1073     !(info->flags & IEEE80211_TX_STAT_ACK))
1074     info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1075    
1076     - /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
1077     - if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
1078     + /* W/A FW bug: seq_ctl is wrong when the status isn't success */
1079     + if (status != TX_STATUS_SUCCESS) {
1080     struct ieee80211_hdr *hdr = (void *)skb->data;
1081     seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1082     }
1083     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
1084     index 50ba0a4..aeb70e1 100644
1085     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
1086     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
1087     @@ -1481,16 +1481,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1088     spin_lock_init(&trans_pcie->reg_lock);
1089     init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1090    
1091     - /* W/A - seems to solve weird behavior. We need to remove this if we
1092     - * don't want to stay in L1 all the time. This wastes a lot of power */
1093     - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1094     - PCIE_LINK_STATE_CLKPM);
1095     -
1096     if (pci_enable_device(pdev)) {
1097     err = -ENODEV;
1098     goto out_no_pci;
1099     }
1100    
1101     + /* W/A - seems to solve weird behavior. We need to remove this if we
1102     + * don't want to stay in L1 all the time. This wastes a lot of power */
1103     + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1104     + PCIE_LINK_STATE_CLKPM);
1105     +
1106     pci_set_master(pdev);
1107    
1108     err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
1109     diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
1110     index 4941f20..b8ba1f9 100644
1111     --- a/drivers/net/wireless/zd1201.c
1112     +++ b/drivers/net/wireless/zd1201.c
1113     @@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
1114     goto exit;
1115    
1116     err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
1117     - USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
1118     + USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
1119     if (err < 0)
1120     goto exit;
1121    
1122     + memcpy(&ret, buf, sizeof(ret));
1123     +
1124     if (ret & 0x80) {
1125     err = -EIO;
1126     goto exit;
1127     diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
1128     index 808be06..1187737 100644
1129     --- a/drivers/of/fdt.c
1130     +++ b/drivers/of/fdt.c
1131     @@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
1132     mem = (unsigned long)
1133     dt_alloc(size + 4, __alignof__(struct device_node));
1134    
1135     + memset((void *)mem, 0, size);
1136     +
1137     ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
1138    
1139     pr_debug(" unflattening %lx...\n", mem);
1140     diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
1141     index e4b1fb2..336b3f9 100644
1142     --- a/drivers/pci/pci-acpi.c
1143     +++ b/drivers/pci/pci-acpi.c
1144     @@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
1145     /* ACPI bus type */
1146     static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
1147     {
1148     - struct pci_dev * pci_dev;
1149     - u64 addr;
1150     + struct pci_dev *pci_dev = to_pci_dev(dev);
1151     + bool is_bridge;
1152     + u64 addr;
1153    
1154     - pci_dev = to_pci_dev(dev);
1155     + /*
1156     + * pci_is_bridge() is not suitable here, because pci_dev->subordinate
1157     + * is set only after acpi_pci_find_device() has been called for the
1158     + * given device.
1159     + */
1160     + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
1161     + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
1162     /* Please ref to ACPI spec for the syntax of _ADR */
1163     addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
1164     - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
1165     + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
1166     if (!*handle)
1167     return -ENODEV;
1168     return 0;
1169     diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
1170     index 0f9f859..f911952 100644
1171     --- a/drivers/platform/olpc/olpc-ec.c
1172     +++ b/drivers/platform/olpc/olpc-ec.c
1173     @@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
1174     return platform_driver_register(&olpc_ec_plat_driver);
1175     }
1176    
1177     -module_init(olpc_ec_init_module);
1178     +arch_initcall(olpc_ec_init_module);
1179    
1180     MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
1181     MODULE_LICENSE("GPL");
1182     diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1183     index 4133ab6..8e8f353 100644
1184     --- a/drivers/s390/scsi/zfcp_erp.c
1185     +++ b/drivers/s390/scsi/zfcp_erp.c
1186     @@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
1187    
1188     if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
1189     zfcp_erp_action_dismiss(&port->erp_action);
1190     - else
1191     - shost_for_each_device(sdev, port->adapter->scsi_host)
1192     + else {
1193     + spin_lock(port->adapter->scsi_host->host_lock);
1194     + __shost_for_each_device(sdev, port->adapter->scsi_host)
1195     if (sdev_to_zfcp(sdev)->port == port)
1196     zfcp_erp_action_dismiss_lun(sdev);
1197     + spin_unlock(port->adapter->scsi_host->host_lock);
1198     + }
1199     }
1200    
1201     static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
1202     @@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
1203     {
1204     struct scsi_device *sdev;
1205    
1206     - shost_for_each_device(sdev, port->adapter->scsi_host)
1207     + spin_lock(port->adapter->scsi_host->host_lock);
1208     + __shost_for_each_device(sdev, port->adapter->scsi_host)
1209     if (sdev_to_zfcp(sdev)->port == port)
1210     _zfcp_erp_lun_reopen(sdev, clear, id, 0);
1211     + spin_unlock(port->adapter->scsi_host->host_lock);
1212     }
1213    
1214     static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
1215     @@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1216     atomic_set_mask(common_mask, &port->status);
1217     read_unlock_irqrestore(&adapter->port_list_lock, flags);
1218    
1219     - shost_for_each_device(sdev, adapter->scsi_host)
1220     + spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1221     + __shost_for_each_device(sdev, adapter->scsi_host)
1222     atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1223     + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1224     }
1225    
1226     /**
1227     @@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1228     }
1229     read_unlock_irqrestore(&adapter->port_list_lock, flags);
1230    
1231     - shost_for_each_device(sdev, adapter->scsi_host) {
1232     + spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1233     + __shost_for_each_device(sdev, adapter->scsi_host) {
1234     atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1235     if (clear_counter)
1236     atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1237     }
1238     + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1239     }
1240    
1241     /**
1242     @@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1243     {
1244     struct scsi_device *sdev;
1245     u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1246     + unsigned long flags;
1247    
1248     atomic_set_mask(mask, &port->status);
1249    
1250     if (!common_mask)
1251     return;
1252    
1253     - shost_for_each_device(sdev, port->adapter->scsi_host)
1254     + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1255     + __shost_for_each_device(sdev, port->adapter->scsi_host)
1256     if (sdev_to_zfcp(sdev)->port == port)
1257     atomic_set_mask(common_mask,
1258     &sdev_to_zfcp(sdev)->status);
1259     + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1260     }
1261    
1262     /**
1263     @@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1264     struct scsi_device *sdev;
1265     u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1266     u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1267     + unsigned long flags;
1268    
1269     atomic_clear_mask(mask, &port->status);
1270    
1271     @@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1272     if (clear_counter)
1273     atomic_set(&port->erp_counter, 0);
1274    
1275     - shost_for_each_device(sdev, port->adapter->scsi_host)
1276     + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1277     + __shost_for_each_device(sdev, port->adapter->scsi_host)
1278     if (sdev_to_zfcp(sdev)->port == port) {
1279     atomic_clear_mask(common_mask,
1280     &sdev_to_zfcp(sdev)->status);
1281     if (clear_counter)
1282     atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1283     }
1284     + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1285     }
1286    
1287     /**
1288     diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
1289     index 665e3cf..de0598e 100644
1290     --- a/drivers/s390/scsi/zfcp_qdio.c
1291     +++ b/drivers/s390/scsi/zfcp_qdio.c
1292     @@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
1293    
1294     static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
1295     {
1296     - spin_lock_irq(&qdio->req_q_lock);
1297     if (atomic_read(&qdio->req_q_free) ||
1298     !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
1299     return 1;
1300     - spin_unlock_irq(&qdio->req_q_lock);
1301     return 0;
1302     }
1303    
1304     @@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
1305     {
1306     long ret;
1307    
1308     - spin_unlock_irq(&qdio->req_q_lock);
1309     - ret = wait_event_interruptible_timeout(qdio->req_q_wq,
1310     - zfcp_qdio_sbal_check(qdio), 5 * HZ);
1311     + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
1312     + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
1313    
1314     if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
1315     return -EIO;
1316     @@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
1317     zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
1318     }
1319    
1320     - spin_lock_irq(&qdio->req_q_lock);
1321     return -EIO;
1322     }
1323    
1324     diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
1325     index 86af29f..1348fa4 100644
1326     --- a/drivers/scsi/Kconfig
1327     +++ b/drivers/scsi/Kconfig
1328     @@ -1353,7 +1353,6 @@ config SCSI_LPFC
1329     tristate "Emulex LightPulse Fibre Channel Support"
1330     depends on PCI && SCSI
1331     select SCSI_FC_ATTRS
1332     - select GENERIC_CSUM
1333     select CRC_T10DIF
1334     help
1335     This lpfc driver supports the Emulex LightPulse
1336     diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
1337     index 06d190f..4a2b042 100644
1338     --- a/drivers/staging/comedi/drivers.c
1339     +++ b/drivers/staging/comedi/drivers.c
1340     @@ -464,7 +464,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
1341     ret = comedi_device_postconfig(dev);
1342     if (ret < 0) {
1343     comedi_device_detach(dev);
1344     - module_put(dev->driver->module);
1345     + module_put(driv->module);
1346     }
1347     /* On success, the driver module count has been incremented. */
1348     return ret;
1349     diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
1350     index ca26628..e1859b8 100644
1351     --- a/drivers/usb/phy/phy-fsl-usb.h
1352     +++ b/drivers/usb/phy/phy-fsl-usb.h
1353     @@ -15,7 +15,7 @@
1354     * 675 Mass Ave, Cambridge, MA 02139, USA.
1355     */
1356    
1357     -#include "otg_fsm.h"
1358     +#include "phy-fsm-usb.h"
1359     #include <linux/usb/otg.h>
1360     #include <linux/ioctl.h>
1361    
1362     diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
1363     index c520b35..7f45966 100644
1364     --- a/drivers/usb/phy/phy-fsm-usb.c
1365     +++ b/drivers/usb/phy/phy-fsm-usb.c
1366     @@ -29,7 +29,7 @@
1367     #include <linux/usb/gadget.h>
1368     #include <linux/usb/otg.h>
1369    
1370     -#include "phy-otg-fsm.h"
1371     +#include "phy-fsm-usb.h"
1372    
1373     /* Change USB protocol when there is a protocol change */
1374     static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
1375     diff --git a/drivers/xen/events.c b/drivers/xen/events.c
1376     index 6a6bbe4..1faa130 100644
1377     --- a/drivers/xen/events.c
1378     +++ b/drivers/xen/events.c
1379     @@ -346,7 +346,7 @@ static void init_evtchn_cpu_bindings(void)
1380    
1381     for_each_possible_cpu(i)
1382     memset(per_cpu(cpu_evtchn_mask, i),
1383     - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
1384     + (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
1385     }
1386    
1387     static inline void clear_evtchn(int port)
1388     @@ -1492,8 +1492,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
1389     /* Rebind an evtchn so that it gets delivered to a specific cpu */
1390     static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1391     {
1392     + struct shared_info *s = HYPERVISOR_shared_info;
1393     struct evtchn_bind_vcpu bind_vcpu;
1394     int evtchn = evtchn_from_irq(irq);
1395     + int masked;
1396    
1397     if (!VALID_EVTCHN(evtchn))
1398     return -1;
1399     @@ -1510,6 +1512,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1400     bind_vcpu.vcpu = tcpu;
1401    
1402     /*
1403     + * Mask the event while changing the VCPU binding to prevent
1404     + * it from being delivered on an unexpected VCPU.
1405     + */
1406     + masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1407     +
1408     + /*
1409     * If this fails, it usually just indicates that we're dealing with a
1410     * virq or IPI channel, which don't actually need to be rebound. Ignore
1411     * it, but don't do the xenlinux-level rebind in that case.
1412     @@ -1517,6 +1525,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1413     if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1414     bind_evtchn_to_cpu(evtchn, tcpu);
1415    
1416     + if (!masked)
1417     + unmask_evtchn(evtchn);
1418     +
1419     return 0;
1420     }
1421    
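The rebind_irq_to_cpu() hunk above is the classic mask/modify/restore shape: atomically set the mask bit while remembering its prior state, do the racy binding change, then clear the bit only if this path was the one that set it. A self-contained user-space toy of the same idea; the bitmap and helpers below are illustrative stand-ins, not Xen API:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long evtchn_mask;          /* toy stand-in for s->evtchn_mask */

    static bool test_and_set(int bit)
    {
            bool was_set = evtchn_mask & (1UL << bit);
            evtchn_mask |= 1UL << bit;
            return was_set;
    }

    static void clear_bit_toy(int bit)
    {
            evtchn_mask &= ~(1UL << bit);
    }

    int main(void)
    {
            int ch = 3;
            bool masked = test_and_set(ch);    /* mask; remember prior state */
            /* ... rebind the channel while delivery is suppressed ... */
            if (!masked)
                    clear_bit_toy(ch);         /* restore only what we changed */
            printf("bit %d masked afterwards: %s\n", ch,
                   (evtchn_mask >> ch) & 1 ? "yes" : "no");
            return 0;
    }
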
1422     diff --git a/fs/bio.c b/fs/bio.c
1423     index 94bbc04..c5eae72 100644
1424     --- a/fs/bio.c
1425     +++ b/fs/bio.c
1426     @@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1427     int bio_uncopy_user(struct bio *bio)
1428     {
1429     struct bio_map_data *bmd = bio->bi_private;
1430     - int ret = 0;
1431     + struct bio_vec *bvec;
1432     + int ret = 0, i;
1433    
1434     - if (!bio_flagged(bio, BIO_NULL_MAPPED))
1435     - ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1436     - bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1437     - 0, bmd->is_our_pages);
1438     + if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1439     + /*
1440     + * If we're in a workqueue, the request is orphaned, so
1441     + * don't copy into a random user address space; just free.
1442     + */
1443     + if (current->mm)
1444     + ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1445     + bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1446     + 0, bmd->is_our_pages);
1447     + else if (bmd->is_our_pages)
1448     + bio_for_each_segment_all(bvec, bio, i)
1449     + __free_page(bvec->bv_page);
1450     + }
1451     bio_free_map_data(bmd);
1452     bio_put(bio);
1453     return ret;
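The bio_uncopy_user() hunk keys off current->mm: kernel threads, including workqueue workers, run with current->mm == NULL, so copying the bounce data "back" there would scribble over whatever address space the worker happens to have borrowed. A minimal sketch of the guard; copy_back_to_user() and free_bounce_pages() are hypothetical helpers, not kernel API:

    static void complete_copy(struct bio_map_data *bmd)
    {
            if (current->mm)                 /* real process context */
                    copy_back_to_user(bmd);  /* hypothetical: submitter's mm is live */
            else                             /* kthread/workqueue: no mm */
                    free_bounce_pages(bmd);  /* hypothetical: never touch user memory */
    }
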
1454     diff --git a/fs/namespace.c b/fs/namespace.c
1455     index 7b1ca9b..a45ba4f 100644
1456     --- a/fs/namespace.c
1457     +++ b/fs/namespace.c
1458     @@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
1459     CL_COPY_ALL | CL_PRIVATE);
1460     namespace_unlock();
1461     if (IS_ERR(tree))
1462     - return NULL;
1463     + return ERR_CAST(tree);
1464     return &tree->mnt;
1465     }
1466    
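The one-line collect_mounts() fix matters because copy_tree() reports failure via the ERR_PTR encoding; returning NULL instead threw the errno away and confused callers that check IS_ERR() rather than NULL. A self-contained user-space demo of the encoding (the real helpers live in include/linux/err.h; the reimplementation here is only for illustration):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095          /* same convention as include/linux/err.h */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

    static void *copy_tree_toy(int fail)    /* stand-in for the real copy_tree() */
    {
            static int dummy;
            return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
    }

    int main(void)
    {
            void *tree = copy_tree_toy(1);
            /* Returning NULL here would lose the -ENOMEM; ERR_CAST keeps it. */
            void *mnt = IS_ERR(tree) ? ERR_CAST(tree) : tree;
            if (IS_ERR(mnt))
                    printf("propagated error: %ld\n", PTR_ERR(mnt));  /* -12 */
            return 0;
    }
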
1467     diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
1468     index dc9a913..2d8be51 100644
1469     --- a/fs/nilfs2/segbuf.c
1470     +++ b/fs/nilfs2/segbuf.c
1471     @@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
1472    
1473     if (err == -EOPNOTSUPP) {
1474     set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
1475     - bio_put(bio);
1476     - /* to be detected by submit_seg_bio() */
1477     + /* to be detected by nilfs_segbuf_submit_bio() */
1478     }
1479    
1480     if (!uptodate)
1481     @@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
1482     bio->bi_private = segbuf;
1483     bio_get(bio);
1484     submit_bio(mode, bio);
1485     + segbuf->sb_nbio++;
1486     if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
1487     bio_put(bio);
1488     err = -EOPNOTSUPP;
1489     goto failed;
1490     }
1491     - segbuf->sb_nbio++;
1492     bio_put(bio);
1493    
1494     wi->bio = NULL;
1495     diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
1496     index c13c919..f45b2a78 100644
1497     --- a/include/acpi/acpi_bus.h
1498     +++ b/include/acpi/acpi_bus.h
1499     @@ -455,7 +455,11 @@ struct acpi_pci_root {
1500     };
1501    
1502     /* helper */
1503     -acpi_handle acpi_get_child(acpi_handle, u64);
1504     +acpi_handle acpi_find_child(acpi_handle, u64, bool);
1505     +static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
1506     +{
1507     + return acpi_find_child(handle, addr, false);
1508     +}
1509     int acpi_is_root_bridge(acpi_handle);
1510     struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
1511     #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
1512     diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
1513     index 44cdc11..120d57a 100644
1514     --- a/include/linux/ftrace_event.h
1515     +++ b/include/linux/ftrace_event.h
1516     @@ -334,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
1517     const char *name, int offset, int size,
1518     int is_signed, int filter_type);
1519     extern int trace_add_event_call(struct ftrace_event_call *call);
1520     -extern void trace_remove_event_call(struct ftrace_event_call *call);
1521     +extern int trace_remove_event_call(struct ftrace_event_call *call);
1522    
1523     #define is_signed_type(type) (((type)(-1)) < (type)1)
1524    
1525     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
1526     index ace9a5f..4a189ba 100644
1527     --- a/include/linux/mm_types.h
1528     +++ b/include/linux/mm_types.h
1529     @@ -333,6 +333,7 @@ struct mm_struct {
1530     void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
1531     #endif
1532     unsigned long mmap_base; /* base of mmap area */
1533     + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
1534     unsigned long task_size; /* size of task vm space */
1535     unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
1536     unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
1537     diff --git a/include/linux/sched.h b/include/linux/sched.h
1538     index 3aeb14b..178a8d9 100644
1539     --- a/include/linux/sched.h
1540     +++ b/include/linux/sched.h
1541     @@ -314,7 +314,6 @@ struct nsproxy;
1542     struct user_namespace;
1543    
1544     #ifdef CONFIG_MMU
1545     -extern unsigned long mmap_legacy_base(void);
1546     extern void arch_pick_mmap_layout(struct mm_struct *mm);
1547     extern unsigned long
1548     arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
1549     diff --git a/include/linux/wait.h b/include/linux/wait.h
1550     index 1133695..c8e5760 100644
1551     --- a/include/linux/wait.h
1552     +++ b/include/linux/wait.h
1553     @@ -805,6 +805,63 @@ do { \
1554     __ret; \
1555     })
1556    
1557     +#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
1558     + lock, ret) \
1559     +do { \
1560     + DEFINE_WAIT(__wait); \
1561     + \
1562     + for (;;) { \
1563     + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
1564     + if (condition) \
1565     + break; \
1566     + if (signal_pending(current)) { \
1567     + ret = -ERESTARTSYS; \
1568     + break; \
1569     + } \
1570     + spin_unlock_irq(&lock); \
1571     + ret = schedule_timeout(ret); \
1572     + spin_lock_irq(&lock); \
1573     + if (!ret) \
1574     + break; \
1575     + } \
1576     + finish_wait(&wq, &__wait); \
1577     +} while (0)
1578     +
1579     +/**
1580     + * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses.
1581     + * The condition is checked under the lock. This is expected
1582     + * to be called with the lock held.
1583     + * @wq: the waitqueue to wait on
1584     + * @condition: a C expression for the event to wait for
1585     + * @lock: a locked spinlock_t, which will be released before schedule()
1586     + * and reacquired afterwards.
1587     + * @timeout: timeout, in jiffies
1588     + *
1589     + * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1590     + * @condition evaluates to true or a signal is received. The @condition is
1591     + * checked each time the waitqueue @wq is woken up.
1592     + *
1593     + * wake_up() has to be called after changing any variable that could
1594     + * change the result of the wait condition.
1595     + *
1596     + * This is supposed to be called while holding the lock. The lock is
1597     + * dropped before going to sleep and is reacquired afterwards.
1598     + *
1599     + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1600     + * was interrupted by a signal, and the remaining jiffies if the
1601     + * condition evaluated to true before the timeout elapsed.
1602     + */
1603     +#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
1604     + timeout) \
1605     +({ \
1606     + int __ret = timeout; \
1607     + \
1608     + if (!(condition)) \
1609     + __wait_event_interruptible_lock_irq_timeout( \
1610     + wq, condition, lock, __ret); \
1611     + __ret; \
1612     +})
1613     +
1614    
1615     /*
1616     * These are the old interfaces to sleep waiting for an event.
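A hedged usage sketch for the helper defined above (the zfcp hunks earlier in this patch are the real in-tree user); dev, dev->wq, dev->lock and dev->pending are invented for illustration:

    spin_lock_irq(&dev->lock);
    ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
                    !list_empty(&dev->pending), dev->lock, 5 * HZ);
    /*
     * dev->lock is held again here in every case:
     *   ret == 0            -> timeout, condition still false
     *   ret == -ERESTARTSYS -> interrupted by a signal
     *   ret > 0             -> condition true, ret = jiffies left
     */
    spin_unlock_irq(&dev->lock);
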
1617     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1618     index 6c508ff..f23449d 100644
1619     --- a/kernel/trace/ftrace.c
1620     +++ b/kernel/trace/ftrace.c
1621     @@ -1416,12 +1416,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1622     * the hashes are freed with call_rcu_sched().
1623     */
1624     static int
1625     -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1626     +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1627     {
1628     struct ftrace_hash *filter_hash;
1629     struct ftrace_hash *notrace_hash;
1630     int ret;
1631    
1632     +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1633     + /*
1634     + * There's a small race when adding ops: an ftrace handler
1635     + * that wants regs may be called without them. We cannot
1636     + * allow that handler to be called if regs is NULL.
1637     + */
1638     + if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1639     + return 0;
1640     +#endif
1641     +
1642     filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1643     notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1644    
1645     @@ -2134,12 +2144,57 @@ static cycle_t ftrace_update_time;
1646     static unsigned long ftrace_update_cnt;
1647     unsigned long ftrace_update_tot_cnt;
1648    
1649     -static int ops_traces_mod(struct ftrace_ops *ops)
1650     +static inline int ops_traces_mod(struct ftrace_ops *ops)
1651     {
1652     - struct ftrace_hash *hash;
1653     + /*
1654     + * An empty filter_hash defaults to tracing the whole module,
1655     + * but the notrace_hash requires testing individual module functions.
1656     + */
1657     + return ftrace_hash_empty(ops->filter_hash) &&
1658     + ftrace_hash_empty(ops->notrace_hash);
1659     +}
1660     +
1661     +/*
1662     + * Check if the current ops references the record.
1663     + *
1664     + * If the ops traces all functions, then it was already accounted for.
1665     + * If the ops does not trace the current record function, skip it.
1666     + * If the ops ignores the function via notrace filter, skip it.
1667     + */
1668     +static inline bool
1669     +ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
1670     +{
1671     + /* If ops isn't enabled, ignore it */
1672     + if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1673     + return 0;
1674     +
1675     + /* If ops traces all mods, we already accounted for it */
1676     + if (ops_traces_mod(ops))
1677     + return 0;
1678    
1679     - hash = ops->filter_hash;
1680     - return ftrace_hash_empty(hash);
1681     + /* The function must be in the filter */
1682     + if (!ftrace_hash_empty(ops->filter_hash) &&
1683     + !ftrace_lookup_ip(ops->filter_hash, rec->ip))
1684     + return 0;
1685     +
1686     + /* If in notrace hash, we ignore it too */
1687     + if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
1688     + return 0;
1689     +
1690     + return 1;
1691     +}
1692     +
1693     +static int referenced_filters(struct dyn_ftrace *rec)
1694     +{
1695     + struct ftrace_ops *ops;
1696     + int cnt = 0;
1697     +
1698     + for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
1699     + if (ops_references_rec(ops, rec))
1700     + cnt++;
1701     + }
1702     +
1703     + return cnt;
1704     }
1705    
1706     static int ftrace_update_code(struct module *mod)
1707     @@ -2148,6 +2203,7 @@ static int ftrace_update_code(struct module *mod)
1708     struct dyn_ftrace *p;
1709     cycle_t start, stop;
1710     unsigned long ref = 0;
1711     + bool test = false;
1712     int i;
1713    
1714     /*
1715     @@ -2161,9 +2217,12 @@ static int ftrace_update_code(struct module *mod)
1716    
1717     for (ops = ftrace_ops_list;
1718     ops != &ftrace_list_end; ops = ops->next) {
1719     - if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1720     - ops_traces_mod(ops))
1721     - ref++;
1722     + if (ops->flags & FTRACE_OPS_FL_ENABLED) {
1723     + if (ops_traces_mod(ops))
1724     + ref++;
1725     + else
1726     + test = true;
1727     + }
1728     }
1729     }
1730    
1731     @@ -2173,12 +2232,16 @@ static int ftrace_update_code(struct module *mod)
1732     for (pg = ftrace_new_pgs; pg; pg = pg->next) {
1733    
1734     for (i = 0; i < pg->index; i++) {
1735     + int cnt = ref;
1736     +
1737     /* If something went wrong, bail without enabling anything */
1738     if (unlikely(ftrace_disabled))
1739     return -1;
1740    
1741     p = &pg->records[i];
1742     - p->flags = ref;
1743     + if (test)
1744     + cnt += referenced_filters(p);
1745     + p->flags = cnt;
1746    
1747     /*
1748     * Do the initial record conversion from mcount jump
1749     @@ -2198,7 +2261,7 @@ static int ftrace_update_code(struct module *mod)
1750     * conversion puts the module to the correct state, thus
1751     * passing the ftrace_make_call check.
1752     */
1753     - if (ftrace_start_up && ref) {
1754     + if (ftrace_start_up && cnt) {
1755     int failed = __ftrace_replace_code(p, 1);
1756     if (failed)
1757     ftrace_bug(failed, p->ip);
1758     @@ -4188,7 +4251,7 @@ static inline void ftrace_startup_enable(int command) { }
1759     # define ftrace_shutdown_sysctl() do { } while (0)
1760    
1761     static inline int
1762     -ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1763     +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1764     {
1765     return 1;
1766     }
1767     @@ -4211,7 +4274,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
1768     do_for_each_ftrace_op(op, ftrace_control_list) {
1769     if (!(op->flags & FTRACE_OPS_FL_STUB) &&
1770     !ftrace_function_local_disabled(op) &&
1771     - ftrace_ops_test(op, ip))
1772     + ftrace_ops_test(op, ip, regs))
1773     op->func(ip, parent_ip, op, regs);
1774     } while_for_each_ftrace_op(op);
1775     trace_recursion_clear(TRACE_CONTROL_BIT);
1776     @@ -4244,7 +4307,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
1777     */
1778     preempt_disable_notrace();
1779     do_for_each_ftrace_op(op, ftrace_ops_list) {
1780     - if (ftrace_ops_test(op, ip))
1781     + if (ftrace_ops_test(op, ip, regs))
1782     op->func(ip, parent_ip, op, regs);
1783     } while_for_each_ftrace_op(op);
1784     preempt_enable_notrace();
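The filter logic in ops_references_rec() above boils down to a small truth table: an empty filter_hash matches every function, a non-empty one must contain the ip, and a notrace_hash hit always vetoes. A self-contained toy mirroring just that decision (hash lookups replaced by booleans; the ENABLED and ops_traces_mod() early-outs are omitted):

    #include <stdbool.h>
    #include <stdio.h>

    static bool references(bool filter_empty, bool in_filter, bool in_notrace)
    {
            if (!filter_empty && !in_filter)
                    return false;           /* function not in the filter */
            if (in_notrace)
                    return false;           /* notrace always wins */
            return true;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   references(true,  false, false),   /* 1: empty filter matches */
                   references(false, true,  false),   /* 1: explicitly filtered */
                   references(false, true,  true));   /* 0: notrace veto */
            return 0;
    }
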
1785     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1786     index 06a5bce..0582a01 100644
1787     --- a/kernel/trace/trace.c
1788     +++ b/kernel/trace/trace.c
1789     @@ -2834,6 +2834,17 @@ static int s_show(struct seq_file *m, void *v)
1790     return 0;
1791     }
1792    
1793     +/*
1794     + * Should be used after trace_array_get(); trace_types_lock
1795     + * ensures that i_cdev has already been initialized.
1796     + */
1797     +static inline int tracing_get_cpu(struct inode *inode)
1798     +{
1799     + if (inode->i_cdev) /* See trace_create_cpu_file() */
1800     + return (long)inode->i_cdev - 1;
1801     + return RING_BUFFER_ALL_CPUS;
1802     +}
1803     +
1804     static const struct seq_operations tracer_seq_ops = {
1805     .start = s_start,
1806     .next = s_next,
1807     @@ -2842,9 +2853,9 @@ static const struct seq_operations tracer_seq_ops = {
1808     };
1809    
1810     static struct trace_iterator *
1811     -__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
1812     - struct inode *inode, struct file *file, bool snapshot)
1813     +__tracing_open(struct inode *inode, struct file *file, bool snapshot)
1814     {
1815     + struct trace_array *tr = inode->i_private;
1816     struct trace_iterator *iter;
1817     int cpu;
1818    
1819     @@ -2885,8 +2896,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
1820     iter->trace_buffer = &tr->trace_buffer;
1821     iter->snapshot = snapshot;
1822     iter->pos = -1;
1823     + iter->cpu_file = tracing_get_cpu(inode);
1824     mutex_init(&iter->mutex);
1825     - iter->cpu_file = tc->cpu;
1826    
1827     /* Notify the tracer early; before we stop tracing. */
1828     if (iter->trace && iter->trace->open)
1829     @@ -2962,44 +2973,22 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
1830     filp->private_data = inode->i_private;
1831    
1832     return 0;
1833     -
1834     -}
1835     -
1836     -int tracing_open_generic_tc(struct inode *inode, struct file *filp)
1837     -{
1838     - struct trace_cpu *tc = inode->i_private;
1839     - struct trace_array *tr = tc->tr;
1840     -
1841     - if (tracing_disabled)
1842     - return -ENODEV;
1843     -
1844     - if (trace_array_get(tr) < 0)
1845     - return -ENODEV;
1846     -
1847     - filp->private_data = inode->i_private;
1848     -
1849     - return 0;
1850     -
1851     }
1852    
1853     static int tracing_release(struct inode *inode, struct file *file)
1854     {
1855     + struct trace_array *tr = inode->i_private;
1856     struct seq_file *m = file->private_data;
1857     struct trace_iterator *iter;
1858     - struct trace_array *tr;
1859     int cpu;
1860    
1861     - /* Writes do not use seq_file, need to grab tr from inode */
1862     if (!(file->f_mode & FMODE_READ)) {
1863     - struct trace_cpu *tc = inode->i_private;
1864     -
1865     - trace_array_put(tc->tr);
1866     + trace_array_put(tr);
1867     return 0;
1868     }
1869    
1870     + /* Writes do not use seq_file */
1871     iter = m->private;
1872     - tr = iter->tr;
1873     -
1874     mutex_lock(&trace_types_lock);
1875    
1876     for_each_tracing_cpu(cpu) {
1877     @@ -3035,15 +3024,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
1878     return 0;
1879     }
1880    
1881     -static int tracing_release_generic_tc(struct inode *inode, struct file *file)
1882     -{
1883     - struct trace_cpu *tc = inode->i_private;
1884     - struct trace_array *tr = tc->tr;
1885     -
1886     - trace_array_put(tr);
1887     - return 0;
1888     -}
1889     -
1890     static int tracing_single_release_tr(struct inode *inode, struct file *file)
1891     {
1892     struct trace_array *tr = inode->i_private;
1893     @@ -3055,8 +3035,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
1894    
1895     static int tracing_open(struct inode *inode, struct file *file)
1896     {
1897     - struct trace_cpu *tc = inode->i_private;
1898     - struct trace_array *tr = tc->tr;
1899     + struct trace_array *tr = inode->i_private;
1900     struct trace_iterator *iter;
1901     int ret = 0;
1902    
1903     @@ -3064,16 +3043,17 @@ static int tracing_open(struct inode *inode, struct file *file)
1904     return -ENODEV;
1905    
1906     /* If this file was open for write, then erase contents */
1907     - if ((file->f_mode & FMODE_WRITE) &&
1908     - (file->f_flags & O_TRUNC)) {
1909     - if (tc->cpu == RING_BUFFER_ALL_CPUS)
1910     + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1911     + int cpu = tracing_get_cpu(inode);
1912     +
1913     + if (cpu == RING_BUFFER_ALL_CPUS)
1914     tracing_reset_online_cpus(&tr->trace_buffer);
1915     else
1916     - tracing_reset(&tr->trace_buffer, tc->cpu);
1917     + tracing_reset(&tr->trace_buffer, cpu);
1918     }
1919    
1920     if (file->f_mode & FMODE_READ) {
1921     - iter = __tracing_open(tr, tc, inode, file, false);
1922     + iter = __tracing_open(inode, file, false);
1923     if (IS_ERR(iter))
1924     ret = PTR_ERR(iter);
1925     else if (trace_flags & TRACE_ITER_LATENCY_FMT)
1926     @@ -3939,8 +3919,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
1927    
1928     static int tracing_open_pipe(struct inode *inode, struct file *filp)
1929     {
1930     - struct trace_cpu *tc = inode->i_private;
1931     - struct trace_array *tr = tc->tr;
1932     + struct trace_array *tr = inode->i_private;
1933     struct trace_iterator *iter;
1934     int ret = 0;
1935    
1936     @@ -3986,9 +3965,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
1937     if (trace_clocks[tr->clock_id].in_ns)
1938     iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
1939    
1940     - iter->cpu_file = tc->cpu;
1941     - iter->tr = tc->tr;
1942     - iter->trace_buffer = &tc->tr->trace_buffer;
1943     + iter->tr = tr;
1944     + iter->trace_buffer = &tr->trace_buffer;
1945     + iter->cpu_file = tracing_get_cpu(inode);
1946     mutex_init(&iter->mutex);
1947     filp->private_data = iter;
1948    
1949     @@ -4011,8 +3990,7 @@ fail:
1950     static int tracing_release_pipe(struct inode *inode, struct file *file)
1951     {
1952     struct trace_iterator *iter = file->private_data;
1953     - struct trace_cpu *tc = inode->i_private;
1954     - struct trace_array *tr = tc->tr;
1955     + struct trace_array *tr = inode->i_private;
1956    
1957     mutex_lock(&trace_types_lock);
1958    
1959     @@ -4366,15 +4344,16 @@ static ssize_t
1960     tracing_entries_read(struct file *filp, char __user *ubuf,
1961     size_t cnt, loff_t *ppos)
1962     {
1963     - struct trace_cpu *tc = filp->private_data;
1964     - struct trace_array *tr = tc->tr;
1965     + struct inode *inode = file_inode(filp);
1966     + struct trace_array *tr = inode->i_private;
1967     + int cpu = tracing_get_cpu(inode);
1968     char buf[64];
1969     int r = 0;
1970     ssize_t ret;
1971    
1972     mutex_lock(&trace_types_lock);
1973    
1974     - if (tc->cpu == RING_BUFFER_ALL_CPUS) {
1975     + if (cpu == RING_BUFFER_ALL_CPUS) {
1976     int cpu, buf_size_same;
1977     unsigned long size;
1978    
1979     @@ -4401,7 +4380,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
1980     } else
1981     r = sprintf(buf, "X\n");
1982     } else
1983     - r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
1984     + r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
1985    
1986     mutex_unlock(&trace_types_lock);
1987    
1988     @@ -4413,7 +4392,8 @@ static ssize_t
1989     tracing_entries_write(struct file *filp, const char __user *ubuf,
1990     size_t cnt, loff_t *ppos)
1991     {
1992     - struct trace_cpu *tc = filp->private_data;
1993     + struct inode *inode = file_inode(filp);
1994     + struct trace_array *tr = inode->i_private;
1995     unsigned long val;
1996     int ret;
1997    
1998     @@ -4427,8 +4407,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
1999    
2000     /* value is in KB */
2001     val <<= 10;
2002     -
2003     - ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
2004     + ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
2005     if (ret < 0)
2006     return ret;
2007    
2008     @@ -4689,8 +4668,7 @@ struct ftrace_buffer_info {
2009     #ifdef CONFIG_TRACER_SNAPSHOT
2010     static int tracing_snapshot_open(struct inode *inode, struct file *file)
2011     {
2012     - struct trace_cpu *tc = inode->i_private;
2013     - struct trace_array *tr = tc->tr;
2014     + struct trace_array *tr = inode->i_private;
2015     struct trace_iterator *iter;
2016     struct seq_file *m;
2017     int ret = 0;
2018     @@ -4699,7 +4677,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
2019     return -ENODEV;
2020    
2021     if (file->f_mode & FMODE_READ) {
2022     - iter = __tracing_open(tr, tc, inode, file, true);
2023     + iter = __tracing_open(inode, file, true);
2024     if (IS_ERR(iter))
2025     ret = PTR_ERR(iter);
2026     } else {
2027     @@ -4716,8 +4694,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
2028     ret = 0;
2029    
2030     iter->tr = tr;
2031     - iter->trace_buffer = &tc->tr->max_buffer;
2032     - iter->cpu_file = tc->cpu;
2033     + iter->trace_buffer = &tr->max_buffer;
2034     + iter->cpu_file = tracing_get_cpu(inode);
2035     m->private = iter;
2036     file->private_data = m;
2037     }
2038     @@ -4876,11 +4854,11 @@ static const struct file_operations tracing_pipe_fops = {
2039     };
2040    
2041     static const struct file_operations tracing_entries_fops = {
2042     - .open = tracing_open_generic_tc,
2043     + .open = tracing_open_generic_tr,
2044     .read = tracing_entries_read,
2045     .write = tracing_entries_write,
2046     .llseek = generic_file_llseek,
2047     - .release = tracing_release_generic_tc,
2048     + .release = tracing_release_generic_tr,
2049     };
2050    
2051     static const struct file_operations tracing_total_entries_fops = {
2052     @@ -4932,8 +4910,7 @@ static const struct file_operations snapshot_raw_fops = {
2053    
2054     static int tracing_buffers_open(struct inode *inode, struct file *filp)
2055     {
2056     - struct trace_cpu *tc = inode->i_private;
2057     - struct trace_array *tr = tc->tr;
2058     + struct trace_array *tr = inode->i_private;
2059     struct ftrace_buffer_info *info;
2060     int ret;
2061    
2062     @@ -4952,7 +4929,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
2063     mutex_lock(&trace_types_lock);
2064    
2065     info->iter.tr = tr;
2066     - info->iter.cpu_file = tc->cpu;
2067     + info->iter.cpu_file = tracing_get_cpu(inode);
2068     info->iter.trace = tr->current_trace;
2069     info->iter.trace_buffer = &tr->trace_buffer;
2070     info->spare = NULL;
2071     @@ -5269,14 +5246,14 @@ static ssize_t
2072     tracing_stats_read(struct file *filp, char __user *ubuf,
2073     size_t count, loff_t *ppos)
2074     {
2075     - struct trace_cpu *tc = filp->private_data;
2076     - struct trace_array *tr = tc->tr;
2077     + struct inode *inode = file_inode(filp);
2078     + struct trace_array *tr = inode->i_private;
2079     struct trace_buffer *trace_buf = &tr->trace_buffer;
2080     + int cpu = tracing_get_cpu(inode);
2081     struct trace_seq *s;
2082     unsigned long cnt;
2083     unsigned long long t;
2084     unsigned long usec_rem;
2085     - int cpu = tc->cpu;
2086    
2087     s = kmalloc(sizeof(*s), GFP_KERNEL);
2088     if (!s)
2089     @@ -5329,10 +5306,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
2090     }
2091    
2092     static const struct file_operations tracing_stats_fops = {
2093     - .open = tracing_open_generic_tc,
2094     + .open = tracing_open_generic_tr,
2095     .read = tracing_stats_read,
2096     .llseek = generic_file_llseek,
2097     - .release = tracing_release_generic_tc,
2098     + .release = tracing_release_generic_tr,
2099     };
2100    
2101     #ifdef CONFIG_DYNAMIC_FTRACE
2102     @@ -5521,10 +5498,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
2103     return tr->percpu_dir;
2104     }
2105    
2106     +static struct dentry *
2107     +trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
2108     + void *data, long cpu, const struct file_operations *fops)
2109     +{
2110     + struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
2111     +
2112     + if (ret) /* See tracing_get_cpu() */
2113     + ret->d_inode->i_cdev = (void *)(cpu + 1);
2114     + return ret;
2115     +}
2116     +
2117     static void
2118     tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
2119     {
2120     - struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
2121     struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
2122     struct dentry *d_cpu;
2123     char cpu_dir[30]; /* 30 characters should be more than enough */
2124     @@ -5540,28 +5527,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
2125     }
2126    
2127     /* per cpu trace_pipe */
2128     - trace_create_file("trace_pipe", 0444, d_cpu,
2129     - (void *)&data->trace_cpu, &tracing_pipe_fops);
2130     + trace_create_cpu_file("trace_pipe", 0444, d_cpu,
2131     + tr, cpu, &tracing_pipe_fops);
2132    
2133     /* per cpu trace */
2134     - trace_create_file("trace", 0644, d_cpu,
2135     - (void *)&data->trace_cpu, &tracing_fops);
2136     + trace_create_cpu_file("trace", 0644, d_cpu,
2137     + tr, cpu, &tracing_fops);
2138    
2139     - trace_create_file("trace_pipe_raw", 0444, d_cpu,
2140     - (void *)&data->trace_cpu, &tracing_buffers_fops);
2141     + trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
2142     + tr, cpu, &tracing_buffers_fops);
2143    
2144     - trace_create_file("stats", 0444, d_cpu,
2145     - (void *)&data->trace_cpu, &tracing_stats_fops);
2146     + trace_create_cpu_file("stats", 0444, d_cpu,
2147     + tr, cpu, &tracing_stats_fops);
2148    
2149     - trace_create_file("buffer_size_kb", 0444, d_cpu,
2150     - (void *)&data->trace_cpu, &tracing_entries_fops);
2151     + trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
2152     + tr, cpu, &tracing_entries_fops);
2153    
2154     #ifdef CONFIG_TRACER_SNAPSHOT
2155     - trace_create_file("snapshot", 0644, d_cpu,
2156     - (void *)&data->trace_cpu, &snapshot_fops);
2157     + trace_create_cpu_file("snapshot", 0644, d_cpu,
2158     + tr, cpu, &snapshot_fops);
2159    
2160     - trace_create_file("snapshot_raw", 0444, d_cpu,
2161     - (void *)&data->trace_cpu, &snapshot_raw_fops);
2162     + trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
2163     + tr, cpu, &snapshot_raw_fops);
2164     #endif
2165     }
2166    
2167     @@ -6124,13 +6111,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
2168     tr, &tracing_iter_fops);
2169    
2170     trace_create_file("trace", 0644, d_tracer,
2171     - (void *)&tr->trace_cpu, &tracing_fops);
2172     + tr, &tracing_fops);
2173    
2174     trace_create_file("trace_pipe", 0444, d_tracer,
2175     - (void *)&tr->trace_cpu, &tracing_pipe_fops);
2176     + tr, &tracing_pipe_fops);
2177    
2178     trace_create_file("buffer_size_kb", 0644, d_tracer,
2179     - (void *)&tr->trace_cpu, &tracing_entries_fops);
2180     + tr, &tracing_entries_fops);
2181    
2182     trace_create_file("buffer_total_size_kb", 0444, d_tracer,
2183     tr, &tracing_total_entries_fops);
2184     @@ -6145,11 +6132,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
2185     &trace_clock_fops);
2186    
2187     trace_create_file("tracing_on", 0644, d_tracer,
2188     - tr, &rb_simple_fops);
2189     + tr, &rb_simple_fops);
2190    
2191     #ifdef CONFIG_TRACER_SNAPSHOT
2192     trace_create_file("snapshot", 0644, d_tracer,
2193     - (void *)&tr->trace_cpu, &snapshot_fops);
2194     + tr, &snapshot_fops);
2195     #endif
2196    
2197     for_each_tracing_cpu(cpu)
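tracing_get_cpu() and trace_create_cpu_file() above replace the old struct trace_cpu cookie by smuggling the CPU number into inode->i_cdev, biased by one so that the never-set value (NULL) decodes to RING_BUFFER_ALL_CPUS. A self-contained user-space demo of the encoding, with a void * standing in for i_cdev:

    #include <stdio.h>

    #define RING_BUFFER_ALL_CPUS -1

    static void *encode_cpu(long cpu)      /* what trace_create_cpu_file() stores */
    {
            return (void *)(cpu + 1);      /* bias by 1 so cpu 0 != "unset" */
    }

    static long decode_cpu(void *i_cdev)   /* what tracing_get_cpu() reads */
    {
            if (i_cdev)
                    return (long)i_cdev - 1;
            return RING_BUFFER_ALL_CPUS;   /* top-level files never set it */
    }

    int main(void)
    {
            printf("%ld %ld %ld\n",
                   decode_cpu(encode_cpu(0)),    /* 0  */
                   decode_cpu(encode_cpu(7)),    /* 7  */
                   decode_cpu(NULL));            /* -1 */
            return 0;
    }
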
2198     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2199     index 6953263..3d18aad 100644
2200     --- a/kernel/trace/trace_events.c
2201     +++ b/kernel/trace/trace_events.c
2202     @@ -114,7 +114,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
2203    
2204     field = kmem_cache_alloc(field_cachep, GFP_TRACE);
2205     if (!field)
2206     - goto err;
2207     + return -ENOMEM;
2208    
2209     field->name = name;
2210     field->type = type;
2211     @@ -131,11 +131,6 @@ static int __trace_define_field(struct list_head *head, const char *type,
2212     list_add(&field->link, head);
2213    
2214     return 0;
2215     -
2216     -err:
2217     - kmem_cache_free(field_cachep, field);
2218     -
2219     - return -ENOMEM;
2220     }
2221    
2222     int trace_define_field(struct ftrace_event_call *call, const char *type,
2223     @@ -412,33 +407,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
2224     mutex_unlock(&event_mutex);
2225     }
2226    
2227     -/*
2228     - * Open and update trace_array ref count.
2229     - * Must have the current trace_array passed to it.
2230     - */
2231     -static int tracing_open_generic_file(struct inode *inode, struct file *filp)
2232     +static void remove_subsystem(struct ftrace_subsystem_dir *dir)
2233     {
2234     - struct ftrace_event_file *file = inode->i_private;
2235     - struct trace_array *tr = file->tr;
2236     - int ret;
2237     + if (!dir)
2238     + return;
2239    
2240     - if (trace_array_get(tr) < 0)
2241     - return -ENODEV;
2242     + if (!--dir->nr_events) {
2243     + debugfs_remove_recursive(dir->entry);
2244     + list_del(&dir->list);
2245     + __put_system_dir(dir);
2246     + }
2247     +}
2248    
2249     - ret = tracing_open_generic(inode, filp);
2250     - if (ret < 0)
2251     - trace_array_put(tr);
2252     - return ret;
2253     +static void *event_file_data(struct file *filp)
2254     +{
2255     + return ACCESS_ONCE(file_inode(filp)->i_private);
2256     }
2257    
2258     -static int tracing_release_generic_file(struct inode *inode, struct file *filp)
2259     +static void remove_event_file_dir(struct ftrace_event_file *file)
2260     {
2261     - struct ftrace_event_file *file = inode->i_private;
2262     - struct trace_array *tr = file->tr;
2263     + struct dentry *dir = file->dir;
2264     + struct dentry *child;
2265    
2266     - trace_array_put(tr);
2267     + if (dir) {
2268     + spin_lock(&dir->d_lock); /* probably unneeded */
2269     + list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
2270     + if (child->d_inode) /* probably unneeded */
2271     + child->d_inode->i_private = NULL;
2272     + }
2273     + spin_unlock(&dir->d_lock);
2274    
2275     - return 0;
2276     + debugfs_remove_recursive(dir);
2277     + }
2278     +
2279     + list_del(&file->list);
2280     + remove_subsystem(file->system);
2281     + kmem_cache_free(file_cachep, file);
2282     }
2283    
2284     /*
2285     @@ -682,13 +686,23 @@ static ssize_t
2286     event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
2287     loff_t *ppos)
2288     {
2289     - struct ftrace_event_file *file = filp->private_data;
2290     + struct ftrace_event_file *file;
2291     + unsigned long flags;
2292     char *buf;
2293    
2294     - if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2295     - if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
2296     + mutex_lock(&event_mutex);
2297     + file = event_file_data(filp);
2298     + if (likely(file))
2299     + flags = file->flags;
2300     + mutex_unlock(&event_mutex);
2301     +
2302     + if (!file)
2303     + return -ENODEV;
2304     +
2305     + if (flags & FTRACE_EVENT_FL_ENABLED) {
2306     + if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
2307     buf = "0*\n";
2308     - else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
2309     + else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
2310     buf = "1*\n";
2311     else
2312     buf = "1\n";
2313     @@ -702,13 +716,10 @@ static ssize_t
2314     event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2315     loff_t *ppos)
2316     {
2317     - struct ftrace_event_file *file = filp->private_data;
2318     + struct ftrace_event_file *file;
2319     unsigned long val;
2320     int ret;
2321    
2322     - if (!file)
2323     - return -EINVAL;
2324     -
2325     ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2326     if (ret)
2327     return ret;
2328     @@ -720,8 +731,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2329     switch (val) {
2330     case 0:
2331     case 1:
2332     + ret = -ENODEV;
2333     mutex_lock(&event_mutex);
2334     - ret = ftrace_event_enable_disable(file, val);
2335     + file = event_file_data(filp);
2336     + if (likely(file))
2337     + ret = ftrace_event_enable_disable(file, val);
2338     mutex_unlock(&event_mutex);
2339     break;
2340    
2341     @@ -828,7 +842,7 @@ enum {
2342    
2343     static void *f_next(struct seq_file *m, void *v, loff_t *pos)
2344     {
2345     - struct ftrace_event_call *call = m->private;
2346     + struct ftrace_event_call *call = event_file_data(m->private);
2347     struct ftrace_event_field *field;
2348     struct list_head *common_head = &ftrace_common_fields;
2349     struct list_head *head = trace_get_fields(call);
2350     @@ -872,6 +886,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
2351     loff_t l = 0;
2352     void *p;
2353    
2354     + /* ->stop() is called even if ->start() fails */
2355     + mutex_lock(&event_mutex);
2356     + if (!event_file_data(m->private))
2357     + return ERR_PTR(-ENODEV);
2358     +
2359     /* Start by showing the header */
2360     if (!*pos)
2361     return (void *)FORMAT_HEADER;
2362     @@ -886,7 +905,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
2363    
2364     static int f_show(struct seq_file *m, void *v)
2365     {
2366     - struct ftrace_event_call *call = m->private;
2367     + struct ftrace_event_call *call = event_file_data(m->private);
2368     struct ftrace_event_field *field;
2369     const char *array_descriptor;
2370    
2371     @@ -937,6 +956,7 @@ static int f_show(struct seq_file *m, void *v)
2372    
2373     static void f_stop(struct seq_file *m, void *p)
2374     {
2375     + mutex_unlock(&event_mutex);
2376     }
2377    
2378     static const struct seq_operations trace_format_seq_ops = {
2379     @@ -948,7 +968,6 @@ static const struct seq_operations trace_format_seq_ops = {
2380    
2381     static int trace_format_open(struct inode *inode, struct file *file)
2382     {
2383     - struct ftrace_event_call *call = inode->i_private;
2384     struct seq_file *m;
2385     int ret;
2386    
2387     @@ -957,7 +976,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
2388     return ret;
2389    
2390     m = file->private_data;
2391     - m->private = call;
2392     + m->private = file;
2393    
2394     return 0;
2395     }
2396     @@ -965,19 +984,22 @@ static int trace_format_open(struct inode *inode, struct file *file)
2397     static ssize_t
2398     event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2399     {
2400     - struct ftrace_event_call *call = filp->private_data;
2401     + int id = (long)event_file_data(filp);
2402     struct trace_seq *s;
2403     int r;
2404    
2405     if (*ppos)
2406     return 0;
2407    
2408     + if (unlikely(!id))
2409     + return -ENODEV;
2410     +
2411     s = kmalloc(sizeof(*s), GFP_KERNEL);
2412     if (!s)
2413     return -ENOMEM;
2414    
2415     trace_seq_init(s);
2416     - trace_seq_printf(s, "%d\n", call->event.type);
2417     + trace_seq_printf(s, "%d\n", id);
2418    
2419     r = simple_read_from_buffer(ubuf, cnt, ppos,
2420     s->buffer, s->len);
2421     @@ -989,21 +1011,28 @@ static ssize_t
2422     event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2423     loff_t *ppos)
2424     {
2425     - struct ftrace_event_call *call = filp->private_data;
2426     + struct ftrace_event_call *call;
2427     struct trace_seq *s;
2428     - int r;
2429     + int r = -ENODEV;
2430    
2431     if (*ppos)
2432     return 0;
2433    
2434     s = kmalloc(sizeof(*s), GFP_KERNEL);
2435     +
2436     if (!s)
2437     return -ENOMEM;
2438    
2439     trace_seq_init(s);
2440    
2441     - print_event_filter(call, s);
2442     - r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
2443     + mutex_lock(&event_mutex);
2444     + call = event_file_data(filp);
2445     + if (call)
2446     + print_event_filter(call, s);
2447     + mutex_unlock(&event_mutex);
2448     +
2449     + if (call)
2450     + r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
2451    
2452     kfree(s);
2453    
2454     @@ -1014,9 +1043,9 @@ static ssize_t
2455     event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2456     loff_t *ppos)
2457     {
2458     - struct ftrace_event_call *call = filp->private_data;
2459     + struct ftrace_event_call *call;
2460     char *buf;
2461     - int err;
2462     + int err = -ENODEV;
2463    
2464     if (cnt >= PAGE_SIZE)
2465     return -EINVAL;
2466     @@ -1031,7 +1060,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2467     }
2468     buf[cnt] = '\0';
2469    
2470     - err = apply_event_filter(call, buf);
2471     + mutex_lock(&event_mutex);
2472     + call = event_file_data(filp);
2473     + if (call)
2474     + err = apply_event_filter(call, buf);
2475     + mutex_unlock(&event_mutex);
2476     +
2477     free_page((unsigned long) buf);
2478     if (err < 0)
2479     return err;
2480     @@ -1253,10 +1287,9 @@ static const struct file_operations ftrace_set_event_fops = {
2481     };
2482    
2483     static const struct file_operations ftrace_enable_fops = {
2484     - .open = tracing_open_generic_file,
2485     + .open = tracing_open_generic,
2486     .read = event_enable_read,
2487     .write = event_enable_write,
2488     - .release = tracing_release_generic_file,
2489     .llseek = default_llseek,
2490     };
2491    
2492     @@ -1268,7 +1301,6 @@ static const struct file_operations ftrace_event_format_fops = {
2493     };
2494    
2495     static const struct file_operations ftrace_event_id_fops = {
2496     - .open = tracing_open_generic,
2497     .read = event_id_read,
2498     .llseek = default_llseek,
2499     };
2500     @@ -1516,8 +1548,8 @@ event_create_dir(struct dentry *parent,
2501    
2502     #ifdef CONFIG_PERF_EVENTS
2503     if (call->event.type && call->class->reg)
2504     - trace_create_file("id", 0444, file->dir, call,
2505     - id);
2506     + trace_create_file("id", 0444, file->dir,
2507     + (void *)(long)call->event.type, id);
2508     #endif
2509    
2510     /*
2511     @@ -1542,33 +1574,16 @@ event_create_dir(struct dentry *parent,
2512     return 0;
2513     }
2514    
2515     -static void remove_subsystem(struct ftrace_subsystem_dir *dir)
2516     -{
2517     - if (!dir)
2518     - return;
2519     -
2520     - if (!--dir->nr_events) {
2521     - debugfs_remove_recursive(dir->entry);
2522     - list_del(&dir->list);
2523     - __put_system_dir(dir);
2524     - }
2525     -}
2526     -
2527     static void remove_event_from_tracers(struct ftrace_event_call *call)
2528     {
2529     struct ftrace_event_file *file;
2530     struct trace_array *tr;
2531    
2532     do_for_each_event_file_safe(tr, file) {
2533     -
2534     if (file->event_call != call)
2535     continue;
2536    
2537     - list_del(&file->list);
2538     - debugfs_remove_recursive(file->dir);
2539     - remove_subsystem(file->system);
2540     - kmem_cache_free(file_cachep, file);
2541     -
2542     + remove_event_file_dir(file);
2543     /*
2544     * The do_for_each_event_file_safe() is
2545     * a double loop. After finding the call for this
2546     @@ -1720,16 +1735,47 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
2547     destroy_preds(call);
2548     }
2549    
2550     +static int probe_remove_event_call(struct ftrace_event_call *call)
2551     +{
2552     + struct trace_array *tr;
2553     + struct ftrace_event_file *file;
2554     +
2555     +#ifdef CONFIG_PERF_EVENTS
2556     + if (call->perf_refcount)
2557     + return -EBUSY;
2558     +#endif
2559     + do_for_each_event_file(tr, file) {
2560     + if (file->event_call != call)
2561     + continue;
2562     + /*
2563     + * We can't rely on the ftrace_event_enable_disable(enable => 0)
2564     + * call we are about to make: FTRACE_EVENT_FL_SOFT_MODE can
2565     + * suppress TRACE_REG_UNREGISTER.
2566     + */
2567     + if (file->flags & FTRACE_EVENT_FL_ENABLED)
2568     + return -EBUSY;
2569     + break;
2570     + } while_for_each_event_file();
2571     +
2572     + __trace_remove_event_call(call);
2573     +
2574     + return 0;
2575     +}
2576     +
2577     /* Remove an event_call */
2578     -void trace_remove_event_call(struct ftrace_event_call *call)
2579     +int trace_remove_event_call(struct ftrace_event_call *call)
2580     {
2581     + int ret;
2582     +
2583     mutex_lock(&trace_types_lock);
2584     mutex_lock(&event_mutex);
2585     down_write(&trace_event_sem);
2586     - __trace_remove_event_call(call);
2587     + ret = probe_remove_event_call(call);
2588     up_write(&trace_event_sem);
2589     mutex_unlock(&event_mutex);
2590     mutex_unlock(&trace_types_lock);
2591     +
2592     + return ret;
2593     }
2594    
2595     #define for_each_event(event, start, end) \
2596     @@ -2301,12 +2347,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
2597     {
2598     struct ftrace_event_file *file, *next;
2599    
2600     - list_for_each_entry_safe(file, next, &tr->events, list) {
2601     - list_del(&file->list);
2602     - debugfs_remove_recursive(file->dir);
2603     - remove_subsystem(file->system);
2604     - kmem_cache_free(file_cachep, file);
2605     - }
2606     + list_for_each_entry_safe(file, next, &tr->events, list)
2607     + remove_event_file_dir(file);
2608     }
2609    
2610     static void
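remove_event_file_dir() above pairs with the new event_file_data() helper: teardown NULLs i_private in every child dentry under d_lock, and every file operation re-reads it under event_mutex before trusting it. The reader side, as event_enable_read() now does it (sketch, not a standalone build):

    mutex_lock(&event_mutex);
    file = event_file_data(filp);   /* NULL once the event dir is removed */
    if (likely(file))
            flags = file->flags;    /* snapshot while it can't go away */
    mutex_unlock(&event_mutex);

    if (!file)
            return -ENODEV;         /* the open file outlived the event */
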
2611     diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
2612     index e1b653f..0a1edc6 100644
2613     --- a/kernel/trace/trace_events_filter.c
2614     +++ b/kernel/trace/trace_events_filter.c
2615     @@ -631,17 +631,15 @@ static void append_filter_err(struct filter_parse_state *ps,
2616     free_page((unsigned long) buf);
2617     }
2618    
2619     +/* caller must hold event_mutex */
2620     void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
2621     {
2622     - struct event_filter *filter;
2623     + struct event_filter *filter = call->filter;
2624    
2625     - mutex_lock(&event_mutex);
2626     - filter = call->filter;
2627     if (filter && filter->filter_string)
2628     trace_seq_printf(s, "%s\n", filter->filter_string);
2629     else
2630     trace_seq_printf(s, "none\n");
2631     - mutex_unlock(&event_mutex);
2632     }
2633    
2634     void print_subsystem_event_filter(struct event_subsystem *system,
2635     @@ -1835,23 +1833,22 @@ static int create_system_filter(struct event_subsystem *system,
2636     return err;
2637     }
2638    
2639     +/* caller must hold event_mutex */
2640     int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
2641     {
2642     struct event_filter *filter;
2643     - int err = 0;
2644     -
2645     - mutex_lock(&event_mutex);
2646     + int err;
2647    
2648     if (!strcmp(strstrip(filter_string), "0")) {
2649     filter_disable(call);
2650     filter = call->filter;
2651     if (!filter)
2652     - goto out_unlock;
2653     + return 0;
2654     RCU_INIT_POINTER(call->filter, NULL);
2655     /* Make sure the filter is not being used */
2656     synchronize_sched();
2657     __free_filter(filter);
2658     - goto out_unlock;
2659     + return 0;
2660     }
2661    
2662     err = create_filter(call, filter_string, true, &filter);
2663     @@ -1878,8 +1875,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
2664     __free_filter(tmp);
2665     }
2666     }
2667     -out_unlock:
2668     - mutex_unlock(&event_mutex);
2669    
2670     return err;
2671     }
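print_event_filter() and apply_event_filter() now push event_mutex out to their callers (the trace_events.c hunks above take it around event_file_data()). If one wanted to harden that contract at runtime, a lockdep annotation would do it; this is a hypothetical addition, not part of the patch:

    /* caller must hold event_mutex */
    void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
    {
            struct event_filter *filter;

            lockdep_assert_held(&event_mutex);   /* hypothetical hardening */
            filter = call->filter;
            if (filter && filter->filter_string)
                    trace_seq_printf(s, "%s\n", filter->filter_string);
            else
                    trace_seq_printf(s, "none\n");
    }
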
2672     diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
2673     index 9f46e98..64abc8c 100644
2674     --- a/kernel/trace/trace_kprobe.c
2675     +++ b/kernel/trace/trace_kprobe.c
2676     @@ -90,7 +90,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
2677     }
2678    
2679     static int register_probe_event(struct trace_probe *tp);
2680     -static void unregister_probe_event(struct trace_probe *tp);
2681     +static int unregister_probe_event(struct trace_probe *tp);
2682    
2683     static DEFINE_MUTEX(probe_lock);
2684     static LIST_HEAD(probe_list);
2685     @@ -281,6 +281,8 @@ trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
2686     static int
2687     disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2688     {
2689     + struct ftrace_event_file **old = NULL;
2690     + int wait = 0;
2691     int ret = 0;
2692    
2693     mutex_lock(&probe_enable_lock);
2694     @@ -314,10 +316,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2695     }
2696    
2697     rcu_assign_pointer(tp->files, new);
2698     -
2699     - /* Make sure the probe is done with old files */
2700     - synchronize_sched();
2701     - kfree(old);
2702     + wait = 1;
2703     } else
2704     tp->flags &= ~TP_FLAG_PROFILE;
2705    
2706     @@ -326,11 +325,25 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2707     disable_kretprobe(&tp->rp);
2708     else
2709     disable_kprobe(&tp->rp.kp);
2710     + wait = 1;
2711     }
2712    
2713     out_unlock:
2714     mutex_unlock(&probe_enable_lock);
2715    
2716     + if (wait) {
2717     + /*
2718     + * Synchronize with kprobe_trace_func/kretprobe_trace_func
2719     + * to ensure the probe is disabled (all running handlers
2720     + * have finished). This matters not only for the kfree()
2721     + * below, but also for the caller: trace_remove_event_call()
2722     + * relies on it before releasing event_call-related objects,
2723     + * which kprobe_trace_func/kretprobe_trace_func may access.
2724     + */
2725     + synchronize_sched();
2726     + kfree(old); /* Ignored if link == NULL */
2727     + }
2728     +
2729     return ret;
2730     }
2731    
2732     @@ -398,9 +411,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
2733     if (trace_probe_is_enabled(tp))
2734     return -EBUSY;
2735    
2736     + /* Will fail if probe is being used by ftrace or perf */
2737     + if (unregister_probe_event(tp))
2738     + return -EBUSY;
2739     +
2740     __unregister_trace_probe(tp);
2741     list_del(&tp->list);
2742     - unregister_probe_event(tp);
2743    
2744     return 0;
2745     }
2746     @@ -679,7 +695,9 @@ static int release_all_trace_probes(void)
2747     /* TODO: Use batch unregistration */
2748     while (!list_empty(&probe_list)) {
2749     tp = list_entry(probe_list.next, struct trace_probe, list);
2750     - unregister_trace_probe(tp);
2751     + ret = unregister_trace_probe(tp);
2752     + if (ret)
2753     + goto end;
2754     free_trace_probe(tp);
2755     }
2756    
2757     @@ -1312,11 +1330,15 @@ static int register_probe_event(struct trace_probe *tp)
2758     return ret;
2759     }
2760    
2761     -static void unregister_probe_event(struct trace_probe *tp)
2762     +static int unregister_probe_event(struct trace_probe *tp)
2763     {
2764     + int ret;
2765     +
2766     /* tp->event is unregistered in trace_remove_event_call() */
2767     - trace_remove_event_call(&tp->call);
2768     - kfree(tp->call.print_fmt);
2769     + ret = trace_remove_event_call(&tp->call);
2770     + if (!ret)
2771     + kfree(tp->call.print_fmt);
2772     + return ret;
2773     }
2774    
2775     /* Make a debugfs interface for controlling probe points */
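The disable_trace_probe() hunk above is an instance of the publish-then-wait-then-free idiom: unhook the old file array under the lock, and only after synchronize_sched() has waited out every preempt-disabled probe handler is it safe to kfree(). The generic shape, with placeholder names ('head', 'lock', 'new'):

    mutex_lock(&lock);
    old = rcu_dereference_protected(head, lockdep_is_held(&lock));
    rcu_assign_pointer(head, new);          /* publish the replacement */
    mutex_unlock(&lock);

    synchronize_sched();                    /* wait for all old readers */
    kfree(old);                             /* now provably unreachable */
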
2776     diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2777     index d5d0cd3..6fd72b7 100644
2778     --- a/kernel/trace/trace_uprobe.c
2779     +++ b/kernel/trace/trace_uprobe.c
2780     @@ -70,7 +70,7 @@ struct trace_uprobe {
2781     (sizeof(struct probe_arg) * (n)))
2782    
2783     static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
 
 static DEFINE_MUTEX(uprobe_lock);
 static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
 }
 
 /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
 {
+	int ret;
+
+	ret = unregister_uprobe_event(tu);
+	if (ret)
+		return ret;
+
 	list_del(&tu->list);
-	unregister_uprobe_event(tu);
 	free_trace_uprobe(tu);
+	return 0;
 }
 
 /* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 
 	/* register as an event */
 	old_tp = find_probe_event(tu->call.name, tu->call.class->system);
-	if (old_tp)
+	if (old_tp) {
 		/* delete old event */
-		unregister_trace_uprobe(old_tp);
+		ret = unregister_trace_uprobe(old_tp);
+		if (ret)
+			goto end;
+	}
 
 	ret = register_uprobe_event(tu);
 	if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
 	group = UPROBE_EVENT_SYSTEM;
 
 	if (is_delete) {
+		int ret;
+
 		if (!event) {
 			pr_info("Delete command needs an event name.\n");
 			return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
 			return -ENOENT;
 		}
 		/* delete an event */
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
 		mutex_unlock(&uprobe_lock);
-		return 0;
+		return ret;
 	}
 
 	if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
 	return ret;
 }
 
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
 {
 	struct trace_uprobe *tu;
+	int ret = 0;
 
 	mutex_lock(&uprobe_lock);
 	while (!list_empty(&uprobe_list)) {
 		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
-		unregister_trace_uprobe(tu);
+		ret = unregister_trace_uprobe(tu);
+		if (ret)
+			break;
 	}
 	mutex_unlock(&uprobe_lock);
+	return ret;
 }
 
 /* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
 
 static int probes_open(struct inode *inode, struct file *file)
 {
-	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
-		cleanup_all_probes();
+	int ret;
+
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		ret = cleanup_all_probes();
+		if (ret)
+			return ret;
+	}
 
 	return seq_open(file, &probes_seq_op);
 }
@@ -970,12 +990,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
 	return ret;
 }
 
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
 {
+	int ret;
+
 	/* tu->event is unregistered in trace_remove_event_call() */
-	trace_remove_event_call(&tu->call);
+	ret = trace_remove_event_call(&tu->call);
+	if (ret)
+		return ret;
 	kfree(tu->call.print_fmt);
 	tu->call.print_fmt = NULL;
+	return 0;
 }
 
 /* Make a trace interface for controling probe points */
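
The trace_uprobe hunks above convert the unregister path from void to int so
that a failure in trace_remove_event_call() (e.g. the event still being in
use) propagates out through unregister_trace_uprobe(), cleanup_all_probes()
and create_trace_uprobe() instead of being silently dropped while the probe
is freed anyway. Note the reordering in unregister_trace_uprobe(): the event
is unregistered first, and the probe is only unlinked and freed once that
succeeded. Below is a minimal userspace sketch of the same error-propagation
pattern; the toy probe type, its busy flag and all names are illustrative,
not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct probe {
	struct probe *next;
	int busy;                    /* stand-in for "event still in use" */
};

static struct probe *probe_list;

/* stand-in for trace_remove_event_call(): refuses while in use */
static int unregister_event(struct probe *p)
{
	return p->busy ? -EBUSY : 0;
}

/* unregister first; only unlink and free once that succeeded */
static int unregister_probe(struct probe *p)
{
	int ret = unregister_event(p);

	if (ret)
		return ret;
	probe_list = p->next;        /* p is always the list head here */
	free(p);
	return 0;
}

static int cleanup_all_probes(void)
{
	int ret = 0;

	while (probe_list) {
		ret = unregister_probe(probe_list);
		if (ret)
			break;       /* surface e.g. -EBUSY to the caller */
	}
	return ret;
}

int main(void)
{
	struct probe *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->busy = 1;
	probe_list = p;
	printf("busy: %d (-EBUSY is %d)\n", cleanup_all_probes(), -EBUSY);
	p->busy = 0;
	printf("idle: %d\n", cleanup_all_probes());
	return 0;
}
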
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 55a42f9..5b4328d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES	3
 #define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES	3
 
@@ -3470,10 +3472,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-		ifmgd->auth_data->timeout_started = true;
+		auth_data->timeout_started = true;
 		run_again(ifmgd, auth_data->timeout);
 	} else {
-		auth_data->timeout_started = false;
+		auth_data->timeout =
+			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+		auth_data->timeout_started = true;
+		run_again(ifmgd, auth_data->timeout);
 	}
 
 	return 0;
@@ -3510,7 +3515,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 		assoc_data->timeout_started = true;
 		run_again(&sdata->u.mgd, assoc_data->timeout);
 	} else {
-		assoc_data->timeout_started = false;
+		assoc_data->timeout =
+			round_jiffies_up(jiffies +
+					 IEEE80211_ASSOC_TIMEOUT_LONG);
+		assoc_data->timeout_started = true;
+		run_again(&sdata->u.mgd, assoc_data->timeout);
 	}
 
 	return 0;
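
The else branches patched above handle auth/assoc frames whose timeout is
normally started only when the driver reports TX status (tx_flags != 0).
Previously timeout_started was left false there, so if that status report
never arrived the state machine had no timer to fall back on and could
stall. The fix always arms a timer, just a longer one (HZ/2 instead of
HZ/5), rounded up with round_jiffies_up() so the wakeup can batch with
other timers. A userspace sketch of that rounding idea follows; the HZ
value and round_ticks_up() are stand-ins, not the kernel's
round_jiffies_up().

#include <stdio.h>

#define HZ 100                        /* tick rate; illustrative only */
#define AUTH_TIMEOUT       (HZ / 5)   /* normal: 200 ms */
#define AUTH_TIMEOUT_LONG  (HZ / 2)   /* fallback: 500 ms */

/* stand-in for round_jiffies_up(): round a tick deadline up to the
 * next whole second so nearby timers can fire together */
static unsigned long round_ticks_up(unsigned long t)
{
	return (t + HZ - 1) / HZ * HZ;
}

int main(void)
{
	unsigned long now = 12345;    /* pretend tick counter */

	/* TX-status path: arm the longer fallback timer instead of
	 * leaving timeout_started = false and risking a stall */
	unsigned long deadline = round_ticks_up(now + AUTH_TIMEOUT_LONG);

	printf("now=%lu deadline=%lu (+%lu ticks)\n",
	       now, deadline, deadline - now);
	return 0;
}
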
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index ff8c434..f924dd2 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -19,6 +19,7 @@
 
 enum llcp_state {
 	LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+	LLCP_CONNECTING,
 	LLCP_CLOSED,
 	LLCP_BOUND,
 	LLCP_LISTEN,
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 380253e..7522c37 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -571,7 +571,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
-	if (sock_writeable(sk))
+	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -722,14 +722,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 	if (ret)
 		goto sock_unlink;
 
+	sk->sk_state = LLCP_CONNECTING;
+
 	ret = sock_wait_state(sk, LLCP_CONNECTED,
 			      sock_sndtimeo(sk, flags & O_NONBLOCK));
-	if (ret)
+	if (ret && ret != -EINPROGRESS)
 		goto sock_unlink;
 
 	release_sock(sk);
 
-	return 0;
+	return ret;
 
 sock_unlink:
 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
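
The two NFC hunks work together: the new LLCP_CONNECTING state added in
llcp.h marks a socket whose connect is still in flight, llcp_sock_connect()
now passes -EINPROGRESS from sock_wait_state() back to the caller instead
of tearing the socket down, and llcp_sock_poll() stops advertising POLLOUT
until the socket actually reaches LLCP_CONNECTED. That is the standard
non-blocking connect contract. A userspace sketch of that contract follows;
the toy_sock type and its helpers are illustrative, not the kernel's.

#include <stdio.h>
#include <errno.h>
#include <poll.h>

enum toy_state { TOY_CONNECTING, TOY_CONNECTED };

struct toy_sock {
	enum toy_state state;
	int nonblocking;
};

/* mirrors llcp_sock_connect(): park the socket in CONNECTING and
 * let a non-blocking caller come back later via poll() */
static int toy_connect(struct toy_sock *sk)
{
	sk->state = TOY_CONNECTING;
	if (sk->nonblocking)
		return -EINPROGRESS;
	sk->state = TOY_CONNECTED;    /* blocking path waits it out */
	return 0;
}

/* mirrors llcp_sock_poll(): only a fully connected socket is writable */
static short toy_poll(const struct toy_sock *sk)
{
	return sk->state == TOY_CONNECTED ? POLLOUT : 0;
}

int main(void)
{
	struct toy_sock sk = { .state = TOY_CONNECTING, .nonblocking = 1 };

	printf("connect: %d (-EINPROGRESS is %d)\n",
	       toy_connect(&sk), -EINPROGRESS);
	printf("poll while connecting: 0x%x\n", toy_poll(&sk));
	sk.state = TOY_CONNECTED;     /* pretend the peer answered */
	printf("poll once connected:  0x%x\n", toy_poll(&sk));
	return 0;
}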