Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0120-4.1.21-all-fixes.patch



Revision 2777
Thu Apr 7 12:10:17 2016 UTC by niro
File size: 67012 byte(s)
-linux-4.1.21
1 niro 2777 diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
2     index 4f6a82cef1d1..cbe35b3de9e9 100644
3     --- a/Documentation/devicetree/bindings/arm/omap/omap.txt
4     +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
5     @@ -23,6 +23,7 @@ Optional properties:
6     during suspend.
7     - ti,no-reset-on-init: When present, the module should not be reset at init
8     - ti,no-idle-on-init: When present, the module should not be idled at init
9     +- ti,no-idle: When present, the module is never allowed to idle.
10    
11     Example:
12    
13     diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
14     index c59bd9bc41ef..4176ab076f1c 100644
15     --- a/Documentation/virtual/kvm/mmu.txt
16     +++ b/Documentation/virtual/kvm/mmu.txt
17     @@ -352,7 +352,8 @@ In the first case there are two additional complications:
18     - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
19     the kernel may now execute it. We handle this by also setting spte.nx.
20     If we get a user fetch or read fault, we'll change spte.u=1 and
21     - spte.nx=gpte.nx back.
22     + spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
23     + shadow paging is in use.
24     - if CR4.SMAP is disabled: since the page has been changed to a kernel
25     page, it can not be reused when CR4.SMAP is enabled. We set
26     CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
27     diff --git a/Makefile b/Makefile
28     index 39be1bbd373a..79fab0d55218 100644
29     --- a/Makefile
30     +++ b/Makefile
31     @@ -1,6 +1,6 @@
32     VERSION = 4
33     PATCHLEVEL = 1
34     -SUBLEVEL = 20
35     +SUBLEVEL = 21
36     EXTRAVERSION =
37     NAME = Series 4800
38    
39     diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
40     index dfcc0dd637e5..bc04b754fe36 100644
41     --- a/arch/arm/boot/dts/dra7.dtsi
42     +++ b/arch/arm/boot/dts/dra7.dtsi
43     @@ -1411,6 +1411,16 @@
44     0x48485200 0x2E00>;
45     #address-cells = <1>;
46     #size-cells = <1>;
47     +
48     + /*
49     + * Do not allow gating of cpsw clock as workaround
50     + * for errata i877. Keeping internal clock disabled
51     + * causes the device switching characteristics
52     + * to degrade over time and eventually fail to meet
53     + * the data manual delay time/skew specs.
54     + */
55     + ti,no-idle;
56     +
57     /*
58     * rx_thresh_pend
59     * rx_pend
60     diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
61     index 5286e7773ed4..9185bb958503 100644
62     --- a/arch/arm/mach-omap2/omap_hwmod.c
63     +++ b/arch/arm/mach-omap2/omap_hwmod.c
64     @@ -876,6 +876,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
65     return ret;
66     }
67    
68     +static void _enable_optional_clocks(struct omap_hwmod *oh)
69     +{
70     + struct omap_hwmod_opt_clk *oc;
71     + int i;
72     +
73     + pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
74     +
75     + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
76     + if (oc->_clk) {
77     + pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
78     + __clk_get_name(oc->_clk));
79     + clk_enable(oc->_clk);
80     + }
81     +}
82     +
83     +static void _disable_optional_clocks(struct omap_hwmod *oh)
84     +{
85     + struct omap_hwmod_opt_clk *oc;
86     + int i;
87     +
88     + pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
89     +
90     + for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
91     + if (oc->_clk) {
92     + pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
93     + __clk_get_name(oc->_clk));
94     + clk_disable(oc->_clk);
95     + }
96     +}
97     +
98     /**
99     * _enable_clocks - enable hwmod main clock and interface clocks
100     * @oh: struct omap_hwmod *
101     @@ -903,6 +933,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
102     clk_enable(os->_clk);
103     }
104    
105     + if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
106     + _enable_optional_clocks(oh);
107     +
108     /* The opt clocks are controlled by the device driver. */
109    
110     return 0;
111     @@ -934,41 +967,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
112     clk_disable(os->_clk);
113     }
114    
115     + if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
116     + _disable_optional_clocks(oh);
117     +
118     /* The opt clocks are controlled by the device driver. */
119    
120     return 0;
121     }
122    
123     -static void _enable_optional_clocks(struct omap_hwmod *oh)
124     -{
125     - struct omap_hwmod_opt_clk *oc;
126     - int i;
127     -
128     - pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
129     -
130     - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
131     - if (oc->_clk) {
132     - pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
133     - __clk_get_name(oc->_clk));
134     - clk_enable(oc->_clk);
135     - }
136     -}
137     -
138     -static void _disable_optional_clocks(struct omap_hwmod *oh)
139     -{
140     - struct omap_hwmod_opt_clk *oc;
141     - int i;
142     -
143     - pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
144     -
145     - for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
146     - if (oc->_clk) {
147     - pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
148     - __clk_get_name(oc->_clk));
149     - clk_disable(oc->_clk);
150     - }
151     -}
152     -
153     /**
154     * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
155     * @oh: struct omap_hwmod *
156     @@ -2180,6 +2186,11 @@ static int _enable(struct omap_hwmod *oh)
157     */
158     static int _idle(struct omap_hwmod *oh)
159     {
160     + if (oh->flags & HWMOD_NO_IDLE) {
161     + oh->_int_flags |= _HWMOD_SKIP_ENABLE;
162     + return 0;
163     + }
164     +
165     pr_debug("omap_hwmod: %s: idling\n", oh->name);
166    
167     if (oh->_state != _HWMOD_STATE_ENABLED) {
168     @@ -2484,6 +2495,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
169     oh->flags |= HWMOD_INIT_NO_RESET;
170     if (of_find_property(np, "ti,no-idle-on-init", NULL))
171     oh->flags |= HWMOD_INIT_NO_IDLE;
172     + if (of_find_property(np, "ti,no-idle", NULL))
173     + oh->flags |= HWMOD_NO_IDLE;
174     }
175    
176     oh->_state = _HWMOD_STATE_INITIALIZED;
177     @@ -2610,7 +2623,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
178     * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
179     * it should be set by the core code as a runtime flag during startup
180     */
181     - if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
182     + if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
183     (postsetup_state == _HWMOD_STATE_IDLE)) {
184     oh->_int_flags |= _HWMOD_SKIP_ENABLE;
185     postsetup_state = _HWMOD_STATE_ENABLED;
186     diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
187     index 9611c91d9b82..ec289c5f099a 100644
188     --- a/arch/arm/mach-omap2/omap_hwmod.h
189     +++ b/arch/arm/mach-omap2/omap_hwmod.h
190     @@ -517,6 +517,10 @@ struct omap_hwmod_omap4_prcm {
191     * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
192     * events by calling _reconfigure_io_chain() when a device is enabled
193     * or idled.
194     + * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
195     + * operate and they need to be handled at the same time as the main_clk.
196     + * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
197     + * IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
198     */
199     #define HWMOD_SWSUP_SIDLE (1 << 0)
200     #define HWMOD_SWSUP_MSTANDBY (1 << 1)
201     @@ -532,6 +536,8 @@ struct omap_hwmod_omap4_prcm {
202     #define HWMOD_FORCE_MSTANDBY (1 << 11)
203     #define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
204     #define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
205     +#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
206     +#define HWMOD_NO_IDLE (1 << 15)
207    
208     /*
209     * omap_hwmod._int_flags definitions
210     diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
211     index b056369fd47d..0d1d675f2cce 100644
212     --- a/arch/arm64/kernel/debug-monitors.c
213     +++ b/arch/arm64/kernel/debug-monitors.c
214     @@ -184,20 +184,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
215    
216     /* EL1 Single Step Handler hooks */
217     static LIST_HEAD(step_hook);
218     -static DEFINE_RWLOCK(step_hook_lock);
219     +static DEFINE_SPINLOCK(step_hook_lock);
220    
221     void register_step_hook(struct step_hook *hook)
222     {
223     - write_lock(&step_hook_lock);
224     - list_add(&hook->node, &step_hook);
225     - write_unlock(&step_hook_lock);
226     + spin_lock(&step_hook_lock);
227     + list_add_rcu(&hook->node, &step_hook);
228     + spin_unlock(&step_hook_lock);
229     }
230    
231     void unregister_step_hook(struct step_hook *hook)
232     {
233     - write_lock(&step_hook_lock);
234     - list_del(&hook->node);
235     - write_unlock(&step_hook_lock);
236     + spin_lock(&step_hook_lock);
237     + list_del_rcu(&hook->node);
238     + spin_unlock(&step_hook_lock);
239     + synchronize_rcu();
240     }
241    
242     /*
243     @@ -211,15 +212,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
244     struct step_hook *hook;
245     int retval = DBG_HOOK_ERROR;
246    
247     - read_lock(&step_hook_lock);
248     + rcu_read_lock();
249    
250     - list_for_each_entry(hook, &step_hook, node) {
251     + list_for_each_entry_rcu(hook, &step_hook, node) {
252     retval = hook->fn(regs, esr);
253     if (retval == DBG_HOOK_HANDLED)
254     break;
255     }
256    
257     - read_unlock(&step_hook_lock);
258     + rcu_read_unlock();
259    
260     return retval;
261     }
262     @@ -271,20 +272,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
263     * Use reader/writer locks instead of plain spinlock.
264     */
265     static LIST_HEAD(break_hook);
266     -static DEFINE_RWLOCK(break_hook_lock);
267     +static DEFINE_SPINLOCK(break_hook_lock);
268    
269     void register_break_hook(struct break_hook *hook)
270     {
271     - write_lock(&break_hook_lock);
272     - list_add(&hook->node, &break_hook);
273     - write_unlock(&break_hook_lock);
274     + spin_lock(&break_hook_lock);
275     + list_add_rcu(&hook->node, &break_hook);
276     + spin_unlock(&break_hook_lock);
277     }
278    
279     void unregister_break_hook(struct break_hook *hook)
280     {
281     - write_lock(&break_hook_lock);
282     - list_del(&hook->node);
283     - write_unlock(&break_hook_lock);
284     + spin_lock(&break_hook_lock);
285     + list_del_rcu(&hook->node);
286     + spin_unlock(&break_hook_lock);
287     + synchronize_rcu();
288     }
289    
290     static int call_break_hook(struct pt_regs *regs, unsigned int esr)
291     @@ -292,11 +294,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
292     struct break_hook *hook;
293     int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
294    
295     - read_lock(&break_hook_lock);
296     - list_for_each_entry(hook, &break_hook, node)
297     + rcu_read_lock();
298     + list_for_each_entry_rcu(hook, &break_hook, node)
299     if ((esr & hook->esr_mask) == hook->esr_val)
300     fn = hook->fn;
301     - read_unlock(&break_hook_lock);
302     + rcu_read_unlock();
303    
304     return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
305     }
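For context on the locking change above: the break/step handlers now traverse the hook lists under RCU, with no lock taken in the exception path, while registration and unregistration serialize on a plain spinlock and use the RCU list helpers; the synchronize_rcu() in the unregister path makes it safe for the caller to free the hook afterwards. A minimal sketch of that pattern, with illustrative names (my_hook, my_hooks) rather than the kernel's:

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_hook {
	struct list_head node;
	int (*fn)(void *ctx);
};

static LIST_HEAD(my_hooks);
static DEFINE_SPINLOCK(my_hooks_lock);

void my_register_hook(struct my_hook *hook)
{
	spin_lock(&my_hooks_lock);
	list_add_rcu(&hook->node, &my_hooks);
	spin_unlock(&my_hooks_lock);
}

void my_unregister_hook(struct my_hook *hook)
{
	spin_lock(&my_hooks_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&my_hooks_lock);
	synchronize_rcu();	/* wait out readers before the caller frees hook */
}

int my_call_hooks(void *ctx)
{
	struct my_hook *hook;
	int ret = -ENOENT;

	rcu_read_lock();	/* readers never block writers or each other */
	list_for_each_entry_rcu(hook, &my_hooks, node) {
		ret = hook->fn(ctx);
		if (!ret)
			break;
	}
	rcu_read_unlock();
	return ret;
}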
306     diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
307     index a3b1ffe50aa0..c99e8a32bea4 100644
308     --- a/arch/mips/Kconfig
309     +++ b/arch/mips/Kconfig
310     @@ -2103,11 +2103,11 @@ config CPU_R4K_CACHE_TLB
311    
312     config MIPS_MT_SMP
313     bool "MIPS MT SMP support (1 TC on each available VPE)"
314     - depends on SYS_SUPPORTS_MULTITHREADING
315     + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
316     select CPU_MIPSR2_IRQ_VI
317     select CPU_MIPSR2_IRQ_EI
318     select SYNC_R4K
319     - select MIPS_GIC_IPI
320     + select MIPS_GIC_IPI if MIPS_GIC
321     select MIPS_MT
322     select SMP
323     select SMP_UP
324     @@ -2204,8 +2204,8 @@ config MIPS_VPE_APSP_API_MT
325    
326     config MIPS_CMP
327     bool "MIPS CMP framework support (DEPRECATED)"
328     - depends on SYS_SUPPORTS_MIPS_CMP
329     - select MIPS_GIC_IPI
330     + depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
331     + select MIPS_GIC_IPI if MIPS_GIC
332     select SMP
333     select SYNC_R4K
334     select SYS_SUPPORTS_SMP
335     @@ -2221,11 +2221,11 @@ config MIPS_CMP
336    
337     config MIPS_CPS
338     bool "MIPS Coherent Processing System support"
339     - depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
340     + depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
341     select MIPS_CM
342     select MIPS_CPC
343     select MIPS_CPS_PM if HOTPLUG_CPU
344     - select MIPS_GIC_IPI
345     + select MIPS_GIC_IPI if MIPS_GIC
346     select SMP
347     select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
348     select SYS_SUPPORTS_HOTPLUG_CPU
349     @@ -2244,6 +2244,7 @@ config MIPS_CPS_PM
350     bool
351    
352     config MIPS_GIC_IPI
353     + depends on MIPS_GIC
354     bool
355    
356     config MIPS_CM
357     diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
358     index f8338e6d3dd7..a34e43eec658 100644
359     --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
360     +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
361     @@ -1273,6 +1273,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
362     std r6, VCPU_ACOP(r9)
363     stw r7, VCPU_GUEST_PID(r9)
364     std r8, VCPU_WORT(r9)
365     + /*
366     + * Restore various registers to 0, where non-zero values
367     + * set by the guest could disrupt the host.
368     + */
369     + li r0, 0
370     + mtspr SPRN_IAMR, r0
371     + mtspr SPRN_CIABR, r0
372     + mtspr SPRN_DAWRX, r0
373     + mtspr SPRN_TCSCR, r0
374     + mtspr SPRN_WORT, r0
375     + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
376     + li r0, 1
377     + sldi r0, r0, 31
378     + mtspr SPRN_MMCRS, r0
379     8:
380    
381     /* Save and reset AMR and UAMOR before turning on the MMU */
382     diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
383     index fb1b93ea3e3f..e485817f7b1a 100644
384     --- a/arch/s390/include/asm/mmu_context.h
385     +++ b/arch/s390/include/asm/mmu_context.h
386     @@ -15,17 +15,25 @@
387     static inline int init_new_context(struct task_struct *tsk,
388     struct mm_struct *mm)
389     {
390     + spin_lock_init(&mm->context.list_lock);
391     + INIT_LIST_HEAD(&mm->context.pgtable_list);
392     + INIT_LIST_HEAD(&mm->context.gmap_list);
393     cpumask_clear(&mm->context.cpu_attach_mask);
394     atomic_set(&mm->context.attach_count, 0);
395     mm->context.flush_mm = 0;
396     - mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
397     - mm->context.asce_bits |= _ASCE_TYPE_REGION3;
398     #ifdef CONFIG_PGSTE
399     mm->context.alloc_pgste = page_table_allocate_pgste;
400     mm->context.has_pgste = 0;
401     mm->context.use_skey = 0;
402     #endif
403     - mm->context.asce_limit = STACK_TOP_MAX;
404     + if (mm->context.asce_limit == 0) {
405     + /* context created by exec, set asce limit to 4TB */
406     + mm->context.asce_bits = _ASCE_TABLE_LENGTH |
407     + _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
408     + mm->context.asce_limit = STACK_TOP_MAX;
409     + } else if (mm->context.asce_limit == (1UL << 31)) {
410     + mm_inc_nr_pmds(mm);
411     + }
412     crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
413     return 0;
414     }
415     @@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
416     static inline void arch_dup_mmap(struct mm_struct *oldmm,
417     struct mm_struct *mm)
418     {
419     - if (oldmm->context.asce_limit < mm->context.asce_limit)
420     - crst_table_downgrade(mm, oldmm->context.asce_limit);
421     }
422    
423     static inline void arch_exit_mmap(struct mm_struct *mm)
424     diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
425     index 7b7858f158b4..d7cc79fb6191 100644
426     --- a/arch/s390/include/asm/pgalloc.h
427     +++ b/arch/s390/include/asm/pgalloc.h
428     @@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
429    
430     static inline pgd_t *pgd_alloc(struct mm_struct *mm)
431     {
432     - spin_lock_init(&mm->context.list_lock);
433     - INIT_LIST_HEAD(&mm->context.pgtable_list);
434     - INIT_LIST_HEAD(&mm->context.gmap_list);
435     - return (pgd_t *) crst_table_alloc(mm);
436     + unsigned long *table = crst_table_alloc(mm);
437     +
438     + if (!table)
439     + return NULL;
440     + if (mm->context.asce_limit == (1UL << 31)) {
441     + /* Forking a compat process with 2 page table levels */
442     + if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
443     + crst_table_free(mm, table);
444     + return NULL;
445     + }
446     + }
447     + return (pgd_t *) table;
448     +}
449     +
450     +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
451     +{
452     + if (mm->context.asce_limit == (1UL << 31))
453     + pgtable_pmd_page_dtor(virt_to_page(pgd));
454     + crst_table_free(mm, (unsigned long *) pgd);
455     }
456     -#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
457    
458     static inline void pmd_populate(struct mm_struct *mm,
459     pmd_t *pmd, pgtable_t pte)
460     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
461     index 945f9e13f1aa..917148620f49 100644
462     --- a/arch/x86/kvm/vmx.c
463     +++ b/arch/x86/kvm/vmx.c
464     @@ -1674,6 +1674,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
465     return;
466     }
467     break;
468     + case MSR_IA32_PEBS_ENABLE:
469     + /* PEBS needs a quiescent period after being disabled (to write
470     + * a record). Disabling PEBS through VMX MSR swapping doesn't
471     + * provide that period, so a CPU could write host's record into
472     + * guest's memory.
473     + */
474     + wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
475     }
476    
477     for (i = 0; i < m->nr; ++i)
478     @@ -1711,26 +1718,31 @@ static void reload_tss(void)
479    
480     static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
481     {
482     - u64 guest_efer;
483     - u64 ignore_bits;
484     + u64 guest_efer = vmx->vcpu.arch.efer;
485     + u64 ignore_bits = 0;
486    
487     - guest_efer = vmx->vcpu.arch.efer;
488     + if (!enable_ept) {
489     + /*
490     + * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
491     + * host CPUID is more efficient than testing guest CPUID
492     + * or CR4. Host SMEP is anyway a requirement for guest SMEP.
493     + */
494     + if (boot_cpu_has(X86_FEATURE_SMEP))
495     + guest_efer |= EFER_NX;
496     + else if (!(guest_efer & EFER_NX))
497     + ignore_bits |= EFER_NX;
498     + }
499    
500     /*
501     - * NX is emulated; LMA and LME handled by hardware; SCE meaningless
502     - * outside long mode
503     + * LMA and LME handled by hardware; SCE meaningless outside long mode.
504     */
505     - ignore_bits = EFER_NX | EFER_SCE;
506     + ignore_bits |= EFER_SCE;
507     #ifdef CONFIG_X86_64
508     ignore_bits |= EFER_LMA | EFER_LME;
509     /* SCE is meaningful only in long mode on Intel */
510     if (guest_efer & EFER_LMA)
511     ignore_bits &= ~(u64)EFER_SCE;
512     #endif
513     - guest_efer &= ~ignore_bits;
514     - guest_efer |= host_efer & ignore_bits;
515     - vmx->guest_msrs[efer_offset].data = guest_efer;
516     - vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
517    
518     clear_atomic_switch_msr(vmx, MSR_EFER);
519    
520     @@ -1741,16 +1753,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
521     */
522     if (cpu_has_load_ia32_efer ||
523     (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
524     - guest_efer = vmx->vcpu.arch.efer;
525     if (!(guest_efer & EFER_LMA))
526     guest_efer &= ~EFER_LME;
527     if (guest_efer != host_efer)
528     add_atomic_switch_msr(vmx, MSR_EFER,
529     guest_efer, host_efer);
530     return false;
531     - }
532     + } else {
533     + guest_efer &= ~ignore_bits;
534     + guest_efer |= host_efer & ignore_bits;
535    
536     - return true;
537     + vmx->guest_msrs[efer_offset].data = guest_efer;
538     + vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
539     +
540     + return true;
541     + }
542     }
543    
544     static unsigned long segment_base(u16 selector)
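The EFER handling above keeps a common MSR idiom on the software-switch path: bits listed in ignore_bits are taken from the host value and everything else from the guest value, with mask = ~ignore_bits telling the shared user-return MSR code which bits to apply. A one-function sketch of that merge (hypothetical helper, not a KVM function):

#include <linux/types.h>

/* Bits set in ignore_bits come from host, all others from guest. */
static u64 merge_msr_bits(u64 guest, u64 host, u64 ignore_bits)
{
	return (guest & ~ignore_bits) | (host & ignore_bits);
}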
545     diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
546     index 8d8c35623f2a..ffa809f30b19 100644
547     --- a/drivers/dma/at_xdmac.c
548     +++ b/drivers/dma/at_xdmac.c
549     @@ -176,6 +176,7 @@
550     #define AT_XDMAC_MAX_CHAN 0x20
551     #define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
552     #define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
553     +#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
554    
555     #define AT_XDMAC_DMA_BUSWIDTHS\
556     (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
557     @@ -925,8 +926,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
558     struct at_xdmac_desc *desc, *_desc;
559     struct list_head *descs_list;
560     enum dma_status ret;
561     - int residue;
562     - u32 cur_nda, mask, value;
563     + int residue, retry;
564     + u32 cur_nda, check_nda, cur_ubc, mask, value;
565     u8 dwidth = 0;
566     unsigned long flags;
567    
568     @@ -963,7 +964,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
569     cpu_relax();
570     }
571    
572     + /*
573     + * When processing the residue, we need to read two registers but we
574     + * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
575     + * we stand in the descriptor list and AT_XDMAC_CUBC is used
576     + * to know how many data are remaining for the current descriptor.
577     + * Since the dma channel is not paused to not loose data, between the
578     + * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
579     + * descriptor.
580     + * For that reason, after reading AT_XDMAC_CUBC, we check if we are
581     + * still using the same descriptor by reading a second time
582     + * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
583     + * read again AT_XDMAC_CUBC.
584     + * Memory barriers are used to ensure the read order of the registers.
585     + * A max number of retries is set because unlikely it can never ends if
586     + * we are transferring a lot of data with small buffers.
587     + */
588     cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
589     + rmb();
590     + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
591     + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
592     + rmb();
593     + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
594     +
595     + if (likely(cur_nda == check_nda))
596     + break;
597     +
598     + cur_nda = check_nda;
599     + rmb();
600     + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
601     + }
602     +
603     + if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
604     + ret = DMA_ERROR;
605     + goto spin_unlock;
606     + }
607     +
608     /*
609     * Remove size of all microblocks already transferred and the current
610     * one. Then add the remaining size to transfer of the current
611     @@ -976,7 +1012,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
612     if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
613     break;
614     }
615     - residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
616     + residue += cur_ubc << dwidth;
617    
618     dma_set_residue(txstate, residue);
619    
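The comment block in the hunk above describes the whole algorithm; as a standalone illustration, the same consistent-pair read looks like this (a sketch only: read_desc() and read_count() are hypothetical stand-ins for the AT_XDMAC_CNDA and AT_XDMAC_CUBC register reads):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define MY_MAX_RETRIES 5

extern u32 read_desc(void);	/* stand-in for the CNDA read */
extern u32 read_count(void);	/* stand-in for the CUBC read */

static int read_residue_pair(u32 *desc, u32 *count)
{
	u32 cur, check;
	int retry;

	cur = read_desc();
	rmb();			/* read the count only after the descriptor */
	*count = read_count();
	for (retry = 0; retry < MY_MAX_RETRIES; retry++) {
		rmb();
		check = read_desc();
		if (cur == check)
			break;	/* both reads belong to the same descriptor */
		cur = check;
		rmb();
		*count = read_count();
	}
	if (retry >= MY_MAX_RETRIES)
		return -EIO;	/* transfer outpaced us; give up cleanly */
	*desc = cur;
	return 0;
}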
620     diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
621     index 447dbfa6c793..7ac42d063574 100644
622     --- a/drivers/gpu/drm/radeon/atombios_dp.c
623     +++ b/drivers/gpu/drm/radeon/atombios_dp.c
624     @@ -254,7 +254,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
625     #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
626     #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
627    
628     -static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
629     +static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
630     int lane_count,
631     u8 train_set[4])
632     {
633     @@ -301,77 +301,43 @@ static int convert_bpc_to_bpp(int bpc)
634     return bpc * 3;
635     }
636    
637     -/* get the max pix clock supported by the link rate and lane num */
638     -static int dp_get_max_dp_pix_clock(int link_rate,
639     - int lane_num,
640     - int bpp)
641     -{
642     - return (link_rate * lane_num * 8) / bpp;
643     -}
644     -
645     /***** radeon specific DP functions *****/
646    
647     -int radeon_dp_get_max_link_rate(struct drm_connector *connector,
648     - u8 dpcd[DP_DPCD_SIZE])
649     -{
650     - int max_link_rate;
651     -
652     - if (radeon_connector_is_dp12_capable(connector))
653     - max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
654     - else
655     - max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
656     -
657     - return max_link_rate;
658     -}
659     -
660     -/* First get the min lane# when low rate is used according to pixel clock
661     - * (prefer low rate), second check max lane# supported by DP panel,
662     - * if the max lane# < low rate lane# then use max lane# instead.
663     - */
664     -static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
665     - u8 dpcd[DP_DPCD_SIZE],
666     - int pix_clock)
667     +int radeon_dp_get_dp_link_config(struct drm_connector *connector,
668     + const u8 dpcd[DP_DPCD_SIZE],
669     + unsigned pix_clock,
670     + unsigned *dp_lanes, unsigned *dp_rate)
671     {
672     int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
673     - int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
674     - int max_lane_num = drm_dp_max_lane_count(dpcd);
675     - int lane_num;
676     - int max_dp_pix_clock;
677     -
678     - for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
679     - max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
680     - if (pix_clock <= max_dp_pix_clock)
681     - break;
682     - }
683     -
684     - return lane_num;
685     -}
686     -
687     -static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
688     - u8 dpcd[DP_DPCD_SIZE],
689     - int pix_clock)
690     -{
691     - int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
692     - int lane_num, max_pix_clock;
693     + static const unsigned link_rates[3] = { 162000, 270000, 540000 };
694     + unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
695     + unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
696     + unsigned lane_num, i, max_pix_clock;
697    
698     if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
699     - ENCODER_OBJECT_ID_NUTMEG)
700     - return 270000;
701     -
702     - lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
703     - max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
704     - if (pix_clock <= max_pix_clock)
705     - return 162000;
706     - max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
707     - if (pix_clock <= max_pix_clock)
708     - return 270000;
709     - if (radeon_connector_is_dp12_capable(connector)) {
710     - max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
711     - if (pix_clock <= max_pix_clock)
712     - return 540000;
713     + ENCODER_OBJECT_ID_NUTMEG) {
714     + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
715     + max_pix_clock = (lane_num * 270000 * 8) / bpp;
716     + if (max_pix_clock >= pix_clock) {
717     + *dp_lanes = lane_num;
718     + *dp_rate = 270000;
719     + return 0;
720     + }
721     + }
722     + } else {
723     + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
724     + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
725     + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
726     + if (max_pix_clock >= pix_clock) {
727     + *dp_lanes = lane_num;
728     + *dp_rate = link_rates[i];
729     + return 0;
730     + }
731     + }
732     + }
733     }
734    
735     - return radeon_dp_get_max_link_rate(connector, dpcd);
736     + return -EINVAL;
737     }
738    
739     static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
740     @@ -490,6 +456,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
741     {
742     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
743     struct radeon_connector_atom_dig *dig_connector;
744     + int ret;
745    
746     if (!radeon_connector->con_priv)
747     return;
748     @@ -497,10 +464,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
749    
750     if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
751     (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
752     - dig_connector->dp_clock =
753     - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
754     - dig_connector->dp_lane_count =
755     - radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
756     + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
757     + mode->clock,
758     + &dig_connector->dp_lane_count,
759     + &dig_connector->dp_clock);
760     + if (ret) {
761     + dig_connector->dp_clock = 0;
762     + dig_connector->dp_lane_count = 0;
763     + }
764     }
765     }
766    
767     @@ -509,7 +480,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
768     {
769     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
770     struct radeon_connector_atom_dig *dig_connector;
771     - int dp_clock;
772     + unsigned dp_clock, dp_lanes;
773     + int ret;
774    
775     if ((mode->clock > 340000) &&
776     (!radeon_connector_is_dp12_capable(connector)))
777     @@ -519,8 +491,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
778     return MODE_CLOCK_HIGH;
779     dig_connector = radeon_connector->con_priv;
780    
781     - dp_clock =
782     - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
783     + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
784     + mode->clock,
785     + &dp_lanes,
786     + &dp_clock);
787     + if (ret)
788     + return MODE_CLOCK_HIGH;
789    
790     if ((dp_clock == 540000) &&
791     (!radeon_connector_is_dp12_capable(connector)))
792     diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
793     index c9ff4cf4c4e7..c4b4c0233937 100644
794     --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
795     +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
796     @@ -520,11 +520,17 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
797     drm_mode_set_crtcinfo(adjusted_mode, 0);
798     {
799     struct radeon_connector_atom_dig *dig_connector;
800     + int ret;
801    
802     dig_connector = mst_enc->connector->con_priv;
803     - dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
804     - dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
805     - dig_connector->dpcd);
806     + ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
807     + dig_connector->dpcd, adjusted_mode->clock,
808     + &dig_connector->dp_lane_count,
809     + &dig_connector->dp_clock);
810     + if (ret) {
811     + dig_connector->dp_lane_count = 0;
812     + dig_connector->dp_clock = 0;
813     + }
814     DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
815     dig_connector->dp_lane_count, dig_connector->dp_clock);
816     }
817     diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
818     index 9af2d8398e90..43ba333949c7 100644
819     --- a/drivers/gpu/drm/radeon/radeon_mode.h
820     +++ b/drivers/gpu/drm/radeon/radeon_mode.h
821     @@ -752,8 +752,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
822     extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
823     extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
824     struct drm_connector *connector);
825     -int radeon_dp_get_max_link_rate(struct drm_connector *connector,
826     - u8 *dpcd);
827     +extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
828     + const u8 *dpcd,
829     + unsigned pix_clock,
830     + unsigned *dp_lanes, unsigned *dp_rate);
831     extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
832     u8 power_state);
833     extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
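The refactored radeon_dp_get_dp_link_config() above folds the old separate lane-count and link-rate helpers into one search: a (lane count, link rate) pair is usable when

lane_num * link_rate * 8 / bpp >= pix_clock

with link_rate in kHz (162000, 270000, or 540000), 8 data bits carried per symbol after 8b/10b coding, and bpp the panel's bits per pixel. As a worked example, a 1080p60 mode (pix_clock = 148500 kHz) at bpp = 24 needs lane_num * link_rate >= 148500 * 24 / 8 = 445500, so 1 lane at 270000 fails while 1 lane at 540000 or 2 lanes at 270000 passes; with lanes as the outer loop, the search prefers the lowest lane count that works.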
834     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
835     index a56eab7f0ab1..8319eed613b0 100644
836     --- a/drivers/gpu/drm/radeon/radeon_pm.c
837     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
838     @@ -1079,6 +1079,8 @@ force:
839    
840     /* update display watermarks based on new power state */
841     radeon_bandwidth_update(rdev);
842     + /* update displays */
843     + radeon_dpm_display_configuration_changed(rdev);
844    
845     /* wait for the rings to drain */
846     for (i = 0; i < RADEON_NUM_RINGS; i++) {
847     @@ -1095,9 +1097,6 @@ force:
848    
849     radeon_dpm_post_set_power_state(rdev);
850    
851     - /* update displays */
852     - radeon_dpm_display_configuration_changed(rdev);
853     -
854     rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
855     rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
856     rdev->pm.dpm.single_display = single_display;
857     diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
858     index 8b4d3e6875eb..21924f52863f 100644
859     --- a/drivers/net/can/usb/gs_usb.c
860     +++ b/drivers/net/can/usb/gs_usb.c
861     @@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
862     static void gs_destroy_candev(struct gs_can *dev)
863     {
864     unregister_candev(dev->netdev);
865     - free_candev(dev->netdev);
866     usb_kill_anchored_urbs(&dev->tx_submitted);
867     - kfree(dev);
868     + free_candev(dev->netdev);
869     }
870    
871     static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
872     @@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
873     for (i = 0; i < icount; i++) {
874     dev->canch[i] = gs_make_candev(i, intf);
875     if (IS_ERR_OR_NULL(dev->canch[i])) {
876     + /* save error code to return later */
877     + rc = PTR_ERR(dev->canch[i]);
878     +
879     /* on failure destroy previously created candevs */
880     icount = i;
881     - for (i = 0; i < icount; i++) {
882     + for (i = 0; i < icount; i++)
883     gs_destroy_candev(dev->canch[i]);
884     - dev->canch[i] = NULL;
885     - }
886     +
887     + usb_kill_anchored_urbs(&dev->rx_submitted);
888     kfree(dev);
889     return rc;
890     }
891     @@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
892     return;
893     }
894    
895     - for (i = 0; i < GS_MAX_INTF; i++) {
896     - struct gs_can *can = dev->canch[i];
897     -
898     - if (!can)
899     - continue;
900     -
901     - gs_destroy_candev(can);
902     - }
903     + for (i = 0; i < GS_MAX_INTF; i++)
904     + if (dev->canch[i])
905     + gs_destroy_candev(dev->canch[i]);
906    
907     usb_kill_anchored_urbs(&dev->rx_submitted);
908     + kfree(dev);
909     }
910    
911     static const struct usb_device_id gs_usb_table[] = {
912     diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
913     index 281451c274ca..771097f2162d 100644
914     --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
915     +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
916     @@ -370,6 +370,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
917     return -1;
918     }
919    
920     + /*
921     + * Increase the pending frames counter, so that later when a reply comes
922     + * in and the counter is decreased - we don't start getting negative
923     + * values.
924     + * Note that we don't need to make sure it isn't agg'd, since we're
925     + * TXing non-sta
926     + */
927     + atomic_inc(&mvm->pending_frames[sta_id]);
928     +
929     return 0;
930     }
931    
932     diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
933     index ad48837ead42..eed7c5a31b15 100644
934     --- a/drivers/target/target_core_tmr.c
935     +++ b/drivers/target/target_core_tmr.c
936     @@ -181,7 +181,6 @@ void core_tmr_abort_task(
937    
938     if (!__target_check_io_state(se_cmd, se_sess, 0)) {
939     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
940     - target_put_sess_cmd(se_cmd);
941     goto out;
942     }
943    
944     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
945     index 9a83f149ac85..95dfff88de11 100644
946     --- a/fs/ext4/ext4.h
947     +++ b/fs/ext4/ext4.h
948     @@ -873,6 +873,15 @@ struct ext4_inode_info {
949     * by other means, so we have i_data_sem.
950     */
951     struct rw_semaphore i_data_sem;
952     + /*
953     + * i_mmap_sem is for serializing page faults with truncate / punch hole
954     + * operations. We have to make sure that new page cannot be faulted in
955     + * a section of the inode that is being punched. We cannot easily use
956     + * i_data_sem for this since we need protection for the whole punch
957     + * operation and i_data_sem ranks below transaction start so we have
958     + * to occasionally drop it.
959     + */
960     + struct rw_semaphore i_mmap_sem;
961     struct inode vfs_inode;
962     struct jbd2_inode *jinode;
963    
964     @@ -2287,6 +2296,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
965     extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
966     loff_t lstart, loff_t lend);
967     extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
968     +extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
969     extern qsize_t *ext4_get_reserved_space(struct inode *inode);
970     extern void ext4_da_update_reserve_space(struct inode *inode,
971     int used, int quota_claim);
972     @@ -2632,6 +2642,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
973     return changed;
974     }
975    
976     +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
977     + loff_t len);
978     +
979     struct ext4_group_info {
980     unsigned long bb_state;
981     struct rb_root bb_free_root;
982     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
983     index 87ba10d1d3bc..ea12f565be24 100644
984     --- a/fs/ext4/extents.c
985     +++ b/fs/ext4/extents.c
986     @@ -4741,7 +4741,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
987     int partial_begin, partial_end;
988     loff_t start, end;
989     ext4_lblk_t lblk;
990     - struct address_space *mapping = inode->i_mapping;
991     unsigned int blkbits = inode->i_blkbits;
992    
993     trace_ext4_zero_range(inode, offset, len, mode);
994     @@ -4757,17 +4756,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
995     }
996    
997     /*
998     - * Write out all dirty pages to avoid race conditions
999     - * Then release them.
1000     - */
1001     - if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
1002     - ret = filemap_write_and_wait_range(mapping, offset,
1003     - offset + len - 1);
1004     - if (ret)
1005     - return ret;
1006     - }
1007     -
1008     - /*
1009     * Round up offset. This is not fallocate, we neet to zero out
1010     * blocks, so convert interior block aligned part of the range to
1011     * unwritten and possibly manually zero out unaligned parts of the
1012     @@ -4810,6 +4798,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
1013     if (mode & FALLOC_FL_KEEP_SIZE)
1014     flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
1015    
1016     + /* Wait all existing dio workers, newcomers will block on i_mutex */
1017     + ext4_inode_block_unlocked_dio(inode);
1018     + inode_dio_wait(inode);
1019     +
1020     /* Preallocate the range including the unaligned edges */
1021     if (partial_begin || partial_end) {
1022     ret = ext4_alloc_file_blocks(file,
1023     @@ -4818,7 +4810,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
1024     round_down(offset, 1 << blkbits)) >> blkbits,
1025     new_size, flags, mode);
1026     if (ret)
1027     - goto out_mutex;
1028     + goto out_dio;
1029    
1030     }
1031    
1032     @@ -4827,16 +4819,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
1033     flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
1034     EXT4_EX_NOCACHE);
1035    
1036     - /* Now release the pages and zero block aligned part of pages*/
1037     + /*
1038     + * Prevent page faults from reinstantiating pages we have
1039     + * released from page cache.
1040     + */
1041     + down_write(&EXT4_I(inode)->i_mmap_sem);
1042     + ret = ext4_update_disksize_before_punch(inode, offset, len);
1043     + if (ret) {
1044     + up_write(&EXT4_I(inode)->i_mmap_sem);
1045     + goto out_dio;
1046     + }
1047     + /* Now release the pages and zero block aligned part of pages */
1048     truncate_pagecache_range(inode, start, end - 1);
1049     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
1050    
1051     - /* Wait all existing dio workers, newcomers will block on i_mutex */
1052     - ext4_inode_block_unlocked_dio(inode);
1053     - inode_dio_wait(inode);
1054     -
1055     ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
1056     flags, mode);
1057     + up_write(&EXT4_I(inode)->i_mmap_sem);
1058     if (ret)
1059     goto out_dio;
1060     }
1061     @@ -4964,8 +4963,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1062     goto out;
1063     }
1064    
1065     + /* Wait all existing dio workers, newcomers will block on i_mutex */
1066     + ext4_inode_block_unlocked_dio(inode);
1067     + inode_dio_wait(inode);
1068     +
1069     ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
1070     flags, mode);
1071     + ext4_inode_resume_unlocked_dio(inode);
1072     if (ret)
1073     goto out;
1074    
1075     @@ -5424,21 +5428,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1076     return ret;
1077     }
1078    
1079     - /*
1080     - * Need to round down offset to be aligned with page size boundary
1081     - * for page size > block size.
1082     - */
1083     - ioffset = round_down(offset, PAGE_SIZE);
1084     -
1085     - /* Write out all dirty pages */
1086     - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
1087     - LLONG_MAX);
1088     - if (ret)
1089     - return ret;
1090     -
1091     - /* Take mutex lock */
1092     mutex_lock(&inode->i_mutex);
1093     -
1094     /*
1095     * There is no need to overlap collapse range with EOF, in which case
1096     * it is effectively a truncate operation
1097     @@ -5454,17 +5444,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1098     goto out_mutex;
1099     }
1100    
1101     - truncate_pagecache(inode, ioffset);
1102     -
1103     /* Wait for existing dio to complete */
1104     ext4_inode_block_unlocked_dio(inode);
1105     inode_dio_wait(inode);
1106    
1107     + /*
1108     + * Prevent page faults from reinstantiating pages we have released from
1109     + * page cache.
1110     + */
1111     + down_write(&EXT4_I(inode)->i_mmap_sem);
1112     + /*
1113     + * Need to round down offset to be aligned with page size boundary
1114     + * for page size > block size.
1115     + */
1116     + ioffset = round_down(offset, PAGE_SIZE);
1117     + /*
1118     + * Write tail of the last page before removed range since it will get
1119     + * removed from the page cache below.
1120     + */
1121     + ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
1122     + if (ret)
1123     + goto out_mmap;
1124     + /*
1125     + * Write data that will be shifted to preserve them when discarding
1126     + * page cache below. We are also protected from pages becoming dirty
1127     + * by i_mmap_sem.
1128     + */
1129     + ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
1130     + LLONG_MAX);
1131     + if (ret)
1132     + goto out_mmap;
1133     + truncate_pagecache(inode, ioffset);
1134     +
1135     credits = ext4_writepage_trans_blocks(inode);
1136     handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
1137     if (IS_ERR(handle)) {
1138     ret = PTR_ERR(handle);
1139     - goto out_dio;
1140     + goto out_mmap;
1141     }
1142    
1143     down_write(&EXT4_I(inode)->i_data_sem);
1144     @@ -5503,7 +5519,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1145    
1146     out_stop:
1147     ext4_journal_stop(handle);
1148     -out_dio:
1149     +out_mmap:
1150     + up_write(&EXT4_I(inode)->i_mmap_sem);
1151     ext4_inode_resume_unlocked_dio(inode);
1152     out_mutex:
1153     mutex_unlock(&inode->i_mutex);
1154     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1155     index 0613c256c344..dd65fac5ff2f 100644
1156     --- a/fs/ext4/file.c
1157     +++ b/fs/ext4/file.c
1158     @@ -213,7 +213,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = {
1159     #endif
1160    
1161     static const struct vm_operations_struct ext4_file_vm_ops = {
1162     - .fault = filemap_fault,
1163     + .fault = ext4_filemap_fault,
1164     .map_pages = filemap_map_pages,
1165     .page_mkwrite = ext4_page_mkwrite,
1166     };
1167     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1168     index 2b3a53a51582..3291e1af0e24 100644
1169     --- a/fs/ext4/inode.c
1170     +++ b/fs/ext4/inode.c
1171     @@ -3524,6 +3524,35 @@ int ext4_can_truncate(struct inode *inode)
1172     }
1173    
1174     /*
1175     + * We have to make sure i_disksize gets properly updated before we truncate
1176     + * page cache due to hole punching or zero range. Otherwise i_disksize update
1177     + * can get lost as it may have been postponed to submission of writeback but
1178     + * that will never happen after we truncate page cache.
1179     + */
1180     +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
1181     + loff_t len)
1182     +{
1183     + handle_t *handle;
1184     + loff_t size = i_size_read(inode);
1185     +
1186     + WARN_ON(!mutex_is_locked(&inode->i_mutex));
1187     + if (offset > size || offset + len < size)
1188     + return 0;
1189     +
1190     + if (EXT4_I(inode)->i_disksize >= size)
1191     + return 0;
1192     +
1193     + handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
1194     + if (IS_ERR(handle))
1195     + return PTR_ERR(handle);
1196     + ext4_update_i_disksize(inode, size);
1197     + ext4_mark_inode_dirty(handle, inode);
1198     + ext4_journal_stop(handle);
1199     +
1200     + return 0;
1201     +}
1202     +
1203     +/*
1204     * ext4_punch_hole: punches a hole in a file by releaseing the blocks
1205     * associated with the given offset and length
1206     *
1207     @@ -3588,17 +3617,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1208    
1209     }
1210    
1211     + /* Wait all existing dio workers, newcomers will block on i_mutex */
1212     + ext4_inode_block_unlocked_dio(inode);
1213     + inode_dio_wait(inode);
1214     +
1215     + /*
1216     + * Prevent page faults from reinstantiating pages we have released from
1217     + * page cache.
1218     + */
1219     + down_write(&EXT4_I(inode)->i_mmap_sem);
1220     first_block_offset = round_up(offset, sb->s_blocksize);
1221     last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
1222    
1223     /* Now release the pages and zero block aligned part of pages*/
1224     - if (last_block_offset > first_block_offset)
1225     + if (last_block_offset > first_block_offset) {
1226     + ret = ext4_update_disksize_before_punch(inode, offset, length);
1227     + if (ret)
1228     + goto out_dio;
1229     truncate_pagecache_range(inode, first_block_offset,
1230     last_block_offset);
1231     -
1232     - /* Wait all existing dio workers, newcomers will block on i_mutex */
1233     - ext4_inode_block_unlocked_dio(inode);
1234     - inode_dio_wait(inode);
1235     + }
1236    
1237     if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1238     credits = ext4_writepage_trans_blocks(inode);
1239     @@ -3645,16 +3683,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1240     if (IS_SYNC(inode))
1241     ext4_handle_sync(handle);
1242    
1243     - /* Now release the pages again to reduce race window */
1244     - if (last_block_offset > first_block_offset)
1245     - truncate_pagecache_range(inode, first_block_offset,
1246     - last_block_offset);
1247     -
1248     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
1249     ext4_mark_inode_dirty(handle, inode);
1250     out_stop:
1251     ext4_journal_stop(handle);
1252     out_dio:
1253     + up_write(&EXT4_I(inode)->i_mmap_sem);
1254     ext4_inode_resume_unlocked_dio(inode);
1255     out_mutex:
1256     mutex_unlock(&inode->i_mutex);
1257     @@ -4775,11 +4809,13 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
1258     } else
1259     ext4_wait_for_tail_page_commit(inode);
1260     }
1261     + down_write(&EXT4_I(inode)->i_mmap_sem);
1262     /*
1263     * Truncate pagecache after we've waited for commit
1264     * in data=journal mode to make pages freeable.
1265     */
1266     truncate_pagecache(inode, inode->i_size);
1267     + up_write(&EXT4_I(inode)->i_mmap_sem);
1268     }
1269     /*
1270     * We want to call ext4_truncate() even if attr->ia_size ==
1271     @@ -5234,6 +5270,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1272    
1273     sb_start_pagefault(inode->i_sb);
1274     file_update_time(vma->vm_file);
1275     +
1276     + down_read(&EXT4_I(inode)->i_mmap_sem);
1277     /* Delalloc case is easy... */
1278     if (test_opt(inode->i_sb, DELALLOC) &&
1279     !ext4_should_journal_data(inode) &&
1280     @@ -5303,6 +5341,19 @@ retry_alloc:
1281     out_ret:
1282     ret = block_page_mkwrite_return(ret);
1283     out:
1284     + up_read(&EXT4_I(inode)->i_mmap_sem);
1285     sb_end_pagefault(inode->i_sb);
1286     return ret;
1287     }
1288     +
1289     +int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1290     +{
1291     + struct inode *inode = file_inode(vma->vm_file);
1292     + int err;
1293     +
1294     + down_read(&EXT4_I(inode)->i_mmap_sem);
1295     + err = filemap_fault(vma, vmf);
1296     + up_read(&EXT4_I(inode)->i_mmap_sem);
1297     +
1298     + return err;
1299     +}
1300     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1301     index 8a3b9f14d198..6f5ca3e92246 100644
1302     --- a/fs/ext4/super.c
1303     +++ b/fs/ext4/super.c
1304     @@ -945,6 +945,7 @@ static void init_once(void *foo)
1305     INIT_LIST_HEAD(&ei->i_orphan);
1306     init_rwsem(&ei->xattr_sem);
1307     init_rwsem(&ei->i_data_sem);
1308     + init_rwsem(&ei->i_mmap_sem);
1309     inode_init_once(&ei->vfs_inode);
1310     }
1311    
1312     diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
1313     index 011ba6670d99..c70d06a383e2 100644
1314     --- a/fs/ext4/truncate.h
1315     +++ b/fs/ext4/truncate.h
1316     @@ -10,8 +10,10 @@
1317     */
1318     static inline void ext4_truncate_failed_write(struct inode *inode)
1319     {
1320     + down_write(&EXT4_I(inode)->i_mmap_sem);
1321     truncate_inode_pages(inode->i_mapping, inode->i_size);
1322     ext4_truncate(inode);
1323     + up_write(&EXT4_I(inode)->i_mmap_sem);
1324     }
1325    
1326     /*
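All of the ext4 hunks above revolve around the new i_mmap_sem: page faults take it shared, while truncate, hole punch, zero range, and collapse range take it exclusive around the page-cache purge, so a racing fault cannot reinstantiate a page in the range whose blocks are about to be freed. A minimal sketch of the two sides, with an illustrative my_inode_info/MY_I() container in place of ext4's:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rwsem.h>

struct my_inode_info {
	struct rw_semaphore i_mmap_sem;	/* init_rwsem() at inode init time */
	struct inode vfs_inode;
};

#define MY_I(inode) container_of(inode, struct my_inode_info, vfs_inode)

static int my_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int err;

	down_read(&MY_I(inode)->i_mmap_sem);	/* shared: faults may run in parallel */
	err = filemap_fault(vma, vmf);
	up_read(&MY_I(inode)->i_mmap_sem);
	return err;
}

static void my_punch_pagecache(struct inode *inode, loff_t first, loff_t last)
{
	down_write(&MY_I(inode)->i_mmap_sem);	/* exclusive: block new faults */
	truncate_pagecache_range(inode, first, last);
	/* ... remove the underlying blocks while still holding the sem ... */
	up_write(&MY_I(inode)->i_mmap_sem);
}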
1327     diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
1328     index 1ba5c97943b8..cfbceb116356 100644
1329     --- a/fs/jffs2/dir.c
1330     +++ b/fs/jffs2/dir.c
1331     @@ -845,9 +845,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
1332    
1333     pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
1334     __func__, ret);
1335     - /* Might as well let the VFS know */
1336     - d_instantiate(new_dentry, d_inode(old_dentry));
1337     - ihold(d_inode(old_dentry));
1338     + /*
1339     + * We can't keep the target in dcache after that.
1340     + * For one thing, we can't afford dentry aliases for directories.
1341     + * For another, if there was a victim, we _can't_ set new inode
1342     + * for that sucker and we have to trigger mount eviction - the
1343     + * caller won't do it on its own since we are returning an error.
1344     + */
1345     + d_invalidate(new_dentry);
1346     new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
1347     return ret;
1348     }
1349     diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
1350     index 80021c709af9..0c2632386f35 100644
1351     --- a/fs/ncpfs/dir.c
1352     +++ b/fs/ncpfs/dir.c
1353     @@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
1354     d_rehash(newdent);
1355     } else {
1356     spin_lock(&dentry->d_lock);
1357     - NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
1358     + NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
1359     spin_unlock(&dentry->d_lock);
1360     }
1361     } else {
1362     diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
1363     index 692ceda3bc21..a2b1d7ce3e1a 100644
1364     --- a/fs/overlayfs/dir.c
1365     +++ b/fs/overlayfs/dir.c
1366     @@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
1367     * sole user of this dentry. Too tricky... Just unhash for
1368     * now.
1369     */
1370     - d_drop(dentry);
1371     + if (!err)
1372     + d_drop(dentry);
1373     mutex_unlock(&dir->i_mutex);
1374    
1375     return err;
1376     @@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
1377     if (!overwrite && new_is_dir && !old_opaque && new_opaque)
1378     ovl_remove_opaque(newdentry);
1379    
1380     + /*
1381     + * Old dentry now lives in different location. Dentries in
1382     + * lowerstack are stale. We cannot drop them here because
1383     + * access to them is lockless. This could be only pure upper
1384     + * or opaque directory - numlower is zero. Or upper non-dir
1385     + * entry - its pureness is tracked by flag opaque.
1386     + */
1387     if (old_opaque != new_opaque) {
1388     ovl_dentry_set_opaque(old, new_opaque);
1389     if (!overwrite)
1390     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
1391     index a1b069e5e363..e505b44a9184 100644
1392     --- a/fs/overlayfs/inode.c
1393     +++ b/fs/overlayfs/inode.c
1394     @@ -66,6 +66,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
1395     if (upperdentry) {
1396     mutex_lock(&upperdentry->d_inode->i_mutex);
1397     err = notify_change(upperdentry, attr, NULL);
1398     + if (!err)
1399     + ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
1400     mutex_unlock(&upperdentry->d_inode->i_mutex);
1401     } else {
1402     err = ovl_copy_up_last(dentry, attr, false);
1403     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
1404     index bd6d5c1e667d..39266655d2bd 100644
1405     --- a/fs/overlayfs/super.c
1406     +++ b/fs/overlayfs/super.c
1407     @@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
1408     if (oe->__upperdentry) {
1409     type = __OVL_PATH_UPPER;
1410    
1411     - if (oe->numlower) {
1412     - if (S_ISDIR(dentry->d_inode->i_mode))
1413     - type |= __OVL_PATH_MERGE;
1414     - } else if (!oe->opaque) {
1415     + /*
1416     + * A non-dir dentry can hold a lower dentry from its previous
1417     + * location. Its purity depends only on the opaque flag.
1418     + */
1419     + if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
1420     + type |= __OVL_PATH_MERGE;
1421     + else if (!oe->opaque)
1422     type |= __OVL_PATH_PURE;
1423     - }
1424     } else {
1425     if (oe->numlower > 1)
1426     type |= __OVL_PATH_MERGE;
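
The reworked ovl_path_type() is easiest to read as a decision table. A standalone C model of the corrected classification (flag names shortened, behaviour mirrored from the hunk above):

    #include <stdbool.h>

    enum { PATH_UPPER = 1, PATH_MERGE = 2, PATH_PURE = 4 };

    static int path_type(bool upper, unsigned int numlower,
                         bool is_dir, bool opaque)
    {
            int type = 0;

            if (upper) {
                    type |= PATH_UPPER;
                    if (numlower && is_dir)
                            type |= PATH_MERGE;  /* merged directory */
                    else if (!opaque)
                            type |= PATH_PURE;   /* no lower data showing through */
            } else if (numlower > 1) {
                    type |= PATH_MERGE;
            }
            return type;
    }

The point of the change: a non-directory with leftover lower dentries is no longer reported as merged, and its purity is decided by the opaque flag alone.
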
1427     diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
1428     index a6e1bca88cc6..8454fb35fcbe 100644
1429     --- a/include/linux/tracepoint.h
1430     +++ b/include/linux/tracepoint.h
1431     @@ -131,9 +131,6 @@ extern void syscall_unregfunc(void);
1432     void *it_func; \
1433     void *__data; \
1434     \
1435     - if (!cpu_online(raw_smp_processor_id())) \
1436     - return; \
1437     - \
1438     if (!(cond)) \
1439     return; \
1440     prercu; \
1441     @@ -332,15 +329,19 @@ extern void syscall_unregfunc(void);
1442     * "void *__data, proto" as the callback prototype.
1443     */
1444     #define DECLARE_TRACE_NOARGS(name) \
1445     - __DECLARE_TRACE(name, void, , 1, void *__data, __data)
1446     + __DECLARE_TRACE(name, void, , \
1447     + cpu_online(raw_smp_processor_id()), \
1448     + void *__data, __data)
1449    
1450     #define DECLARE_TRACE(name, proto, args) \
1451     - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
1452     - PARAMS(void *__data, proto), \
1453     - PARAMS(__data, args))
1454     + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
1455     + cpu_online(raw_smp_processor_id()), \
1456     + PARAMS(void *__data, proto), \
1457     + PARAMS(__data, args))
1458    
1459     #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
1460     - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
1461     + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
1462     + cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
1463     PARAMS(void *__data, proto), \
1464     PARAMS(__data, args))
1465    
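The tracepoint change deletes the cpu_online() check from the shared probe body and folds it into the condition argument of each DECLARE_TRACE variant, so DECLARE_TRACE_CONDITION ends up testing cpu_online(...) && (cond). A standalone model of folding an extra predicate into a macro's condition parameter (names are illustrative):

    #include <stdio.h>

    /* The condition is pasted into the generated body, so it may
     * combine a global predicate with a per-call expression. */
    #define DECLARE_DEMO(name, cond)                        \
            static void trace_##name(int v)                 \
            {                                               \
                    if (!(cond))                            \
                            return;                         \
                    printf(#name ": v=%d\n", v);            \
            }

    static int cpu_is_online = 1;   /* stand-in for cpu_online() */

    DECLARE_DEMO(plain, cpu_is_online)
    DECLARE_DEMO(guarded, cpu_is_online && (v > 0))

    int main(void)
    {
            trace_plain(1);     /* fires */
            trace_guarded(-1);  /* suppressed by the folded condition */
            return 0;
    }
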
1466     diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
1467     index 8f81bbbc38fc..e0f4109e64c6 100644
1468     --- a/include/net/iw_handler.h
1469     +++ b/include/net/iw_handler.h
1470     @@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
1471     /* Send a single event to user space */
1472     void wireless_send_event(struct net_device *dev, unsigned int cmd,
1473     union iwreq_data *wrqu, const char *extra);
1474     +#ifdef CONFIG_WEXT_CORE
1475     +/* flush all previous wext events - if work is done from netdev notifiers */
1476     +void wireless_nlevent_flush(void);
1477     +#else
1478     +static inline void wireless_nlevent_flush(void) {}
1479     +#endif
1480    
1481     /* We may need a function to send a stream of events to user space.
1482     * More on that later... */
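
The header uses the standard config-stub idiom: a real prototype when CONFIG_WEXT_CORE is set, an empty static inline otherwise, so callers never need their own #ifdefs and the call compiles away entirely in the stub case. The generic shape (CONFIG_FEATURE is a placeholder):

    #ifdef CONFIG_FEATURE
    void feature_flush(void);                    /* real version in feature.c */
    #else
    static inline void feature_flush(void) {}    /* no-op, inlined to nothing */
    #endif
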
1483     diff --git a/kernel/events/core.c b/kernel/events/core.c
1484     index e1af58e23bee..66e6568a4736 100644
1485     --- a/kernel/events/core.c
1486     +++ b/kernel/events/core.c
1487     @@ -1562,14 +1562,14 @@ event_sched_out(struct perf_event *event,
1488    
1489     perf_pmu_disable(event->pmu);
1490    
1491     + event->tstamp_stopped = tstamp;
1492     + event->pmu->del(event, 0);
1493     + event->oncpu = -1;
1494     event->state = PERF_EVENT_STATE_INACTIVE;
1495     if (event->pending_disable) {
1496     event->pending_disable = 0;
1497     event->state = PERF_EVENT_STATE_OFF;
1498     }
1499     - event->tstamp_stopped = tstamp;
1500     - event->pmu->del(event, 0);
1501     - event->oncpu = -1;
1502    
1503     if (!is_software_event(event))
1504     cpuctx->active_oncpu--;
1505     @@ -7641,6 +7641,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
1506     }
1507     }
1508    
1509     + /* symmetric to unaccount_event() in _free_event() */
1510     + account_event(event);
1511     +
1512     return event;
1513    
1514     err_per_task:
1515     @@ -8004,8 +8007,6 @@ SYSCALL_DEFINE5(perf_event_open,
1516     }
1517     }
1518    
1519     - account_event(event);
1520     -
1521     /*
1522     * Special case software events and allow them to be part of
1523     * any hardware group.
1524     @@ -8221,7 +8222,12 @@ err_context:
1525     perf_unpin_context(ctx);
1526     put_ctx(ctx);
1527     err_alloc:
1528     - free_event(event);
1529     + /*
1530     + * If event_file is set, the fput() above will have called ->release()
1531     + * and that will take care of freeing the event.
1532     + */
1533     + if (!event_file)
1534     + free_event(event);
1535     err_cpus:
1536     put_online_cpus();
1537     err_task:
1538     @@ -8265,8 +8271,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
1539     /* Mark owner so we could distinguish it from user events. */
1540     event->owner = EVENT_OWNER_KERNEL;
1541    
1542     - account_event(event);
1543     -
1544     ctx = find_get_context(event->pmu, task, event);
1545     if (IS_ERR(ctx)) {
1546     err = PTR_ERR(ctx);
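
Taken together, the perf hunks restore two invariants: account_event() now lives in perf_event_alloc(), symmetric with unaccount_event() in _free_event(), and once the event is reachable through event_file, the fput() -> ->release() path owns its teardown, so the error path must not free it again. A standalone sketch of that ownership hand-off (types and helpers are illustrative, not the perf API):

    struct ev;
    extern void free_ev(struct ev *e);    /* pairs with the allocator */
    extern void fput_file(void *f);       /* invokes ->release(), which frees */

    static void error_path(struct ev *event, void *event_file)
    {
            if (event_file)
                    fput_file(event_file); /* release() will free the event */
            else
                    free_ev(event);        /* not yet owned by a file */
    }
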
1547     diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
1548     index 5c564a68fb50..d71edcbd0c58 100644
1549     --- a/net/mac80211/agg-rx.c
1550     +++ b/net/mac80211/agg-rx.c
1551     @@ -289,7 +289,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
1552     }
1553    
1554     /* prepare A-MPDU MLME for Rx aggregation */
1555     - tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
1556     + tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
1557     if (!tid_agg_rx)
1558     goto end;
1559    
1560     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
1561     index c0a9187bc3a9..cdf8609a6240 100644
1562     --- a/net/mac80211/ieee80211_i.h
1563     +++ b/net/mac80211/ieee80211_i.h
1564     @@ -90,7 +90,7 @@ struct ieee80211_fragment_entry {
1565     unsigned int last_frag;
1566     unsigned int extra_len;
1567     struct sk_buff_head skb_list;
1568     - int ccmp; /* Whether fragments were encrypted with CCMP */
1569     + bool check_sequential_pn; /* needed for CCMP/GCMP */
1570     u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
1571     };
1572    
1573     diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
1574     index 3ece7d1034c8..b54f398cda5d 100644
1575     --- a/net/mac80211/rc80211_minstrel.c
1576     +++ b/net/mac80211/rc80211_minstrel.c
1577     @@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
1578     * computing cur_tp
1579     */
1580     tmp_mrs = &mi->r[idx].stats;
1581     - tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
1582     + tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
1583     tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
1584    
1585     return tmp_cur_tp;
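
Both minstrel hunks (here and in rc80211_minstrel_ht.c below) fix the same unit slip: minstrel_get_tp_avg() is assumed, per this change, to return its average scaled down by a factor of 10, while the expected-throughput API wants Kbps for a nominal 1200-byte packet. The conversion written out:

    /* pkts/s * 1200 B/pkt * 8 bit/B / 1024 = Kbps (nominal 1200-byte MSDU) */
    static unsigned int tp_to_kbps(unsigned int tp_avg_scaled)
    {
            unsigned int pkts_per_sec = tp_avg_scaled * 10; /* undo the /10 */

            return pkts_per_sec * 1200 * 8 / 1024;
    }
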
1586     diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
1587     index 7430a1df2ab1..1ec889dc2e46 100644
1588     --- a/net/mac80211/rc80211_minstrel_ht.c
1589     +++ b/net/mac80211/rc80211_minstrel_ht.c
1590     @@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
1591     if (likely(sta->ampdu_mlme.tid_tx[tid]))
1592     return;
1593    
1594     - ieee80211_start_tx_ba_session(pubsta, tid, 5000);
1595     + ieee80211_start_tx_ba_session(pubsta, tid, 0);
1596     }
1597    
1598     static void
1599     @@ -1328,7 +1328,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
1600     prob = mi->groups[i].rates[j].prob_ewma;
1601    
1602     /* convert tp_avg from pkt per second in kbps */
1603     - tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
1604     + tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
1605     + tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
1606    
1607     return tp_avg;
1608     }
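
The other minstrel_ht change asks ieee80211_start_tx_ba_session() for a session with timeout 0, i.e. no inactivity timer, instead of letting the BlockAck session expire after 5000 TU (about five seconds) of idle and being renegotiated on the next burst. The call in sketch form:

    static void start_aggr(struct ieee80211_sta *pubsta, u16 tid)
    {
            /* timeout 0: keep the session up across idle periods */
            ieee80211_start_tx_ba_session(pubsta, tid, 0);
    }
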
1609     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1610     index 5793f75c5ffd..d4b08d87537c 100644
1611     --- a/net/mac80211/rx.c
1612     +++ b/net/mac80211/rx.c
1613     @@ -1725,7 +1725,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1614     entry->seq = seq;
1615     entry->rx_queue = rx_queue;
1616     entry->last_frag = frag;
1617     - entry->ccmp = 0;
1618     + entry->check_sequential_pn = false;
1619     entry->extra_len = 0;
1620    
1621     return entry;
1622     @@ -1821,15 +1821,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1623     rx->seqno_idx, &(rx->skb));
1624     if (rx->key &&
1625     (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
1626     - rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
1627     + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
1628     + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
1629     + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
1630     ieee80211_has_protected(fc)) {
1631     int queue = rx->security_idx;
1632     - /* Store CCMP PN so that we can verify that the next
1633     - * fragment has a sequential PN value. */
1634     - entry->ccmp = 1;
1635     +
1636     + /* Store CCMP/GCMP PN so that we can verify that the
1637     + * next fragment has a sequential PN value.
1638     + */
1639     + entry->check_sequential_pn = true;
1640     memcpy(entry->last_pn,
1641     rx->key->u.ccmp.rx_pn[queue],
1642     IEEE80211_CCMP_PN_LEN);
1643     + BUILD_BUG_ON(offsetof(struct ieee80211_key,
1644     + u.ccmp.rx_pn) !=
1645     + offsetof(struct ieee80211_key,
1646     + u.gcmp.rx_pn));
1647     + BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
1648     + sizeof(rx->key->u.gcmp.rx_pn[queue]));
1649     + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
1650     + IEEE80211_GCMP_PN_LEN);
1651     }
1652     return RX_QUEUED;
1653     }
1654     @@ -1844,15 +1856,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1655     return RX_DROP_MONITOR;
1656     }
1657    
1658     - /* Verify that MPDUs within one MSDU have sequential PN values.
1659     - * (IEEE 802.11i, 8.3.3.4.5) */
1660     - if (entry->ccmp) {
1661     + /* "The receiver shall discard MSDUs and MMPDUs whose constituent
1662     + * MPDU PN values are not incrementing in steps of 1."
1663     + * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
1664     + * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
1665     + */
1666     + if (entry->check_sequential_pn) {
1667     int i;
1668     u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
1669     int queue;
1670     +
1671     if (!rx->key ||
1672     (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
1673     - rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
1674     + rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
1675     + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
1676     + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
1677     return RX_DROP_UNUSABLE;
1678     memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
1679     for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
1680     @@ -3359,6 +3377,7 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
1681     return false;
1682     /* ignore action frames to TDLS-peers */
1683     if (ieee80211_is_action(hdr->frame_control) &&
1684     + !is_broadcast_ether_addr(bssid) &&
1685     !ether_addr_equal(bssid, hdr->addr1))
1686     return false;
1687     }
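
The defragment path now handles GCMP by reading the PN through the CCMP union member, and the BUILD_BUG_ON()s turn that aliasing into a compile-time contract: same offset, same size, same PN length. The same trick in standalone C11 (the layout here is illustrative):

    #include <stddef.h>

    struct key {
            union {
                    struct { int hdr; unsigned char pn[6]; } ccmp;
                    struct { int hdr; unsigned char pn[6]; } gcmp;
            } u;
    };

    /* If a later edit moves or resizes either pn field, the build
     * breaks here instead of silently reading the wrong bytes. */
    _Static_assert(offsetof(struct key, u.ccmp.pn) ==
                   offsetof(struct key, u.gcmp.pn), "pn offsets differ");
    _Static_assert(sizeof(((struct key *)0)->u.ccmp.pn) ==
                   sizeof(((struct key *)0)->u.gcmp.pn), "pn sizes differ");
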
1688     diff --git a/net/wireless/core.c b/net/wireless/core.c
1689     index 2a0bbd22854b..71e9b84847f3 100644
1690     --- a/net/wireless/core.c
1691     +++ b/net/wireless/core.c
1692     @@ -1138,6 +1138,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
1693     return NOTIFY_DONE;
1694     }
1695    
1696     + wireless_nlevent_flush();
1697     +
1698     return NOTIFY_OK;
1699     }
1700    
1701     diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
1702     index c8717c1d082e..b50ee5d622e1 100644
1703     --- a/net/wireless/wext-core.c
1704     +++ b/net/wireless/wext-core.c
1705     @@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
1706    
1707     /* IW event code */
1708    
1709     +void wireless_nlevent_flush(void)
1710     +{
1711     + struct sk_buff *skb;
1712     + struct net *net;
1713     +
1714     + ASSERT_RTNL();
1715     +
1716     + for_each_net(net) {
1717     + while ((skb = skb_dequeue(&net->wext_nlevents)))
1718     + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
1719     + GFP_KERNEL);
1720     + }
1721     +}
1722     +EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
1723     +
1724     +static int wext_netdev_notifier_call(struct notifier_block *nb,
1725     + unsigned long state, void *ptr)
1726     +{
1727     + /*
1728     + * When a netdev changes state in any way, flush all pending messages
1729     + * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
1730     + * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
1731     + * or similar - all of which could otherwise happen due to delays from
1732     + * schedule_work().
1733     + */
1734     + wireless_nlevent_flush();
1735     +
1736     + return NOTIFY_OK;
1737     +}
1738     +
1739     +static struct notifier_block wext_netdev_notifier = {
1740     + .notifier_call = wext_netdev_notifier_call,
1741     +};
1742     +
1743     static int __net_init wext_pernet_init(struct net *net)
1744     {
1745     skb_queue_head_init(&net->wext_nlevents);
1746     @@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
1747    
1748     static int __init wireless_nlevent_init(void)
1749     {
1750     - return register_pernet_subsys(&wext_pernet_ops);
1751     + int err = register_pernet_subsys(&wext_pernet_ops);
1752     +
1753     + if (err)
1754     + return err;
1755     +
1756     + return register_netdevice_notifier(&wext_netdev_notifier);
1757     }
1758    
1759     subsys_initcall(wireless_nlevent_init);
1760     @@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
1761     /* Process events generated by the wireless layer or the driver. */
1762     static void wireless_nlevent_process(struct work_struct *work)
1763     {
1764     - struct sk_buff *skb;
1765     - struct net *net;
1766     -
1767     rtnl_lock();
1768     -
1769     - for_each_net(net) {
1770     - while ((skb = skb_dequeue(&net->wext_nlevents)))
1771     - rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
1772     - GFP_KERNEL);
1773     - }
1774     -
1775     + wireless_nlevent_flush();
1776     rtnl_unlock();
1777     }
1778    
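wireless_nlevent_flush() factors the drain loop out of the worker so the new netdev notifier can flush synchronously under RTNL; that is what keeps queued wext events from straddling RTM_NEWLINK/RTM_DELLINK. One loose end in the hunk above: if register_netdevice_notifier() fails, the pernet subsystem stays registered. A fuller init would roll back, e.g.:

    static int __init wireless_nlevent_init(void)
    {
            int err = register_pernet_subsys(&wext_pernet_ops);

            if (err)
                    return err;

            err = register_netdevice_notifier(&wext_netdev_notifier);
            if (err)
                    unregister_pernet_subsys(&wext_pernet_ops); /* roll back */
            return err;
    }
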
1779     diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
1780     index 198580d245e0..1659b409ef10 100755
1781     --- a/scripts/ld-version.sh
1782     +++ b/scripts/ld-version.sh
1783     @@ -1,7 +1,7 @@
1784     #!/usr/bin/awk -f
1785     # extract linker version number from stdin and turn into single number
1786     {
1787     - gsub(".*)", "");
1788     + gsub(".*\\)", "");
1789     split($1,a, ".");
1790     print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
1791     exit
1792     diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
1793     index c799cca5abeb..6b864c0fc2b6 100644
1794     --- a/sound/soc/codecs/wm8958-dsp2.c
1795     +++ b/sound/soc/codecs/wm8958-dsp2.c
1796     @@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
1797     struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1798     struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1799     struct wm8994 *control = wm8994->wm8994;
1800     - int value = ucontrol->value.integer.value[0];
1801     + int value = ucontrol->value.enumerated.item[0];
1802     int reg;
1803    
1804     /* Don't allow on the fly reconfiguration */
1805     @@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
1806     struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1807     struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1808     struct wm8994 *control = wm8994->wm8994;
1809     - int value = ucontrol->value.integer.value[0];
1810     + int value = ucontrol->value.enumerated.item[0];
1811     int reg;
1812    
1813     /* Don't allow on the fly reconfiguration */
1814     @@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
1815     struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1816     struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1817     struct wm8994 *control = wm8994->wm8994;
1818     - int value = ucontrol->value.integer.value[0];
1819     + int value = ucontrol->value.enumerated.item[0];
1820     int reg;
1821    
1822     /* Don't allow on the fly reconfiguration */
1823     @@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
1824     struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1825     struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1826     struct wm8994 *control = wm8994->wm8994;
1827     - int value = ucontrol->value.integer.value[0];
1828     + int value = ucontrol->value.enumerated.item[0];
1829     int reg;
1830    
1831     /* Don't allow on the fly reconfiguration */
1832     diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
1833     index a1c04dab6684..a484ca8421af 100644
1834     --- a/sound/soc/codecs/wm8994.c
1835     +++ b/sound/soc/codecs/wm8994.c
1836     @@ -361,7 +361,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
1837     struct wm8994 *control = wm8994->wm8994;
1838     struct wm8994_pdata *pdata = &control->pdata;
1839     int drc = wm8994_get_drc(kcontrol->id.name);
1840     - int value = ucontrol->value.integer.value[0];
1841     + int value = ucontrol->value.enumerated.item[0];
1842    
1843     if (drc < 0)
1844     return drc;
1845     @@ -468,7 +468,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
1846     struct wm8994 *control = wm8994->wm8994;
1847     struct wm8994_pdata *pdata = &control->pdata;
1848     int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
1849     - int value = ucontrol->value.integer.value[0];
1850     + int value = ucontrol->value.enumerated.item[0];
1851    
1852     if (block < 0)
1853     return block;
1854     diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
1855     index d01c2095452f..431d94397219 100644
1856     --- a/sound/soc/codecs/wm_adsp.c
1857     +++ b/sound/soc/codecs/wm_adsp.c
1858     @@ -248,7 +248,7 @@ static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
1859     struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1860     struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
1861    
1862     - ucontrol->value.integer.value[0] = adsp[e->shift_l].fw;
1863     + ucontrol->value.enumerated.item[0] = adsp[e->shift_l].fw;
1864    
1865     return 0;
1866     }
1867     @@ -260,16 +260,16 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
1868     struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1869     struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
1870    
1871     - if (ucontrol->value.integer.value[0] == adsp[e->shift_l].fw)
1872     + if (ucontrol->value.enumerated.item[0] == adsp[e->shift_l].fw)
1873     return 0;
1874    
1875     - if (ucontrol->value.integer.value[0] >= WM_ADSP_NUM_FW)
1876     + if (ucontrol->value.enumerated.item[0] >= WM_ADSP_NUM_FW)
1877     return -EINVAL;
1878    
1879     if (adsp[e->shift_l].running)
1880     return -EBUSY;
1881    
1882     - adsp[e->shift_l].fw = ucontrol->value.integer.value[0];
1883     + adsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
1884    
1885     return 0;
1886     }
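
The wm8958/wm8994/wm_adsp hunks, and the soc-dapm one below, all make the same correction: enumerated kcontrols must be accessed through value.enumerated.item[] (an unsigned int array), not value.integer.value[] (a long array). The two overlay each other in a union, so the old code read the wrong member, with wrong results on 64-bit and big-endian targets in particular. The corrected accessor pattern in sketch form (demo_priv and its field are hypothetical):

    static int demo_enum_get(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_value *ucontrol)
    {
            struct demo_priv *priv = snd_kcontrol_chip(kcontrol);

            /* enum controls: item[], unsigned int - never integer.value[] */
            ucontrol->value.enumerated.item[0] = priv->selection;
            return 0;
    }
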
1887     diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
1888     index b92ab40d2be6..5e8ccb0a7028 100644
1889     --- a/sound/soc/samsung/i2s.c
1890     +++ b/sound/soc/samsung/i2s.c
1891     @@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
1892     unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
1893     unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
1894     u32 mod, mask, val = 0;
1895     + unsigned long flags;
1896    
1897     - spin_lock(i2s->lock);
1898     + spin_lock_irqsave(i2s->lock, flags);
1899     mod = readl(i2s->addr + I2SMOD);
1900     - spin_unlock(i2s->lock);
1901     + spin_unlock_irqrestore(i2s->lock, flags);
1902    
1903     switch (clk_id) {
1904     case SAMSUNG_I2S_OPCLK:
1905     @@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
1906     return -EINVAL;
1907     }
1908    
1909     - spin_lock(i2s->lock);
1910     + spin_lock_irqsave(i2s->lock, flags);
1911     mod = readl(i2s->addr + I2SMOD);
1912     mod = (mod & ~mask) | val;
1913     writel(mod, i2s->addr + I2SMOD);
1914     - spin_unlock(i2s->lock);
1915     + spin_unlock_irqrestore(i2s->lock, flags);
1916    
1917     return 0;
1918     }
1919     @@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
1920     struct i2s_dai *i2s = to_info(dai);
1921     int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
1922     u32 mod, tmp = 0;
1923     + unsigned long flags;
1924    
1925     lrp_shift = i2s->variant_regs->lrp_off;
1926     sdf_shift = i2s->variant_regs->sdf_off;
1927     @@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
1928     return -EINVAL;
1929     }
1930    
1931     - spin_lock(i2s->lock);
1932     + spin_lock_irqsave(i2s->lock, flags);
1933     mod = readl(i2s->addr + I2SMOD);
1934     /*
1935     * Don't change the I2S mode if any controller is active on this
1936     @@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
1937     */
1938     if (any_active(i2s) &&
1939     ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
1940     - spin_unlock(i2s->lock);
1941     + spin_unlock_irqrestore(i2s->lock, flags);
1942     dev_err(&i2s->pdev->dev,
1943     "%s:%d Other DAI busy\n", __func__, __LINE__);
1944     return -EAGAIN;
1945     @@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
1946     mod &= ~(sdf_mask | lrp_rlow | mod_slave);
1947     mod |= tmp;
1948     writel(mod, i2s->addr + I2SMOD);
1949     - spin_unlock(i2s->lock);
1950     + spin_unlock_irqrestore(i2s->lock, flags);
1951    
1952     return 0;
1953     }
1954     @@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
1955     {
1956     struct i2s_dai *i2s = to_info(dai);
1957     u32 mod, mask = 0, val = 0;
1958     + unsigned long flags;
1959    
1960     if (!is_secondary(i2s))
1961     mask |= (MOD_DC2_EN | MOD_DC1_EN);
1962     @@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
1963     return -EINVAL;
1964     }
1965    
1966     - spin_lock(i2s->lock);
1967     + spin_lock_irqsave(i2s->lock, flags);
1968     mod = readl(i2s->addr + I2SMOD);
1969     mod = (mod & ~mask) | val;
1970     writel(mod, i2s->addr + I2SMOD);
1971     - spin_unlock(i2s->lock);
1972     + spin_unlock_irqrestore(i2s->lock, flags);
1973    
1974     samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
1975    
1976     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
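Every I2SMOD read-modify-write in the driver moves from spin_lock() to spin_lock_irqsave(). The change implies the same lock is also taken with interrupts disabled (e.g. from the trigger path), in which case process-context holders must mask local interrupts too, or lockdep flags an inconsistency and a same-CPU interrupt can deadlock on the held lock. The repeated pattern, condensed (assuming the driver's i2s->lock pointer and MMIO helpers as in the hunks):

    static void update_mod(struct i2s_dai *i2s, u32 mask, u32 val)
    {
            unsigned long flags;
            u32 mod;

            spin_lock_irqsave(i2s->lock, flags);    /* masks local IRQs */
            mod = readl(i2s->addr + I2SMOD);
            writel((mod & ~mask) | val, i2s->addr + I2SMOD);
            spin_unlock_irqrestore(i2s->lock, flags);
    }
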
1977     index b6c12dccb259..28df6adf362b 100644
1978     --- a/sound/soc/soc-dapm.c
1979     +++ b/sound/soc/soc-dapm.c
1980     @@ -3324,7 +3324,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
1981     {
1982     struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
1983    
1984     - ucontrol->value.integer.value[0] = w->params_select;
1985     + ucontrol->value.enumerated.item[0] = w->params_select;
1986    
1987     return 0;
1988     }
1989     @@ -3338,13 +3338,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
1990     if (w->power)
1991     return -EBUSY;
1992    
1993     - if (ucontrol->value.integer.value[0] == w->params_select)
1994     + if (ucontrol->value.enumerated.item[0] == w->params_select)
1995     return 0;
1996    
1997     - if (ucontrol->value.integer.value[0] >= w->num_params)
1998     + if (ucontrol->value.enumerated.item[0] >= w->num_params)
1999     return -EINVAL;
2000    
2001     - w->params_select = ucontrol->value.integer.value[0];
2002     + w->params_select = ucontrol->value.enumerated.item[0];
2003    
2004     return 0;
2005     }